author	Mark Brown <broonie@linaro.org>	2013-10-07 06:46:56 -0400
committer	Mark Brown <broonie@linaro.org>	2013-10-07 06:46:56 -0400
commit	9f9e4266a66b6f9dcde305e85035615c06bdb7f7 (patch)
tree	c6f867c7a1bf94a3b4f13b19a9ce442c7aec9e79 /drivers
parent	bf551413038f74343ec4d1413c3610e2362d0aeb (diff)
parent	249ce1387b7739dbea2ac1a697e4bf1e37ec06b7 (diff)
Merge remote-tracking branch 'asoc/topic/dapm' into asoc-twl6040
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_ipmi.c24
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/atm/he.c13
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/bcma/driver_pci.c49
-rw-r--r--drivers/bcma/scan.c12
-rw-r--r--drivers/block/cciss.c1
-rw-r--r--drivers/block/cpqarray.c1
-rw-r--r--drivers/block/rbd.c77
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c12
-rw-r--r--drivers/char/tpm/xen-tpmfront.c36
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c3
-rw-r--r--drivers/clocksource/em_sti.c2
-rw-r--r--drivers/clocksource/exynos_mct.c10
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c9
-rw-r--r--drivers/cpufreq/cpufreq.c34
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c7
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/edma.c2
-rw-r--r--drivers/dma/imx-dma.c31
-rw-r--r--drivers/gpio/gpio-omap.c158
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/drm_context.c73
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fops.c21
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c68
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c46
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c63
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c58
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c41
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c24
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c57
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c53
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c12
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c164
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c15
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c19
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c40
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c20
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c69
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c89
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c3
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c7
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c112
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c44
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/radeon/si.c21
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c30
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c43
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c17
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.h2
-rw-r--r--drivers/gpu/drm/radeon/trinity_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c1
-rw-r--r--drivers/hid/hid-core.c74
-rw-r--r--drivers/hid/hid-input.c11
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c25
-rw-r--r--drivers/hid/hid-lg2ff.c19
-rw-r--r--drivers/hid/hid-lg3ff.c29
-rw-r--r--drivers/hid/hid-lg4ff.c20
-rw-r--r--drivers/hid/hid-lgff.c17
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-multitouch.c26
-rw-r--r--drivers/hid/hid-sony.c4
-rw-r--r--drivers/hid/hid-steelseries.c5
-rw-r--r--drivers/hid/hid-zpff.c18
-rw-r--r--drivers/hv/connection.c2
-rw-r--r--drivers/hv/hv_kvp.c38
-rw-r--r--drivers/hv/hv_snapshot.c6
-rw-r--r--drivers/hv/hv_util.c71
-rw-r--r--drivers/hwmon/applesmc.c11
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c26
-rw-r--r--drivers/i2c/busses/i2c-ismt.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c16
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/adc/at91_adc.c11
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/buffer_cb.c2
-rw-r--r--drivers/iio/dac/mcp4725.c12
-rw-r--r--drivers/iio/iio_core.h4
-rw-r--r--drivers/iio/industrialio-buffer.c30
-rw-r--r--drivers/iio/industrialio-core.c33
-rw-r--r--drivers/iio/industrialio-event.c20
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/iio/temperature/tmp006.c6
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/iommu/arm-smmu.c13
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c4
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c4
-rw-r--r--drivers/isdn/hisax/avm_pci.c4
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/diva.c4
-rw-r--r--drivers/isdn/hisax/elsa.c2
-rw-r--r--drivers/isdn/hisax/elsa_ser.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/hscx_irq.c4
-rw-r--r--drivers/isdn/hisax/icc.c4
-rw-r--r--drivers/isdn/hisax/ipacx.c8
-rw-r--r--drivers/isdn/hisax/isac.c4
-rw-r--r--drivers/isdn/hisax/isar.c6
-rw-r--r--drivers/isdn/hisax/jade.c18
-rw-r--r--drivers/isdn/hisax/jade_irq.c4
-rw-r--r--drivers/isdn/hisax/l3_1tr6.c50
-rw-r--r--drivers/isdn/hisax/netjet.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/isdn/hisax/w6692.c8
-rw-r--r--drivers/mailbox/mailbox-omap2.c1
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/bset.c39
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/journal.c33
-rw-r--r--drivers/md/bcache/request.c15
-rw-r--r--drivers/md/bcache/sysfs.c9
-rw-r--r--drivers/md/bcache/util.c11
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/bcache/writeback.c42
-rw-r--r--drivers/md/dm-io.c7
-rw-r--r--drivers/md/dm-mpath.c18
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c5
-rw-r--r--drivers/md/dm-stats.c23
-rw-r--r--drivers/md/dm-thin.c14
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/misc/mei/amthif.c1
-rw-r--r--drivers/misc/mei/bus.c5
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/hbm.c10
-rw-r--r--drivers/misc/mei/init.c3
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/mei_dev.h6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c16
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c7
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_alb.h9
-rw-r--r--drivers/net/bonding/bond_main.c14
-rw-r--r--drivers/net/bonding/bond_sysfs.c39
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/flexcan.c12
-rw-r--r--drivers/net/can/slcan.c139
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c15
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c4
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c9
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c44
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c189
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c50
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c78
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c13
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c162
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c10
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c3
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c39
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c10
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/irda/mcs7780.c40
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/netconsole.c5
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/cdc_ether.c115
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/usbnet.c27
-rw-r--r--drivers/net/vxlan.c49
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c17
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c15
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c94
-rw-r--r--drivers/net/xen-netback/xenbus.c151
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c5
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/ti-abb-regulator.c16
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/staging/comedi/Kconfig33
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c25
-rw-r--r--drivers/staging/dgap/dgap_driver.c17
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c4
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/light/isl29018.c1
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c2
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c11
-rw-r--r--drivers/staging/line6/toneport.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c2
-rw-r--r--drivers/staging/lustre/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c6
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c2
-rw-r--r--drivers/staging/octeon/ethernet-mem.c7
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c2
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/vt6656/card.c4
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/staging/xillybus/xillybus_core.c2
-rw-r--r--drivers/staging/zram/zram_drv.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_sbc.c28
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/target_core_xcopy.c4
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/tty/n_tty.c49
-rw-r--r--drivers/tty/serial/pch_uart.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c4
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c7
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/core/hub.c3
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/cdc2.c19
-rw-r--r--drivers/usb/gadget/dummy_hcd.c7
-rw-r--r--drivers/usb/gadget/f_ecm.c2
-rw-r--r--drivers/usb/gadget/f_eem.c2
-rw-r--r--drivers/usb/gadget/f_fs.c62
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/multi.c8
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c3
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c15
-rw-r--r--drivers/usb/host/ehci-fsl.c19
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c5
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c2
-rw-r--r--drivers/usb/host/ehci-tilegx.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c22
-rw-r--r--drivers/usb/host/ohci-q.c26
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/uhci-q.c12
-rw-r--r--drivers/usb/host/xhci-hub.c47
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c37
-rw-r--r--drivers/usb/host/xhci.c25
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c11
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c2
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/pl2303.c43
-rw-r--r--drivers/vhost/scsi.c50
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c17
-rw-r--r--drivers/video/mxsfb.c1
-rw-r--r--drivers/video/neofb.c4
-rw-r--r--drivers/video/of_display_timing.c6
-rw-r--r--drivers/video/omap2/displays-new/Kconfig1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-dvi.c2
-rw-r--r--drivers/video/omap2/displays-new/connector-hdmi.c2
-rw-r--r--drivers/video/omap2/dss/dispc.c1
-rw-r--r--drivers/video/s3fb.c9
-rw-r--r--drivers/xen/balloon.c23
443 files changed, 4222 insertions, 2331 deletions
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index f40acef80269..a6977e12d574 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
 	struct list_head head;
 	/* the IPMI request message list */
 	struct list_head tx_msg_list;
-	struct mutex tx_msg_lock;
+	spinlock_t tx_msg_lock;
 	acpi_handle handle;
 	struct pnp_dev *pnp_dev;
 	ipmi_user_t user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 	struct kernel_ipmi_msg *msg;
 	struct acpi_ipmi_buffer *buffer;
 	struct acpi_ipmi_device *device;
+	unsigned long flags;
 
 	msg = &tx_msg->tx_message;
 	/*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
 	/* Get the msgid */
 	device = tx_msg->device;
-	mutex_lock(&device->tx_msg_lock);
+	spin_lock_irqsave(&device->tx_msg_lock, flags);
 	device->curr_msgid++;
 	tx_msg->tx_msgid = device->curr_msgid;
-	mutex_unlock(&device->tx_msg_lock);
+	spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 	int msg_found = 0;
 	struct acpi_ipmi_msg *tx_msg;
 	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+	unsigned long flags;
 
 	if (msg->user != ipmi_device->user_interface) {
 		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		ipmi_free_recv_msg(msg);
 		return;
 	}
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
 		if (msg->msgid == tx_msg->tx_msgid) {
 			msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 		}
 	}
 
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	if (!msg_found) {
 		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
 			"returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	struct acpi_ipmi_device *ipmi_device = handler_context;
 	int err, rem_time;
 	acpi_status status;
+	unsigned long flags;
 	/*
 	 * IPMI opregion message.
 	 * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 		return AE_NO_MEMORY;
 
 	acpi_format_ipmi_msg(tx_msg, address, value);
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	err = ipmi_request_settime(ipmi_device->user_interface,
 				   &tx_msg->addr,
 				   tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
 	status = AE_OK;
 
 end_label:
-	mutex_lock(&ipmi_device->tx_msg_lock);
+	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
 	list_del(&tx_msg->head);
-	mutex_unlock(&ipmi_device->tx_msg_lock);
+	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
 	kfree(tx_msg);
 	return status;
 }
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
 
 	INIT_LIST_HEAD(&ipmi_device->head);
 
-	mutex_init(&ipmi_device->tx_msg_lock);
+	spin_lock_init(&ipmi_device->tx_msg_lock);
 	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 	ipmi_install_space_handler(ipmi_device);
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fbdb82e70d10..407ad13cac2f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(acpi_bus_get_device);
+EXPORT_SYMBOL(acpi_bus_get_device);
 
 int acpi_device_add(struct acpi_device *device,
 		    void (*release)(struct device *))
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus. Searches the namespace for all
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 958ba2a420c3..97f4acb54ad6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -2,7 +2,7 @@
  * sata_promise.c - Promise SATA
  *
  * Maintained by:  Tejun Heo <tj@kernel.org>
- *		    Mikael Pettersson <mikpe@it.uu.se>
+ *		    Mikael Pettersson
  * Please ALWAYS copy linux-ide@vger.kernel.org
  * on emails.
  *
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 449f6298dc89..8557adcd34ee 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2865,15 +2865,4 @@ static struct pci_driver he_driver = {
 	.id_table =	he_pci_tbl,
 };
 
-static int __init he_init(void)
-{
-	return pci_register_driver(&he_driver);
-}
-
-static void __exit he_cleanup(void)
-{
-	pci_unregister_driver(&he_driver);
-}
-
-module_init(he_init);
-module_exit(he_cleanup);
+module_pci_driver(he_driver);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 409502a78e7e..5aca5f4c5458 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -778,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
 		return error;
 	}
 
-	if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) {
+	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
 		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 				   card->atmdev->esi, 6);
 		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c7cfadcf6752..34abf4d8a45f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
  */
 void device_shutdown(void)
 {
-	struct device *dev;
+	struct device *dev, *parent;
 
 	spin_lock(&devices_kset->list_lock);
 	/*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
 		 * prevent it from being freed because parent's
 		 * lock is to be held
 		 */
-		get_device(dev->parent);
+		parent = get_device(dev->parent);
 		get_device(dev);
 		/*
 		 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
 		spin_unlock(&devices_kset->list_lock);
 
 		/* hold lock to avoid race with probe/release */
-		if (dev->parent)
-			device_lock(dev->parent);
+		if (parent)
+			device_lock(parent);
 		device_lock(dev);
 
 		/* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
 		}
 
 		device_unlock(dev);
-		if (dev->parent)
-			device_unlock(dev->parent);
+		if (parent)
+			device_unlock(parent);
 
 		put_device(dev);
-		put_device(dev->parent);
+		put_device(parent);
 
 		spin_lock(&devices_kset->list_lock);
 	}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
 	}
 }
 
-static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
-{
-	u16 data;
-
-	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
-		data = up ? 0x74 : 0x7C;
-		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
-		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
-		data = up ? 0x75 : 0x7D;
-		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
-		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-	}
-}
-
 /**************************************************
  * Init.
  **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
 		bcma_core_pci_clientmode_init(pc);
 }
 
+void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+	struct bcma_drv_pci *pc;
+	u16 data;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+		data = up ? 0x74 : 0x7C;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+		data = up ? 0x75 : 0x7D;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	}
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
+
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
 			  bool enable)
 {
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
 
 	pc = &bus->drv_pci[0];
 
-	bcma_core_pci_power_save(pc, true);
-
 	bcma_core_pci_extend_L1timer(pc, true);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
 	pc = &bus->drv_pci[0];
 
 	bcma_core_pci_extend_L1timer(pc, false);
-
-	bcma_core_pci_power_save(pc, false);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cd6b20fce680..37768401d113 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -269,6 +269,8 @@ static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 core
 		return NULL;
 }
 
+#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
+
 static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 			      struct bcma_device_id *match, int core_num,
 			      struct bcma_device *core)
@@ -351,11 +353,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	 * the main register space for the core
 	 */
 	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
-	if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+	if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
 		/* Try again to see if it is a bridge */
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_BRIDGE, 0);
-		if (tmp == 0 || IS_ERR_VALUE(tmp)) {
+		if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
 			return -EILSEQ;
 		} else {
 			bcma_info(bus, "Bridge found\n");
@@ -369,7 +371,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	for (j = 0; ; j++) {
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_SLAVE, i);
-		if (IS_ERR_VALUE(tmp)) {
+		if (IS_ERR_VALUE_U32(tmp)) {
 			/* no more entries for port _i_ */
 			/* pr_debug("erom: slave port %d "
 			 * "has %d descriptors\n", i, j); */
@@ -386,7 +388,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	for (j = 0; ; j++) {
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_MWRAP, i);
-		if (IS_ERR_VALUE(tmp)) {
+		if (IS_ERR_VALUE_U32(tmp)) {
 			/* no more entries for port _i_ */
 			/* pr_debug("erom: master wrapper %d "
 			 * "has %d descriptors\n", i, j); */
@@ -404,7 +406,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
 	for (j = 0; ; j++) {
 		tmp = bcma_erom_get_addr_desc(bus, eromptr,
 					      SCAN_ADDR_TYPE_SWRAP, i + hack);
-		if (IS_ERR_VALUE(tmp)) {
+		if (IS_ERR_VALUE_U32(tmp)) {
 			/* no more entries for port _i_ */
 			/* pr_debug("erom: master wrapper %d "
 			 * has %d descriptors\n", i, j); */
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d2d95ff5353b..edfa2515bc86 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |=
 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 639d26b90b91..2b9440384536 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1193,6 +1193,7 @@ out_passthru:
 		ida_pci_info_struct pciinfo;
 
 		if (!arg) return -EINVAL;
+		memset(&pciinfo, 0, sizeof(pciinfo));
 		pciinfo.bus = host->pci_dev->bus->number;
 		pciinfo.dev_fn = host->pci_dev->devfn;
 		pciinfo.board_id = host->board_id;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b22a7d0fe5b7..cb1db2979d3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -931,12 +931,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
 						u64 snap_id)
 {
 	u32 which;
+	const char *snap_name;
 
 	which = rbd_dev_snap_index(rbd_dev, snap_id);
 	if (which == BAD_SNAP_INDEX)
-		return NULL;
+		return ERR_PTR(-ENOENT);
 
-	return _rbd_dev_v1_snap_name(rbd_dev, which);
+	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
+	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
 }
 
 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
@@ -2812,7 +2814,7 @@ out_err:
 	obj_request_done_set(obj_request);
 }
 
-static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
+static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
 {
 	struct rbd_obj_request *obj_request;
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
@@ -2827,16 +2829,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
 	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
 	if (!obj_request->osd_req)
 		goto out;
-	obj_request->callback = rbd_obj_request_put;
 
 	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
 			      notify_id, 0, 0);
 	rbd_osd_req_format_read(obj_request);
 
 	ret = rbd_obj_request_submit(osdc, obj_request);
-out:
 	if (ret)
-		rbd_obj_request_put(obj_request);
+		goto out;
+	ret = rbd_obj_request_wait(obj_request);
+out:
+	rbd_obj_request_put(obj_request);
 
 	return ret;
 }
@@ -2856,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
 	if (ret)
 		rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
 
-	rbd_obj_notify_ack(rbd_dev, notify_id);
+	rbd_obj_notify_ack_sync(rbd_dev, notify_id);
 }
 
 /*
@@ -3328,6 +3331,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
 		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 }
 
+static void rbd_dev_update_size(struct rbd_device *rbd_dev)
+{
+	sector_t size;
+	bool removing;
+
+	/*
+	 * Don't hold the lock while doing disk operations,
+	 * or lock ordering will conflict with the bdev mutex via:
+	 * rbd_add() -> blkdev_get() -> rbd_open()
+	 */
+	spin_lock_irq(&rbd_dev->lock);
+	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
+	spin_unlock_irq(&rbd_dev->lock);
+	/*
+	 * If the device is being removed, rbd_dev->disk has
+	 * been destroyed, so don't try to update its size
+	 */
+	if (!removing) {
+		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
+		dout("setting size to %llu sectors", (unsigned long long)size);
+		set_capacity(rbd_dev->disk, size);
+		revalidate_disk(rbd_dev->disk);
+	}
+}
+
 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 {
 	u64 mapping_size;
@@ -3347,12 +3375,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 	up_write(&rbd_dev->header_rwsem);
 
 	if (mapping_size != rbd_dev->mapping.size) {
-		sector_t size;
-
-		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
-		dout("setting size to %llu sectors", (unsigned long long)size);
-		set_capacity(rbd_dev->disk, size);
-		revalidate_disk(rbd_dev->disk);
+		rbd_dev_update_size(rbd_dev);
 	}
 
 	return ret;
@@ -4061,8 +4084,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
 
 		snap_id = snapc->snaps[which];
 		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
-		if (IS_ERR(snap_name))
-			break;
+		if (IS_ERR(snap_name)) {
+			/* ignore no-longer existing snapshots */
+			if (PTR_ERR(snap_name) == -ENOENT)
+				continue;
+			else
+				break;
+		}
 		found = !strcmp(name, snap_name);
 		kfree(snap_name);
 	}
@@ -4141,8 +4169,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
 	/* Look up the snapshot name, and make a copy */
 
 	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
-	if (!snap_name) {
-		ret = -ENOMEM;
+	if (IS_ERR(snap_name)) {
+		ret = PTR_ERR(snap_name);
 		goto out_err;
 	}
 
@@ -5163,10 +5191,23 @@ static ssize_t rbd_remove(struct bus_type *bus,
 	if (ret < 0 || already)
 		return ret;
 
-	rbd_bus_del_dev(rbd_dev);
 	ret = rbd_dev_header_watch_sync(rbd_dev, false);
 	if (ret)
 		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
+
+	/*
+	 * flush remaining watch callbacks - these must be complete
+	 * before the osd_client is shutdown
+	 */
+	dout("%s: flushing notifies", __func__);
+	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+	/*
+	 * Don't free anything from rbd_dev->disk until after all
+	 * notifies are completely processed. Otherwise
+	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
+	 * in a potential use after free of rbd_dev->disk or rbd_dev.
+	 */
+	rbd_bus_del_dev(rbd_dev);
 	rbd_dev_image_release(rbd_dev);
 	module_put(THIS_MODULE);
 
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x04CA, 0x3008) },
 	{ USB_DEVICE(0x13d3, 0x3362) },
 	{ USB_DEVICE(0x0CF3, 0xE004) },
+	{ USB_DEVICE(0x0CF3, 0xE005) },
 	{ USB_DEVICE(0x0930, 0x0219) },
 	{ USB_DEVICE(0x0489, 0xe057) },
 	{ USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
 
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0b05, 0x17b5) },
+	{ USB_DEVICE(0x0b05, 0x17cb) },
 	{ USB_DEVICE(0x04ca, 0x2003) },
 	{ USB_DEVICE(0x0489, 0xe042) },
 	{ USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
 	/*Broadcom devices with vendor specific id */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
 
+	/* Belkin F8065bf - Broadcom based */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+
 	{ }	/* Terminating entry */
 };
 
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 19ab6ff53d59..2394e9753ef5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
 					 phys_addr_t sdramwins_phys_base,
 					 size_t sdramwins_size)
 {
+	struct device_node *np;
 	int win;
 
 	mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
 		return -ENOMEM;
 	}
 
-	if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
+	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+	if (np) {
 		mbus->hw_io_coherency = 1;
+		of_node_put(np);
+	}
 
 	for (win = 0; win < mbus->soc->num_wins; win++)
 		mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
 	int ret;
 
 	/*
-	 * These are optional, so we clear them and they'll
-	 * be zero if they are missing from the DT.
+	 * These are optional, so we make sure that resource_size(x) will
+	 * return 0.
 	 */
 	memset(mem, 0, sizeof(struct resource));
+	mem->end = -1;
 	memset(io, 0, sizeof(struct resource));
+	io->end = -1;
 
 	ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
 	if (!ret) {
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 7a7929ba2658..06189e55b4e5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-			  char *buf)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 locality = priv->shr->locality;
-
-	return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-			   const char *buf, size_t len)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct tpm_private *priv = TPM_VPRIV(chip);
-	u8 val;
-
-	int rv = kstrtou8(buf, 0, &val);
-	if (rv)
-		return rv;
-
-	priv->shr->locality = val;
-
-	return len;
-}
-
 static const struct file_operations vtpm_ops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-		   tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
 	&dev_attr_cancel.attr,
 	&dev_attr_durations.attr,
 	&dev_attr_timeouts.attr,
-	&dev_attr_locality.attr,
 	NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
 	.attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
 	.status = vtpm_status,
 	.recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
 	.miscdev = {
 		.fops = &vtpm_ops,
 	},
-	.duration = {
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-		TPM_LONG_TIMEOUT,
-	},
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 41c69469ce20..971d796e071d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
 	bool
+	select CLKSRC_OF
 
 config ORION_TIMER
 	select CLKSRC_OF
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec95..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
 	clocksource_of_init_fn init_func;
 
 	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+		if (!of_device_is_available(np))
+			continue;
+
 		init_func = match->data;
 		init_func(np);
 	}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index b9c81b7c3a3b..3a5909c12d42 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 	ced->name = dev_name(&p->pdev->dev);
 	ced->features = CLOCK_EVT_FEAT_ONESHOT;
 	ced->rating = 200;
-	ced->cpumask = cpumask_of(0);
+	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = em_sti_clock_event_next;
 	ced->set_mode = em_sti_clock_event_mode;
 
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5b34768f4d7c..62b0de6a1837 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 				evt->irq);
 			return -EIO;
 		}
-		irq_set_affinity(evt->irq, cpumask_of(cpu));
 	} else {
 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
 	}
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
449 unsigned long action, void *hcpu) 448 unsigned long action, void *hcpu)
450{ 449{
451 struct mct_clock_event_device *mevt; 450 struct mct_clock_event_device *mevt;
451 unsigned int cpu;
452 452
453 /* 453 /*
454 * Grab cpu pointer in each case to avoid spurious 454 * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
459 mevt = this_cpu_ptr(&percpu_mct_tick); 459 mevt = this_cpu_ptr(&percpu_mct_tick);
460 exynos4_local_timer_setup(&mevt->evt); 460 exynos4_local_timer_setup(&mevt->evt);
461 break; 461 break;
462 case CPU_ONLINE:
463 cpu = (unsigned long)hcpu;
464 if (mct_int_type == MCT_INT_SPI)
465 irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
466 cpumask_of(cpu));
467 break;
462 case CPU_DYING: 468 case CPU_DYING:
463 mevt = this_cpu_ptr(&percpu_mct_tick); 469 mevt = this_cpu_ptr(&percpu_mct_tick);
464 exynos4_local_timer_stop(&mevt->evt); 470 exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
500 &percpu_mct_tick); 506 &percpu_mct_tick);
501 WARN(err, "MCT: can't request IRQ %d (%d)\n", 507 WARN(err, "MCT: can't request IRQ %d (%d)\n",
502 mct_irqs[MCT_L0_IRQ], err); 508 mct_irqs[MCT_L0_IRQ], err);
509 } else {
510 irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
503 } 511 }
504 512
505 err = register_cpu_notifier(&exynos4_mct_cpu_nb); 513 err = register_cpu_notifier(&exynos4_mct_cpu_nb);
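
The exynos_mct change defers irq_set_affinity() to the CPU_ONLINE notification, since an IRQ cannot be bound to a CPU that is not up yet. A rough sketch of that shape, with invented names (example_irq, example_cpu_nb) and no claim to mirror the driver's full logic:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/threads.h>

static unsigned int example_irq[NR_CPUS];

static int example_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                /* the CPU is up, so its per-CPU timer IRQ can follow it now */
                irq_set_affinity(example_irq[cpu], cpumask_of(cpu));
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_notify,
};
/* registered once during driver init with register_cpu_notifier(&example_cpu_nb) */
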
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a1260b4549db..d2c3253e015e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver())
991 return 0;
992
989 if (acpi_disabled) 993 if (acpi_disabled)
990 return 0; 994 return 0;
991 995
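
The guard added to acpi_cpufreq_init() avoids registering a second cpufreq driver when one is already loaded. A minimal illustration of the same early exit, assuming a hypothetical example_driver defined elsewhere in the file:

#include <linux/cpufreq.h>
#include <linux/init.h>

static struct cpufreq_driver example_driver;    /* fields omitted for brevity */

static int __init example_cpufreq_init(void)
{
        /* don't keep reloading if another cpufreq driver already exists */
        if (cpufreq_get_current_driver())
                return 0;

        return cpufreq_register_driver(&example_driver);
}
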
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index cbfffa91ebdd..c522a95c0e16 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,6 +12,7 @@
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/cpu.h>
15#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
16#include <linux/err.h> 17#include <linux/err.h>
17#include <linux/module.h> 18#include <linux/module.h>
@@ -177,7 +178,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
177 struct device_node *np; 178 struct device_node *np;
178 int ret; 179 int ret;
179 180
180 cpu_dev = &pdev->dev; 181 cpu_dev = get_cpu_device(0);
182 if (!cpu_dev) {
183 pr_err("failed to get cpu0 device\n");
184 return -ENODEV;
185 }
181 186
182 np = of_node_get(cpu_dev->of_node); 187 np = of_node_get(cpu_dev->of_node);
183 if (!np) { 188 if (!np) {
@@ -224,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
224 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 229 if (of_property_read_u32(np, "clock-latency", &transition_latency))
225 transition_latency = CPUFREQ_ETERNAL; 230 transition_latency = CPUFREQ_ETERNAL;
226 231
227 if (cpu_reg) { 232 if (!IS_ERR(cpu_reg)) {
228 struct opp *opp; 233 struct opp *opp;
229 unsigned long min_uV, max_uV; 234 unsigned long min_uV, max_uV;
230 int i; 235 int i;
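
Two fixes run through the cpufreq-cpu0 hunk: the CPU device is looked up with get_cpu_device(0) instead of borrowing the platform device, and the regulator handle is tested with IS_ERR() because regulator_get() encodes failures as ERR_PTR() values. A simplified sketch under those assumptions; example_probe and the "cpu0" supply name are illustrative:

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
        struct device *cpu_dev;
        struct regulator *cpu_reg;

        cpu_dev = get_cpu_device(0);            /* CPU0, not &pdev->dev */
        if (!cpu_dev)
                return -ENODEV;

        cpu_reg = regulator_get(cpu_dev, "cpu0");
        if (!IS_ERR(cpu_reg)) {
                /* voltage scaling is possible, set up OPP voltages here */
        }

        return 0;
}
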
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 43c24aa756f6..04548f7023af 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -952,9 +952,20 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
952 if (cpu == policy->cpu) 952 if (cpu == policy->cpu)
953 return; 953 return;
954 954
955 /*
956 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
957 * Also lock for last cpu is enough here as contention will happen only
958 * after policy->cpu is changed and after it is changed, other threads
959 * will try to acquire lock for new cpu. And policy is already updated
960 * by then.
961 */
962 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
963
955 policy->last_cpu = policy->cpu; 964 policy->last_cpu = policy->cpu;
956 policy->cpu = cpu; 965 policy->cpu = cpu;
957 966
967 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
968
958#ifdef CONFIG_CPU_FREQ_TABLE 969#ifdef CONFIG_CPU_FREQ_TABLE
959 cpufreq_frequency_table_update_policy_cpu(policy); 970 cpufreq_frequency_table_update_policy_cpu(policy);
960#endif 971#endif
@@ -1125,7 +1136,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1125 int ret; 1136 int ret;
1126 1137
1127 /* first sibling now owns the new sysfs dir */ 1138 /* first sibling now owns the new sysfs dir */
1128 cpu_dev = get_cpu_device(cpumask_first(policy->cpus)); 1139 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1129 1140
1130 /* Don't touch sysfs files during light-weight tear-down */ 1141 /* Don't touch sysfs files during light-weight tear-down */
1131 if (frozen) 1142 if (frozen)
@@ -1189,12 +1200,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1189 policy->governor->name, CPUFREQ_NAME_LEN); 1200 policy->governor->name, CPUFREQ_NAME_LEN);
1190#endif 1201#endif
1191 1202
1192 WARN_ON(lock_policy_rwsem_write(cpu)); 1203 lock_policy_rwsem_read(cpu);
1193 cpus = cpumask_weight(policy->cpus); 1204 cpus = cpumask_weight(policy->cpus);
1194 1205 unlock_policy_rwsem_read(cpu);
1195 if (cpus > 1)
1196 cpumask_clear_cpu(cpu, policy->cpus);
1197 unlock_policy_rwsem_write(cpu);
1198 1206
1199 if (cpu != policy->cpu) { 1207 if (cpu != policy->cpu) {
1200 if (!frozen) 1208 if (!frozen)
@@ -1203,9 +1211,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1203 1211
1204 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1212 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1205 if (new_cpu >= 0) { 1213 if (new_cpu >= 0) {
1206 WARN_ON(lock_policy_rwsem_write(cpu));
1207 update_policy_cpu(policy, new_cpu); 1214 update_policy_cpu(policy, new_cpu);
1208 unlock_policy_rwsem_write(cpu);
1209 1215
1210 if (!frozen) { 1216 if (!frozen) {
1211 pr_debug("%s: policy Kobject moved to cpu: %d " 1217 pr_debug("%s: policy Kobject moved to cpu: %d "
@@ -1237,9 +1243,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1237 return -EINVAL; 1243 return -EINVAL;
1238 } 1244 }
1239 1245
1240 lock_policy_rwsem_read(cpu); 1246 WARN_ON(lock_policy_rwsem_write(cpu));
1241 cpus = cpumask_weight(policy->cpus); 1247 cpus = cpumask_weight(policy->cpus);
1242 unlock_policy_rwsem_read(cpu); 1248
1249 if (cpus > 1)
1250 cpumask_clear_cpu(cpu, policy->cpus);
1251 unlock_policy_rwsem_write(cpu);
1243 1252
1244 /* If cpu is last user of policy, free policy */ 1253 /* If cpu is last user of policy, free policy */
1245 if (cpus == 1) { 1254 if (cpus == 1) {
@@ -1451,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
1451{ 1460{
1452 unsigned int ret_freq = 0; 1461 unsigned int ret_freq = 0;
1453 1462
1463 if (cpufreq_disabled() || !cpufreq_driver)
1464 return -ENOENT;
1465
1454 if (!down_read_trylock(&cpufreq_rwsem)) 1466 if (!down_read_trylock(&cpufreq_rwsem))
1455 return 0; 1467 return 0;
1456 1468
@@ -2095,7 +2107,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2095 write_lock_irqsave(&cpufreq_driver_lock, flags); 2107 write_lock_irqsave(&cpufreq_driver_lock, flags);
2096 if (cpufreq_driver) { 2108 if (cpufreq_driver) {
2097 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2109 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2098 return -EBUSY; 2110 return -EEXIST;
2099 } 2111 }
2100 cpufreq_driver = driver_data; 2112 cpufreq_driver = driver_data;
2101 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2113 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
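
The update_policy_cpu() change takes the per-CPU policy rwsem for the current owner, updates policy->cpu, and releases through last_cpu, as its in-code comment explains. A stripped-down sketch of that handoff; example_policy and the surrounding setup are hypothetical, this is not the cpufreq core itself:

#include <linux/percpu.h>
#include <linux/rwsem.h>

static DEFINE_PER_CPU(struct rw_semaphore, example_policy_rwsem);
/* each semaphore would be init_rwsem()'d once at boot, not shown here */

struct example_policy {
        unsigned int cpu;        /* current owner of the sysfs directory */
        unsigned int last_cpu;   /* previous owner, kept for unlocking */
};

static void example_update_policy_cpu(struct example_policy *policy,
                                      unsigned int cpu)
{
        if (cpu == policy->cpu)
                return;

        /* contention only starts once policy->cpu changes, so locking the
         * old owner's semaphore is enough */
        down_write(&per_cpu(example_policy_rwsem, policy->cpu));

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        up_write(&per_cpu(example_policy_rwsem, policy->last_cpu));
}
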
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index d514c152fd1a..be5380ecdcd4 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -457,7 +457,7 @@ err_free_table:
457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); 457 opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
458err_put_node: 458err_put_node:
459 of_node_put(np); 459 of_node_put(np);
460 dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); 460 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
461 return ret; 461 return ret;
462} 462}
463 463
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 3e396543aea4..c3fd2a101ca0 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/cpu.h>
10#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
11#include <linux/delay.h> 12#include <linux/delay.h>
12#include <linux/err.h> 13#include <linux/err.h>
@@ -202,7 +203,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
202 unsigned long min_volt, max_volt; 203 unsigned long min_volt, max_volt;
203 int num, ret; 204 int num, ret;
204 205
205 cpu_dev = &pdev->dev; 206 cpu_dev = get_cpu_device(0);
207 if (!cpu_dev) {
208 pr_err("failed to get cpu0 device\n");
209 return -ENODEV;
210 }
206 211
207 np = of_node_get(cpu_dev->of_node); 212 np = of_node_get(cpu_dev->of_node);
208 if (!np) { 213 if (!np) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9733f29ed148..32b3479a2405 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 394 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 395
396 cpu->pstate.current_pstate = pstate; 396 cpu->pstate.current_pstate = pstate;
397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 397 if (limits.no_turbo)
398 wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
399 else
400 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
398 401
399} 402}
400 403
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 19e364fa5955..3f418166ce02 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
113 unsigned int target_freq, unsigned int relation) 113 unsigned int target_freq, unsigned int relation)
114{ 114{
115 struct cpufreq_freqs freqs; 115 struct cpufreq_freqs freqs;
116 unsigned long newfreq; 116 long newfreq;
117 struct clk *srcclk; 117 struct clk *srcclk;
118 int index, ret, mult = 1; 118 int index, ret, mult = 1;
119 119
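
The one-character type change in spear-cpufreq matters because clk_round_rate() returns a long and signals errors with negative values, which an unsigned long would silently turn into a huge frequency. The intended handling in isolation (example_pick_rate is a made-up helper):

#include <linux/clk.h>

static int example_pick_rate(struct clk *clk, unsigned long target)
{
        long newfreq = clk_round_rate(clk, target);

        if (newfreq < 0)
                return newfreq;         /* propagate the error code */

        return clk_set_rate(clk, newfreq);
}
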
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
201 select TI_PRIV_EDMA
201 default n 202 default n
202 help 203 help
203 Enable support for the TI EDMA controller. This DMA 204 Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..098a8da450f0 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -749,6 +749,6 @@ static void __exit edma_exit(void)
749} 749}
750module_exit(edma_exit); 750module_exit(edma_exit);
751 751
752MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 752MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
753MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 753MODULE_DESCRIPTION("TI EDMA DMA engine driver");
754MODULE_LICENSE("GPL v2"); 754MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags;
440 441
441 spin_lock(&imxdma->lock); 442 spin_lock_irqsave(&imxdma->lock, flags);
442 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
443 spin_unlock(&imxdma->lock); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
444 goto out; 445 goto out;
445 } 446 }
446 447
447 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
448 struct imxdma_desc, 449 struct imxdma_desc,
449 node); 450 node);
450 spin_unlock(&imxdma->lock); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
451 452
452 if (desc->sg) { 453 if (desc->sg) {
453 u32 tmp; 454 u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
519{ 520{
520 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
521 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 unsigned long flags;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 spin_lock_irqsave(&imxdma->lock, flags);
531 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
532 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
533 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
537 slot = i; 536 slot = i;
538 break; 537 break;
539 } 538 }
540 if (slot < 0) { 539 if (slot < 0)
541 spin_unlock_irqrestore(&imxdma->lock, flags);
542 return -EBUSY; 540 return -EBUSY;
543 }
544 541
545 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
546 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
549 546
550 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
551 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
552 spin_unlock_irqrestore(&imxdma->lock, flags);
553 549
554 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
555 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
625 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
626 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
627 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags;
628 625
629 spin_lock(&imxdma->lock); 626 spin_lock_irqsave(&imxdma->lock, flags);
630 627
631 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
632 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
633 goto out; 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return;
634 } 632 }
635 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
636 634
637 if (desc->desc.callback)
638 desc->desc.callback(desc->desc.callback_param);
639
640 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
641 * and dont mark the descriptor as complete. 636 * and dont mark the descriptor as complete.
642 * Only in non-cyclic cases it would be marked as complete 637 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
663 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
664 } 659 }
665out: 660out:
666 spin_unlock(&imxdma->lock); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662
663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param);
665
667} 666}
668 667
669static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
883 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
884 883
885 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
886 sizeof(struct scatterlist), GFP_KERNEL); 885 sizeof(struct scatterlist), GFP_ATOMIC);
887 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
888 return NULL; 887 return NULL;
889 888
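
Three related fixes run through the imx-dma hunk: the channel lock is taken with the irqsave variants because it is shared with interrupt context, the completion callback is invoked only after the lock is dropped, and the cyclic sg_list is allocated with GFP_ATOMIC since the caller may not sleep. A generic sketch of the first two points, with invented types (example_chan, example_desc):

#include <linux/spinlock.h>

struct example_desc {
        void (*callback)(void *param);
        void *callback_param;
};

struct example_chan {
        spinlock_t lock;                /* shared with the IRQ handler */
        struct example_desc *active;
};

static void example_complete(struct example_chan *c)
{
        void (*callback)(void *param) = NULL;
        void *param = NULL;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (c->active) {
                callback = c->active->callback;
                param = c->active->callback_param;
        }
        spin_unlock_irqrestore(&c->lock, flags);

        /* run the callback lock-free; it may resubmit work on this channel */
        if (callback)
                callback(param);
}
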
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ff43552d472..89675f862308 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@ struct gpio_bank {
63 struct gpio_chip chip; 63 struct gpio_chip chip;
64 struct clk *dbck; 64 struct clk *dbck;
65 u32 mod_usage; 65 u32 mod_usage;
66 u32 irq_usage;
66 u32 dbck_enable_mask; 67 u32 dbck_enable_mask;
67 bool dbck_enabled; 68 bool dbck_enabled;
68 struct device *dev; 69 struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
86#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
87#define GPIO_MOD_CTRL_BIT BIT(0) 88#define GPIO_MOD_CTRL_BIT BIT(0)
88 89
90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
91#define LINE_USED(line, offset) (line & (1 << offset))
92
89static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
90{ 94{
91 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
420 return 0; 424 return 0;
421} 425}
422 426
427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
428{
429 if (bank->regs->pinctrl) {
430 void __iomem *reg = bank->base + bank->regs->pinctrl;
431
432 /* Claim the pin for MPU */
433 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
434 }
435
436 if (bank->regs->ctrl && !BANK_USED(bank)) {
437 void __iomem *reg = bank->base + bank->regs->ctrl;
438 u32 ctrl;
439
440 ctrl = __raw_readl(reg);
441 /* Module is enabled, clocks are not gated */
442 ctrl &= ~GPIO_MOD_CTRL_BIT;
443 __raw_writel(ctrl, reg);
444 bank->context.ctrl = ctrl;
445 }
446}
447
448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
449{
450 void __iomem *base = bank->base;
451
452 if (bank->regs->wkup_en &&
453 !LINE_USED(bank->mod_usage, offset) &&
454 !LINE_USED(bank->irq_usage, offset)) {
455 /* Disable wake-up during idle for dynamic tick */
456 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
457 bank->context.wake_en =
458 __raw_readl(bank->base + bank->regs->wkup_en);
459 }
460
461 if (bank->regs->ctrl && !BANK_USED(bank)) {
462 void __iomem *reg = bank->base + bank->regs->ctrl;
463 u32 ctrl;
464
465 ctrl = __raw_readl(reg);
466 /* Module is disabled, clocks are gated */
467 ctrl |= GPIO_MOD_CTRL_BIT;
468 __raw_writel(ctrl, reg);
469 bank->context.ctrl = ctrl;
470 }
471}
472
473static int gpio_is_input(struct gpio_bank *bank, int mask)
474{
475 void __iomem *reg = bank->base + bank->regs->direction;
476
477 return __raw_readl(reg) & mask;
478}
479
423static int gpio_irq_type(struct irq_data *d, unsigned type) 480static int gpio_irq_type(struct irq_data *d, unsigned type)
424{ 481{
425 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 482 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
426 unsigned gpio = 0; 483 unsigned gpio = 0;
427 int retval; 484 int retval;
428 unsigned long flags; 485 unsigned long flags;
486 unsigned offset;
429 487
430 if (WARN_ON(!bank->mod_usage)) 488 if (!BANK_USED(bank))
431 return -EINVAL; 489 pm_runtime_get_sync(bank->dev);
432 490
433#ifdef CONFIG_ARCH_OMAP1 491#ifdef CONFIG_ARCH_OMAP1
434 if (d->irq > IH_MPUIO_BASE) 492 if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
446 return -EINVAL; 504 return -EINVAL;
447 505
448 spin_lock_irqsave(&bank->lock, flags); 506 spin_lock_irqsave(&bank->lock, flags);
449 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 507 offset = GPIO_INDEX(bank, gpio);
508 retval = _set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) {
510 _enable_gpio_module(bank, offset);
511 _set_gpio_direction(bank, offset, 1);
512 } else if (!gpio_is_input(bank, 1 << offset)) {
513 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL;
515 }
516
517 bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
450 spin_unlock_irqrestore(&bank->lock, flags); 518 spin_unlock_irqrestore(&bank->lock, flags);
451 519
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
603 * If this is the first gpio_request for the bank, 671 * If this is the first gpio_request for the bank,
604 * enable the bank module. 672 * enable the bank module.
605 */ 673 */
606 if (!bank->mod_usage) 674 if (!BANK_USED(bank))
607 pm_runtime_get_sync(bank->dev); 675 pm_runtime_get_sync(bank->dev);
608 676
609 spin_lock_irqsave(&bank->lock, flags); 677 spin_lock_irqsave(&bank->lock, flags);
610 /* Set trigger to none. You need to enable the desired trigger with 678 /* Set trigger to none. You need to enable the desired trigger with
611 * request_irq() or set_irq_type(). 679 * request_irq() or set_irq_type(). Only do this if the IRQ line has
680 * not already been requested.
612 */ 681 */
613 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 682 if (!LINE_USED(bank->irq_usage, offset)) {
614 683 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
615 if (bank->regs->pinctrl) { 684 _enable_gpio_module(bank, offset);
616 void __iomem *reg = bank->base + bank->regs->pinctrl;
617
618 /* Claim the pin for MPU */
619 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
620 }
621
622 if (bank->regs->ctrl && !bank->mod_usage) {
623 void __iomem *reg = bank->base + bank->regs->ctrl;
624 u32 ctrl;
625
626 ctrl = __raw_readl(reg);
627 /* Module is enabled, clocks are not gated */
628 ctrl &= ~GPIO_MOD_CTRL_BIT;
629 __raw_writel(ctrl, reg);
630 bank->context.ctrl = ctrl;
631 } 685 }
632
633 bank->mod_usage |= 1 << offset; 686 bank->mod_usage |= 1 << offset;
634
635 spin_unlock_irqrestore(&bank->lock, flags); 687 spin_unlock_irqrestore(&bank->lock, flags);
636 688
637 return 0; 689 return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
640static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
641{ 693{
642 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 694 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
643 void __iomem *base = bank->base;
644 unsigned long flags; 695 unsigned long flags;
645 696
646 spin_lock_irqsave(&bank->lock, flags); 697 spin_lock_irqsave(&bank->lock, flags);
647
648 if (bank->regs->wkup_en) {
649 /* Disable wake-up during idle for dynamic tick */
650 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
651 bank->context.wake_en =
652 __raw_readl(bank->base + bank->regs->wkup_en);
653 }
654
655 bank->mod_usage &= ~(1 << offset); 698 bank->mod_usage &= ~(1 << offset);
656 699 _disable_gpio_module(bank, offset);
657 if (bank->regs->ctrl && !bank->mod_usage) {
658 void __iomem *reg = bank->base + bank->regs->ctrl;
659 u32 ctrl;
660
661 ctrl = __raw_readl(reg);
662 /* Module is disabled, clocks are gated */
663 ctrl |= GPIO_MOD_CTRL_BIT;
664 __raw_writel(ctrl, reg);
665 bank->context.ctrl = ctrl;
666 }
667
668 _reset_gpio(bank, bank->chip.base + offset); 700 _reset_gpio(bank, bank->chip.base + offset);
669 spin_unlock_irqrestore(&bank->lock, flags); 701 spin_unlock_irqrestore(&bank->lock, flags);
670 702
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
672 * If this is the last gpio to be freed in the bank, 704 * If this is the last gpio to be freed in the bank,
673 * disable the bank module. 705 * disable the bank module.
674 */ 706 */
675 if (!bank->mod_usage) 707 if (!BANK_USED(bank))
676 pm_runtime_put(bank->dev); 708 pm_runtime_put(bank->dev);
677} 709}
678 710
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
762 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 794 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
763 unsigned int gpio = irq_to_gpio(bank, d->hwirq); 795 unsigned int gpio = irq_to_gpio(bank, d->hwirq);
764 unsigned long flags; 796 unsigned long flags;
797 unsigned offset = GPIO_INDEX(bank, gpio);
765 798
766 spin_lock_irqsave(&bank->lock, flags); 799 spin_lock_irqsave(&bank->lock, flags);
800 bank->irq_usage &= ~(1 << offset);
801 _disable_gpio_module(bank, offset);
767 _reset_gpio(bank, gpio); 802 _reset_gpio(bank, gpio);
768 spin_unlock_irqrestore(&bank->lock, flags); 803 spin_unlock_irqrestore(&bank->lock, flags);
804
805 /*
806 * If this is the last IRQ to be freed in the bank,
807 * disable the bank module.
808 */
809 if (!BANK_USED(bank))
810 pm_runtime_put(bank->dev);
769} 811}
770 812
771static void gpio_ack_irq(struct irq_data *d) 813static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
897 return 0; 939 return 0;
898} 940}
899 941
900static int gpio_is_input(struct gpio_bank *bank, int mask)
901{
902 void __iomem *reg = bank->base + bank->regs->direction;
903
904 return __raw_readl(reg) & mask;
905}
906
907static int gpio_get(struct gpio_chip *chip, unsigned offset) 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
908{ 943{
909 struct gpio_bank *bank; 944 struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
922{ 957{
923 struct gpio_bank *bank; 958 struct gpio_bank *bank;
924 unsigned long flags; 959 unsigned long flags;
960 int retval = 0;
925 961
926 bank = container_of(chip, struct gpio_bank, chip); 962 bank = container_of(chip, struct gpio_bank, chip);
927 spin_lock_irqsave(&bank->lock, flags); 963 spin_lock_irqsave(&bank->lock, flags);
964
965 if (LINE_USED(bank->irq_usage, offset)) {
966 retval = -EINVAL;
967 goto exit;
968 }
969
928 bank->set_dataout(bank, offset, value); 970 bank->set_dataout(bank, offset, value);
929 _set_gpio_direction(bank, offset, 0); 971 _set_gpio_direction(bank, offset, 0);
972
973exit:
930 spin_unlock_irqrestore(&bank->lock, flags); 974 spin_unlock_irqrestore(&bank->lock, flags);
931 return 0; 975 return retval;
932} 976}
933 977
934static int gpio_debounce(struct gpio_chip *chip, unsigned offset, 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
1400 struct gpio_bank *bank; 1444 struct gpio_bank *bank;
1401 1445
1402 list_for_each_entry(bank, &omap_gpio_list, node) { 1446 list_for_each_entry(bank, &omap_gpio_list, node) {
1403 if (!bank->mod_usage || !bank->loses_context) 1447 if (!BANK_USED(bank) || !bank->loses_context)
1404 continue; 1448 continue;
1405 1449
1406 bank->power_mode = pwr_mode; 1450 bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
1414 struct gpio_bank *bank; 1458 struct gpio_bank *bank;
1415 1459
1416 list_for_each_entry(bank, &omap_gpio_list, node) { 1460 list_for_each_entry(bank, &omap_gpio_list, node) {
1417 if (!bank->mod_usage || !bank->loses_context) 1461 if (!BANK_USED(bank) || !bank->loses_context)
1418 continue; 1462 continue;
1419 1463
1420 pm_runtime_get_sync(bank->dev); 1464 pm_runtime_get_sync(bank->dev);
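
The gpio-omap rework keeps two usage bitmaps per bank, one for lines requested as GPIOs and one for lines used purely as interrupts, and only lets the bank runtime-suspend when both are empty. A boiled-down sketch of that bookkeeping; everything prefixed example_ is illustrative:

#include <linux/types.h>

struct example_bank {
        u32 mod_usage;          /* lines requested through gpiolib */
        u32 irq_usage;          /* lines used only as interrupt sources */
};

#define EXAMPLE_BANK_USED(bank)         ((bank)->mod_usage || (bank)->irq_usage)
#define EXAMPLE_LINE_USED(mask, offset) ((mask) & (1 << (offset)))

static bool example_free_line(struct example_bank *bank, unsigned offset)
{
        bank->mod_usage &= ~(1 << offset);

        /* tell the caller whether the whole bank may now be idled,
         * e.g. via pm_runtime_put() */
        return !EXAMPLE_BANK_USED(bank);
}
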
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e3745eb07570..6038966ab045 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
293 if (pdata) { 293 if (pdata) {
294 p->config = *pdata; 294 p->config = *pdata;
295 } else if (IS_ENABLED(CONFIG_OF) && np) { 295 } else if (IS_ENABLED(CONFIG_OF) && np) {
296 ret = of_parse_phandle_with_args(np, "gpio-ranges", 296 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
297 "#gpio-range-cells", 0, &args); 297 &args);
298 p->config.number_of_pins = ret == 0 && args.args_count == 3 298 p->config.number_of_pins = ret == 0 ? args.args[2]
299 ? args.args[2]
300 : RCAR_MAX_GPIO_PER_BANK; 299 : RCAR_MAX_GPIO_PER_BANK;
301 p->config.gpio_base = -1; 300 p->config.gpio_base = -1;
302 } 301 }
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 796dbb212a41..8492b68e873c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
177 177
178static inline void ast_open_key(struct ast_private *ast) 178static inline void ast_open_key(struct ast_private *ast)
179{ 179{
180 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04); 180 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
181} 181}
182 182
183#define AST_VIDMEM_SIZE_8M 0x00800000 183#define AST_VIDMEM_SIZE_8M 0x00800000
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index b4fb86d89850..224ff965bcf7 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,6 +42,10 @@
42 42
43#include <drm/drmP.h> 43#include <drm/drmP.h>
44 44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
45/** 49/**
46 * Free a handle from the context bitmap. 50 * Free a handle from the context bitmap.
47 * 51 *
@@ -52,48 +56,13 @@
52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
53 * lock. 57 * lock.
54 */ 58 */
55static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 59void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
56{ 60{
57 if (drm_core_check_feature(dev, DRIVER_MODESET))
58 return;
59
60 mutex_lock(&dev->struct_mutex); 61 mutex_lock(&dev->struct_mutex);
61 idr_remove(&dev->ctx_idr, ctx_handle); 62 idr_remove(&dev->ctx_idr, ctx_handle);
62 mutex_unlock(&dev->struct_mutex); 63 mutex_unlock(&dev->struct_mutex);
63} 64}
64 65
65/******************************************************************/
66/** \name Context bitmap support */
67/*@{*/
68
69void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70 struct drm_file *file_priv)
71{
72 if (drm_core_check_feature(dev, DRIVER_MODESET))
73 return;
74
75 mutex_lock(&dev->ctxlist_mutex);
76 if (!list_empty(&dev->ctxlist)) {
77 struct drm_ctx_list *pos, *n;
78
79 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80 if (pos->tag == file_priv &&
81 pos->handle != DRM_KERNEL_CONTEXT) {
82 if (dev->driver->context_dtor)
83 dev->driver->context_dtor(dev,
84 pos->handle);
85
86 drm_ctxbitmap_free(dev, pos->handle);
87
88 list_del(&pos->head);
89 kfree(pos);
90 --dev->ctx_count;
91 }
92 }
93 }
94 mutex_unlock(&dev->ctxlist_mutex);
95}
96
97/** 66/**
98 * Context bitmap allocation. 67 * Context bitmap allocation.
99 * 68 *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
121 * 90 *
122 * Initialise the drm_device::ctx_idr 91 * Initialise the drm_device::ctx_idr
123 */ 92 */
124void drm_legacy_ctxbitmap_init(struct drm_device * dev) 93int drm_ctxbitmap_init(struct drm_device * dev)
125{ 94{
126 if (drm_core_check_feature(dev, DRIVER_MODESET))
127 return;
128
129 idr_init(&dev->ctx_idr); 95 idr_init(&dev->ctx_idr);
96 return 0;
130} 97}
131 98
132/** 99/**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
137 * Free all idr members using drm_ctx_sarea_free helper function 104 * Free all idr members using drm_ctx_sarea_free helper function
138 * while holding the drm_device::struct_mutex lock. 105 * while holding the drm_device::struct_mutex lock.
139 */ 106 */
140void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) 107void drm_ctxbitmap_cleanup(struct drm_device * dev)
141{ 108{
142 mutex_lock(&dev->struct_mutex); 109 mutex_lock(&dev->struct_mutex);
143 idr_destroy(&dev->ctx_idr); 110 idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
169 struct drm_local_map *map; 136 struct drm_local_map *map;
170 struct drm_map_list *_entry; 137 struct drm_map_list *_entry;
171 138
172 if (drm_core_check_feature(dev, DRIVER_MODESET))
173 return -EINVAL;
174
175 mutex_lock(&dev->struct_mutex); 139 mutex_lock(&dev->struct_mutex);
176 140
177 map = idr_find(&dev->ctx_idr, request->ctx_id); 141 map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
216 struct drm_local_map *map = NULL; 180 struct drm_local_map *map = NULL;
217 struct drm_map_list *r_list = NULL; 181 struct drm_map_list *r_list = NULL;
218 182
219 if (drm_core_check_feature(dev, DRIVER_MODESET))
220 return -EINVAL;
221
222 mutex_lock(&dev->struct_mutex); 183 mutex_lock(&dev->struct_mutex);
223 list_for_each_entry(r_list, &dev->maplist, head) { 184 list_for_each_entry(r_list, &dev->maplist, head) {
224 if (r_list->map 185 if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
319 struct drm_ctx ctx; 280 struct drm_ctx ctx;
320 int i; 281 int i;
321 282
322 if (drm_core_check_feature(dev, DRIVER_MODESET))
323 return -EINVAL;
324
325 if (res->count >= DRM_RESERVED_CONTEXTS) { 283 if (res->count >= DRM_RESERVED_CONTEXTS) {
326 memset(&ctx, 0, sizeof(ctx)); 284 memset(&ctx, 0, sizeof(ctx));
327 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { 285 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
352 struct drm_ctx_list *ctx_entry; 310 struct drm_ctx_list *ctx_entry;
353 struct drm_ctx *ctx = data; 311 struct drm_ctx *ctx = data;
354 312
355 if (drm_core_check_feature(dev, DRIVER_MODESET))
356 return -EINVAL;
357
358 ctx->handle = drm_ctxbitmap_next(dev); 313 ctx->handle = drm_ctxbitmap_next(dev);
359 if (ctx->handle == DRM_KERNEL_CONTEXT) { 314 if (ctx->handle == DRM_KERNEL_CONTEXT) {
360 /* Skip kernel's context and get a new one. */ 315 /* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
398{ 353{
399 struct drm_ctx *ctx = data; 354 struct drm_ctx *ctx = data;
400 355
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
402 return -EINVAL;
403
404 /* This is 0, because we don't handle any context flags */ 356 /* This is 0, because we don't handle any context flags */
405 ctx->flags = 0; 357 ctx->flags = 0;
406 358
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
423{ 375{
424 struct drm_ctx *ctx = data; 376 struct drm_ctx *ctx = data;
425 377
426 if (drm_core_check_feature(dev, DRIVER_MODESET))
427 return -EINVAL;
428
429 DRM_DEBUG("%d\n", ctx->handle); 378 DRM_DEBUG("%d\n", ctx->handle);
430 return drm_context_switch(dev, dev->last_context, ctx->handle); 379 return drm_context_switch(dev, dev->last_context, ctx->handle);
431} 380}
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
446{ 395{
447 struct drm_ctx *ctx = data; 396 struct drm_ctx *ctx = data;
448 397
449 if (drm_core_check_feature(dev, DRIVER_MODESET))
450 return -EINVAL;
451
452 DRM_DEBUG("%d\n", ctx->handle); 398 DRM_DEBUG("%d\n", ctx->handle);
453 drm_context_switch_complete(dev, file_priv, ctx->handle); 399 drm_context_switch_complete(dev, file_priv, ctx->handle);
454 400
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
471{ 417{
472 struct drm_ctx *ctx = data; 418 struct drm_ctx *ctx = data;
473 419
474 if (drm_core_check_feature(dev, DRIVER_MODESET))
475 return -EINVAL;
476
477 DRM_DEBUG("%d\n", ctx->handle); 420 DRM_DEBUG("%d\n", ctx->handle);
478 if (ctx->handle != DRM_KERNEL_CONTEXT) { 421 if (ctx->handle != DRM_KERNEL_CONTEXT) {
479 if (dev->driver->context_dtor) 422 if (dev->driver->context_dtor)
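
The drm_context.c revert above goes back to unconditional context-bitmap handling built on an idr. For readers unfamiliar with that allocator, a minimal, self-contained idr lifecycle under a mutex; the example_* names are not part of DRM:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static DEFINE_IDR(example_idr);

static int example_handle_new(void *object)
{
        int handle;

        mutex_lock(&example_lock);
        handle = idr_alloc(&example_idr, object, 0, 0, GFP_KERNEL);
        mutex_unlock(&example_lock);

        return handle;                  /* negative errno on failure */
}

static void example_handle_free(int handle)
{
        mutex_lock(&example_lock);
        idr_remove(&example_idr, handle);
        mutex_unlock(&example_lock);
}
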
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e257f..f6f6cc7fc133 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,6 +416,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
416 return; 416 return;
417 417
418 /* 418 /*
419 * fbdev->blank can be called from irq context in case of a panic.
420 * Since we already have our own special panic handler which will
421 * restore the fbdev console mode completely, just bail out early.
422 */
423 if (oops_in_progress)
424 return;
425
426 /*
419 * For each CRTC in this fb, turn the connectors on/off. 427 * For each CRTC in this fb, turn the connectors on/off.
420 */ 428 */
421 drm_modeset_lock_all(dev); 429 drm_modeset_lock_all(dev);
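
The early return added to the fbdev DPMS path keeps it from taking modeset locks while a panic is being handled. The same guard in isolation (example_fb_blank is hypothetical):

#include <linux/kernel.h>

static void example_fb_blank(int blank_mode)
{
        /* may be called from IRQ/panic context during an oops; the dedicated
         * panic handler will restore the console, so do nothing here */
        if (oops_in_progress)
                return;

        /* normal path: take locks, walk CRTCs, update DPMS state */
}
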
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4be8e09a32ef..3f84277d7036 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
439 if (dev->driver->driver_features & DRIVER_GEM) 439 if (dev->driver->driver_features & DRIVER_GEM)
440 drm_gem_release(dev, file_priv); 440 drm_gem_release(dev, file_priv);
441 441
442 drm_legacy_ctxbitmap_release(dev, file_priv); 442 mutex_lock(&dev->ctxlist_mutex);
443 if (!list_empty(&dev->ctxlist)) {
444 struct drm_ctx_list *pos, *n;
445
446 list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
447 if (pos->tag == file_priv &&
448 pos->handle != DRM_KERNEL_CONTEXT) {
449 if (dev->driver->context_dtor)
450 dev->driver->context_dtor(dev,
451 pos->handle);
452
453 drm_ctxbitmap_free(dev, pos->handle);
454
455 list_del(&pos->head);
456 kfree(pos);
457 --dev->ctx_count;
458 }
459 }
460 }
461 mutex_unlock(&dev->ctxlist_mutex);
443 462
444 mutex_lock(&dev->struct_mutex); 463 mutex_lock(&dev->struct_mutex);
445 464
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index e7eb0276f7f1..39d864576be4 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
292 goto error_out_unreg; 292 goto error_out_unreg;
293 } 293 }
294 294
295 drm_legacy_ctxbitmap_init(dev); 295
296
297 retcode = drm_ctxbitmap_init(dev);
298 if (retcode) {
299 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300 goto error_out_unreg;
301 }
296 302
297 if (driver->driver_features & DRIVER_GEM) { 303 if (driver->driver_features & DRIVER_GEM) {
298 retcode = drm_gem_init(dev); 304 retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
446 drm_rmmap(dev, r_list->map); 452 drm_rmmap(dev, r_list->map);
447 drm_ht_remove(&dev->map_hash); 453 drm_ht_remove(&dev->map_hash);
448 454
449 drm_legacy_ctxbitmap_cleanup(dev); 455 drm_ctxbitmap_cleanup(dev);
450 456
451 if (drm_core_check_feature(dev, DRIVER_MODESET)) 457 if (drm_core_check_feature(dev, DRIVER_MODESET))
452 drm_put_minor(&dev->control); 458 drm_put_minor(&dev->control);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 4752f223e5b2..45b6ef595965 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
56 56
57config DRM_EXYNOS_FIMC 57config DRM_EXYNOS_FIMC
58 bool "Exynos DRM FIMC" 58 bool "Exynos DRM FIMC"
59 depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF 59 depends on DRM_EXYNOS_IPP && MFD_SYSCON
60 help 60 help
61 Choose this option if you want to use Exynos FIMC for DRM. 61 Choose this option if you want to use Exynos FIMC for DRM.
62 62
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3445a0f3a6b2..9c8088462c26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
63 return -ENOMEM; 63 return -ENOMEM;
64 } 64 }
65 65
66 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, 66 buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
67 buf->size,
67 &buf->dma_addr, GFP_KERNEL, 68 &buf->dma_addr, GFP_KERNEL,
68 &buf->dma_attrs); 69 &buf->dma_attrs);
69 if (!buf->kvaddr) { 70 if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
90 } 91 }
91 92
92 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); 93 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
93 if (!buf->sgt) { 94 if (IS_ERR(buf->sgt)) {
94 DRM_ERROR("failed to get sg table.\n"); 95 DRM_ERROR("failed to get sg table.\n");
95 ret = -ENOMEM; 96 ret = PTR_ERR(buf->sgt);
96 goto err_free_attrs; 97 goto err_free_attrs;
97 } 98 }
98 99
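
The exynos buffer fix replaces a NULL test with IS_ERR()/PTR_ERR(), since drm_prime_pages_to_sg() reports failure through the ERR_PTR() encoding rather than by returning NULL. Reduced to its essentials; example_buf is an invented container:

#include <drm/drmP.h>
#include <linux/err.h>

struct example_buf {
        struct page **pages;
        unsigned int nr_pages;
        struct sg_table *sgt;
};

static int example_build_sgt(struct example_buf *buf)
{
        buf->sgt = drm_prime_pages_to_sg(buf->pages, buf->nr_pages);
        if (IS_ERR(buf->sgt)) {
                int ret = PTR_ERR(buf->sgt);

                buf->sgt = NULL;
                return ret;             /* e.g. -ENOMEM */
        }

        return 0;
}
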
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 78e868bcf1ec..e7c2f2d07f19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
99 if (is_drm_iommu_supported(dev)) { 99 if (is_drm_iommu_supported(dev)) {
100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT; 100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
101 101
102 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP, 102 buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
103 nr_pages, VM_MAP,
103 pgprot_writecombine(PAGE_KERNEL)); 104 pgprot_writecombine(PAGE_KERNEL));
104 } else { 105 } else {
105 phys_addr_t dma_addr = buffer->dma_addr; 106 phys_addr_t dma_addr = buffer->dma_addr;
106 if (dma_addr) 107 if (dma_addr)
107 buffer->kvaddr = phys_to_virt(dma_addr); 108 buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
108 else 109 else
109 buffer->kvaddr = (void __iomem *)NULL; 110 buffer->kvaddr = (void __iomem *)NULL;
110 } 111 }
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b1f8fc69023f..60e84043aa34 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); 707 reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
708 break; 708 break;
709 case DRM_MODE_DPMS_OFF: 709 case DRM_MODE_DPMS_OFF:
710 /* disable audio and video ports */ 710 /* disable video ports */
711 reg_write(encoder, REG_ENA_AP, 0x00);
712 reg_write(encoder, REG_ENA_VP_0, 0x00); 711 reg_write(encoder, REG_ENA_VP_0, 0x00);
713 reg_write(encoder, REG_ENA_VP_1, 0x00); 712 reg_write(encoder, REG_ENA_VP_1, 0x00);
714 reg_write(encoder, REG_ENA_VP_2, 0x00); 713 reg_write(encoder, REG_ENA_VP_2, 0x00);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8507c6d1e642..cdfb9da0e4ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1392,14 +1392,11 @@ out:
1392 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1392 if (i915_terminally_wedged(&dev_priv->gpu_error))
1393 return VM_FAULT_SIGBUS; 1393 return VM_FAULT_SIGBUS;
1394 case -EAGAIN: 1394 case -EAGAIN:
1395 /* Give the error handler a chance to run and move the 1395 /*
1396 * objects off the GPU active list. Next time we service the 1396 * EAGAIN means the gpu is hung and we'll wait for the error
1397 * fault, we should be able to transition the page into the 1397 * handler to reset everything when re-faulting in
1398 * GTT without touching the GPU (and so avoid further 1398 * i915_mutex_lock_interruptible.
1399 * EIO/EGAIN). If the GPU is wedged, then there is no issue
1400 * with coherency, just lost writes.
1401 */ 1399 */
1402 set_need_resched();
1403 case 0: 1400 case 0:
1404 case -ERESTARTSYS: 1401 case -ERESTARTSYS:
1405 case -EINTR: 1402 case -EINTR:
@@ -4803,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4803 4800
4804 if (!mutex_trylock(&dev->struct_mutex)) { 4801 if (!mutex_trylock(&dev->struct_mutex)) {
4805 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4802 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4806 return SHRINK_STOP; 4803 return 0;
4807 4804
4808 if (dev_priv->mm.shrinker_no_lock_stealing) 4805 if (dev_priv->mm.shrinker_no_lock_stealing)
4809 return SHRINK_STOP; 4806 return 0;
4810 4807
4811 unlock = false; 4808 unlock = false;
4812 } 4809 }
@@ -4904,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4904 4901
4905 if (!mutex_trylock(&dev->struct_mutex)) { 4902 if (!mutex_trylock(&dev->struct_mutex)) {
4906 if (!mutex_is_locked_by(&dev->struct_mutex, current)) 4903 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4907 return 0; 4904 return SHRINK_STOP;
4908 4905
4909 if (dev_priv->mm.shrinker_no_lock_stealing) 4906 if (dev_priv->mm.shrinker_no_lock_stealing)
4910 return 0; 4907 return SHRINK_STOP;
4911 4908
4912 unlock = false; 4909 unlock = false;
4913 } 4910 }
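
The two i915 shrinker hunks swap the bail-out return values to match the split count/scan API: count_objects() should return 0 when it cannot take the lock, while scan_objects() returns SHRINK_STOP to tell reclaim to move on. A neutral sketch of that contract, with a placeholder mutex and counters:

#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(example_lock);

static unsigned long example_count(struct shrinker *shrinker,
                                   struct shrink_control *sc)
{
        unsigned long count = 0;

        if (!mutex_trylock(&example_lock))
                return 0;               /* nothing countable right now */

        /* walk the cache and add up freeable objects in 'count' */

        mutex_unlock(&example_lock);
        return count;
}

static unsigned long example_scan(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        unsigned long freed = 0;

        if (!mutex_trylock(&example_lock))
                return SHRINK_STOP;     /* ask reclaim to try again later */

        /* free up to sc->nr_to_scan objects, counting them in 'freed' */

        mutex_unlock(&example_lock);
        return freed;
}

static struct shrinker example_shrinker = {
        .count_objects = example_count,
        .scan_objects  = example_scan,
        .seeks         = DEFAULT_SEEKS,
};
/* registered with register_shrinker(&example_shrinker) */
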
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aba9d7498996..dae364f0028c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
143 143
144 /* Seek the first printf which is hits start position */ 144 /* Seek the first printf which is hits start position */
145 if (e->pos < e->start) { 145 if (e->pos < e->start) {
146 len = vsnprintf(NULL, 0, f, args); 146 va_list tmp;
147 if (!__i915_error_seek(e, len)) 147
148 va_copy(tmp, args);
149 if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
148 return; 150 return;
149 } 151 }
150 152
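
The i915_gpu_error fix measures the formatted length on a va_copy of the argument list, because a va_list consumed by one vsnprintf() call must not be reused. The general pattern, with a made-up helper name:

#include <stdarg.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int example_measure_and_format(char *buf, size_t len,
                                      const char *fmt, va_list args)
{
        va_list tmp;
        int need;

        va_copy(tmp, args);
        need = vsnprintf(NULL, 0, fmt, tmp);    /* length only, consumes tmp */
        va_end(tmp);

        if (need < 0 || (size_t)need >= len)
                return -ENOSPC;

        return vsnprintf(buf, len, fmt, args);  /* args is still untouched here */
}
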
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 83cce0cdb769..4b91228fd9bd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1469 return ret; 1469 return ret;
1470} 1470}
1471 1471
1472static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1473 bool reset_completed)
1474{
1475 struct intel_ring_buffer *ring;
1476 int i;
1477
1478 /*
1479 * Notify all waiters for GPU completion events that reset state has
1480 * been changed, and that they need to restart their wait after
1481 * checking for potential errors (and bail out to drop locks if there is
1482 * a gpu reset pending so that i915_error_work_func can acquire them).
1483 */
1484
1485 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1486 for_each_ring(ring, dev_priv, i)
1487 wake_up_all(&ring->irq_queue);
1488
1489 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1490 wake_up_all(&dev_priv->pending_flip_queue);
1491
1492 /*
1493 * Signal tasks blocked in i915_gem_wait_for_error that the pending
1494 * reset state is cleared.
1495 */
1496 if (reset_completed)
1497 wake_up_all(&dev_priv->gpu_error.reset_queue);
1498}
1499
1472/** 1500/**
1473 * i915_error_work_func - do process context error handling work 1501 * i915_error_work_func - do process context error handling work
1474 * @work: work struct 1502 * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
1483 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1511 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1484 gpu_error); 1512 gpu_error);
1485 struct drm_device *dev = dev_priv->dev; 1513 struct drm_device *dev = dev_priv->dev;
1486 struct intel_ring_buffer *ring;
1487 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1514 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1488 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1515 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1489 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1516 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1490 int i, ret; 1517 int ret;
1491 1518
1492 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1519 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1493 1520
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
1506 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1533 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1507 reset_event); 1534 reset_event);
1508 1535
1536 /*
1537 * All state reset _must_ be completed before we update the
1538 * reset counter, for otherwise waiters might miss the reset
1539 * pending state and not properly drop locks, resulting in
1540 * deadlocks with the reset work.
1541 */
1509 ret = i915_reset(dev); 1542 ret = i915_reset(dev);
1510 1543
1544 intel_display_handle_reset(dev);
1545
1511 if (ret == 0) { 1546 if (ret == 0) {
1512 /* 1547 /*
1513 * After all the gem state is reset, increment the reset 1548 * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
1528 atomic_set(&error->reset_counter, I915_WEDGED); 1563 atomic_set(&error->reset_counter, I915_WEDGED);
1529 } 1564 }
1530 1565
1531 for_each_ring(ring, dev_priv, i) 1566 /*
1532 wake_up_all(&ring->irq_queue); 1567 * Note: The wake_up also serves as a memory barrier so that
1533 1568 * waiters see the update value of the reset counter atomic_t.
1534 intel_display_handle_reset(dev); 1569 */
1535 1570 i915_error_wake_up(dev_priv, true);
1536 wake_up_all(&dev_priv->gpu_error.reset_queue);
1537 } 1571 }
1538} 1572}
1539 1573
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1642void i915_handle_error(struct drm_device *dev, bool wedged) 1676void i915_handle_error(struct drm_device *dev, bool wedged)
1643{ 1677{
1644 struct drm_i915_private *dev_priv = dev->dev_private; 1678 struct drm_i915_private *dev_priv = dev->dev_private;
1645 struct intel_ring_buffer *ring;
1646 int i;
1647 1679
1648 i915_capture_error_state(dev); 1680 i915_capture_error_state(dev);
1649 i915_report_and_clear_eir(dev); 1681 i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1653 &dev_priv->gpu_error.reset_counter); 1685 &dev_priv->gpu_error.reset_counter);
1654 1686
1655 /* 1687 /*
1656 * Wakeup waiting processes so that the reset work item 1688 * Wakeup waiting processes so that the reset work function
1657 * doesn't deadlock trying to grab various locks. 1689 * i915_error_work_func doesn't deadlock trying to grab various
1690 * locks. By bumping the reset counter first, the woken
1691 * processes will see a reset in progress and back off,
1692 * releasing their locks and then wait for the reset completion.
1693 * We must do this for _all_ gpu waiters that might hold locks
1694 * that the reset work needs to acquire.
1695 *
1696 * Note: The wake_up serves as the required memory barrier to
1697 * ensure that the waiters see the updated value of the reset
1698 * counter atomic_t.
1658 */ 1699 */
1659 for_each_ring(ring, dev_priv, i) 1700 i915_error_wake_up(dev_priv, false);
1660 wake_up_all(&ring->irq_queue);
1661 } 1701 }
1662 1702
1663 /* 1703 /*
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63aca49d11a8..63de2701b974 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
778 /* Can only use the always-on power well for eDP when 778 /* Can only use the always-on power well for eDP when
779 * not using the panel fitter, and when not using motion 779 * not using the panel fitter, and when not using motion
780 * blur mitigation (which we don't support). */ 780 * blur mitigation (which we don't support). */
781 if (intel_crtc->config.pch_pfit.size) 781 if (intel_crtc->config.pch_pfit.enabled)
782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
783 else 783 else
784 temp |= TRANS_DDI_EDP_INPUT_A_ON; 784 temp |= TRANS_DDI_EDP_INPUT_A_ON;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2489d0b4c7d2..e5822e79f912 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2249 I915_WRITE(PIPESRC(intel_crtc->pipe),
2250 ((crtc->mode.hdisplay - 1) << 16) | 2250 ((crtc->mode.hdisplay - 1) << 16) |
2251 (crtc->mode.vdisplay - 1)); 2251 (crtc->mode.vdisplay - 1));
2252 if (!intel_crtc->config.pch_pfit.size && 2252 if (!intel_crtc->config.pch_pfit.enabled &&
2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2255 I915_WRITE(PF_CTL(intel_crtc->pipe), 0); 2255 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
3203 struct drm_i915_private *dev_priv = dev->dev_private; 3203 struct drm_i915_private *dev_priv = dev->dev_private;
3204 int pipe = crtc->pipe; 3204 int pipe = crtc->pipe;
3205 3205
3206 if (crtc->config.pch_pfit.size) { 3206 if (crtc->config.pch_pfit.enabled) {
3207 /* Force use of hard-coded filter coefficients 3207 /* Force use of hard-coded filter coefficients
3208 * as some pre-programmed values are broken, 3208 * as some pre-programmed values are broken,
3209 * e.g. x201. 3209 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
3428 3428
3429 /* To avoid upsetting the power well on haswell only disable the pfit if 3429 /* To avoid upsetting the power well on haswell only disable the pfit if
3430 * it's in use. The hw state code will make sure we get this right. */ 3430 * it's in use. The hw state code will make sure we get this right. */
3431 if (crtc->config.pch_pfit.size) { 3431 if (crtc->config.pch_pfit.enabled) {
3432 I915_WRITE(PF_CTL(pipe), 0); 3432 I915_WRITE(PF_CTL(pipe), 0);
3433 I915_WRITE(PF_WIN_POS(pipe), 0); 3433 I915_WRITE(PF_WIN_POS(pipe), 0);
3434 I915_WRITE(PF_WIN_SZ(pipe), 0); 3434 I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4775 4775
4776 pipeconf = 0; 4776 pipeconf = 0;
4777 4777
4778 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4779 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4780 pipeconf |= PIPECONF_ENABLE;
4781
4778 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4782 if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4779 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4783 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4780 * core speed. 4784 * core speed.
@@ -4877,9 +4881,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4877 return -EINVAL; 4881 return -EINVAL;
4878 } 4882 }
4879 4883
4880 /* Ensure that the cursor is valid for the new mode before changing... */
4881 intel_crtc_update_cursor(crtc, true);
4882
4883 if (is_lvds && dev_priv->lvds_downclock_avail) { 4884 if (is_lvds && dev_priv->lvds_downclock_avail) {
4884 /* 4885 /*
4885 * Ensure we match the reduced clock's P to the target clock. 4886 * Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5769,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5768 intel_crtc->config.dpll.p2 = clock.p2; 5769 intel_crtc->config.dpll.p2 = clock.p2;
5769 } 5770 }
5770 5771
5771 /* Ensure that the cursor is valid for the new mode before changing... */
5772 intel_crtc_update_cursor(crtc, true);
5773
5774 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 5772 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5775 if (intel_crtc->config.has_pch_encoder) { 5773 if (intel_crtc->config.has_pch_encoder) {
5776 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 5774 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5857,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5859 tmp = I915_READ(PF_CTL(crtc->pipe)); 5857 tmp = I915_READ(PF_CTL(crtc->pipe));
5860 5858
5861 if (tmp & PF_ENABLE) { 5859 if (tmp & PF_ENABLE) {
5860 pipe_config->pch_pfit.enabled = true;
5862 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 5861 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
5863 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 5862 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
5864 5863
@@ -6236,7 +6235,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
6236 if (!crtc->base.enabled) 6235 if (!crtc->base.enabled)
6237 continue; 6236 continue;
6238 6237
6239 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size || 6238 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
6240 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6239 crtc->config.cpu_transcoder != TRANSCODER_EDP)
6241 enable = true; 6240 enable = true;
6242 } 6241 }
@@ -6259,9 +6258,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
6259 if (!intel_ddi_pll_mode_set(crtc)) 6258 if (!intel_ddi_pll_mode_set(crtc))
6260 return -EINVAL; 6259 return -EINVAL;
6261 6260
6262 /* Ensure that the cursor is valid for the new mode before changing... */
6263 intel_crtc_update_cursor(crtc, true);
6264
6265 if (intel_crtc->config.has_dp_encoder) 6261 if (intel_crtc->config.has_dp_encoder)
6266 intel_dp_set_m_n(intel_crtc); 6262 intel_dp_set_m_n(intel_crtc);
6267 6263
@@ -6494,15 +6490,15 @@ static void haswell_write_eld(struct drm_connector *connector,
6494 6490
6495 /* Set ELD valid state */ 6491 /* Set ELD valid state */
6496 tmp = I915_READ(aud_cntrl_st2); 6492 tmp = I915_READ(aud_cntrl_st2);
6497 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); 6493 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
6498 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); 6494 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6499 I915_WRITE(aud_cntrl_st2, tmp); 6495 I915_WRITE(aud_cntrl_st2, tmp);
6500 tmp = I915_READ(aud_cntrl_st2); 6496 tmp = I915_READ(aud_cntrl_st2);
6501 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); 6497 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
6502 6498
6503 /* Enable HDMI mode */ 6499 /* Enable HDMI mode */
6504 tmp = I915_READ(aud_config); 6500 tmp = I915_READ(aud_config);
6505 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); 6501 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
6506 /* clear N_programing_enable and N_value_index */ 6502 /* clear N_programing_enable and N_value_index */
6507 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); 6503 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6508 I915_WRITE(aud_config, tmp); 6504 I915_WRITE(aud_config, tmp);
@@ -6937,7 +6933,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6937 intel_crtc->cursor_width = width; 6933 intel_crtc->cursor_width = width;
6938 intel_crtc->cursor_height = height; 6934 intel_crtc->cursor_height = height;
6939 6935
6940 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6936 if (intel_crtc->active)
6937 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6941 6938
6942 return 0; 6939 return 0;
6943fail_unpin: 6940fail_unpin:
@@ -6956,7 +6953,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6956 intel_crtc->cursor_x = x; 6953 intel_crtc->cursor_x = x;
6957 intel_crtc->cursor_y = y; 6954 intel_crtc->cursor_y = y;
6958 6955
6959 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6956 if (intel_crtc->active)
6957 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
6960 6958
6961 return 0; 6959 return 0;
6962} 6960}
@@ -8205,9 +8203,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
8205 pipe_config->gmch_pfit.control, 8203 pipe_config->gmch_pfit.control,
8206 pipe_config->gmch_pfit.pgm_ratios, 8204 pipe_config->gmch_pfit.pgm_ratios,
8207 pipe_config->gmch_pfit.lvds_border_bits); 8205 pipe_config->gmch_pfit.lvds_border_bits);
8208 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n", 8206 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
8209 pipe_config->pch_pfit.pos, 8207 pipe_config->pch_pfit.pos,
8210 pipe_config->pch_pfit.size); 8208 pipe_config->pch_pfit.size,
8209 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
8211 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8210 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
8212} 8211}
8213 8212
@@ -8603,8 +8602,11 @@ intel_pipe_config_compare(struct drm_device *dev,
8603 if (INTEL_INFO(dev)->gen < 4) 8602 if (INTEL_INFO(dev)->gen < 4)
8604 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 8603 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
8605 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 8604 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
8606 PIPE_CONF_CHECK_I(pch_pfit.pos); 8605 PIPE_CONF_CHECK_I(pch_pfit.enabled);
8607 PIPE_CONF_CHECK_I(pch_pfit.size); 8606 if (current_config->pch_pfit.enabled) {
8607 PIPE_CONF_CHECK_I(pch_pfit.pos);
8608 PIPE_CONF_CHECK_I(pch_pfit.size);
8609 }
8608 8610
8609 PIPE_CONF_CHECK_I(ips_enabled); 8611 PIPE_CONF_CHECK_I(ips_enabled);
8610 8612
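The intel_display.c hunks above replace tests on pch_pfit.size with the new explicit pch_pfit.enabled flag, so a panel fitter that is switched off can never be mistaken for one programmed to some stale size, and the pipe-config checker only compares position and size while the fitter is actually in use. A minimal, compile-ready C sketch of that comparison rule (struct and function names here are illustrative, not driver symbols):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the pch_pfit part of the pipe config. */
struct pfit_state {
        bool enabled;
        uint32_t pos;
        uint32_t size;
};

/* Compare two states the way the reworked checker does: pos and size only
 * matter while the fitter is enabled; two disabled fitters match no matter
 * what stale pos/size values they still carry. */
static bool pfit_states_match(const struct pfit_state *a,
                              const struct pfit_state *b)
{
        if (a->enabled != b->enabled)
                return false;
        if (!a->enabled)
                return true;
        return a->pos == b->pos && a->size == b->size;
}

int main(void)
{
        struct pfit_state sw = { .enabled = false, .pos = 0x10, .size = 0x20 };
        struct pfit_state hw = { .enabled = false, .pos = 0,    .size = 0 };

        printf("%d\n", pfit_states_match(&sw, &hw));    /* 1: both disabled */
        return 0;
}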
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2151d13772b8..79c14e298ba6 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
588 DRM_DEBUG_KMS("aux_ch native nack\n"); 588 DRM_DEBUG_KMS("aux_ch native nack\n");
589 return -EREMOTEIO; 589 return -EREMOTEIO;
590 case AUX_NATIVE_REPLY_DEFER: 590 case AUX_NATIVE_REPLY_DEFER:
591 udelay(100); 591 /*
592 * For now, just give more slack to branch devices. We
593 * could check the DPCD for I2C bit rate capabilities,
594 * and if available, adjust the interval. We could also
595 * be more careful with DP-to-Legacy adapters where a
596 * long legacy cable may force very low I2C bit rates.
597 */
598 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
599 DP_DWN_STRM_PORT_PRESENT)
600 usleep_range(500, 600);
601 else
602 usleep_range(300, 400);
592 continue; 603 continue;
593 default: 604 default:
594 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 605 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
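The intel_dp.c hunk swaps the fixed 100 us delay on AUX_NATIVE_REPLY_DEFER for a longer usleep_range(), with extra slack when the DPCD advertises a downstream (branch) device. A rough userspace sketch of that decision follows; the DPCD offset and bit mirror the hunk, but treat the exact values as illustrative:

#include <stdint.h>
#include <stdio.h>

#define DP_DOWNSTREAMPORT_PRESENT 0x05   /* DPCD offset used in the hunk */
#define DP_DWN_STRM_PORT_PRESENT  0x01

/* Pick the defer back-off in microseconds: branch devices (DP-to-HDMI/VGA
 * dongles and the like) get more slack than a directly attached sink. */
static unsigned int aux_defer_delay_us(const uint8_t *dpcd)
{
        if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)
                return 500;     /* usleep_range(500, 600) in the patch */
        return 300;             /* usleep_range(300, 400) */
}

int main(void)
{
        uint8_t dpcd[16] = { 0 };

        dpcd[DP_DOWNSTREAMPORT_PRESENT] = DP_DWN_STRM_PORT_PRESENT;
        printf("%u us\n", aux_defer_delay_us(dpcd));    /* 500 us */
        return 0;
}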
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a47799e832c6..28cae80495e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -280,6 +280,7 @@ struct intel_crtc_config {
280 struct { 280 struct {
281 u32 pos; 281 u32 pos;
282 u32 size; 282 u32 size;
283 bool enabled;
283 } pch_pfit; 284 } pch_pfit;
284 285
285 /* FDI configuration, only valid if has_pch_encoder is set. */ 286 /* FDI configuration, only valid if has_pch_encoder is set. */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 406303b509c1..7fa7df546c1e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
263 C(vtotal); 263 C(vtotal);
264 C(clock); 264 C(clock);
265#undef C 265#undef C
266
267 drm_mode_set_crtcinfo(adjusted_mode, 0);
266 } 268 }
267 269
268 if (intel_dvo->dev.dev_ops->mode_fixup) 270 if (intel_dvo->dev.dev_ops->mode_fixup)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 42114ecbae0e..293564a2896a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
112done: 112done:
113 pipe_config->pch_pfit.pos = (x << 16) | y; 113 pipe_config->pch_pfit.pos = (x << 16) | y;
114 pipe_config->pch_pfit.size = (width << 16) | height; 114 pipe_config->pch_pfit.size = (width << 16) | height;
115 pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
115} 116}
116 117
117static void 118static void
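intel_pch_panel_fitting() above now derives pch_pfit.enabled from the packed size word it just wrote, where pos carries (x << 16 | y) and size carries (width << 16 | height). A tiny sketch of unpacking those fields, purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* pos packs (x << 16 | y) and size packs (width << 16 | height),
 * exactly as written by the intel_panel.c hunk above. */
static void pfit_unpack(uint32_t pos, uint32_t size,
                        uint32_t *x, uint32_t *y, uint32_t *w, uint32_t *h)
{
        *x = pos >> 16;
        *y = pos & 0xffff;
        *w = size >> 16;
        *h = size & 0xffff;
}

int main(void)
{
        uint32_t x, y, w, h;

        pfit_unpack((32 << 16) | 60, (1856 << 16) | 960, &x, &y, &w, &h);
        printf("window %ux%u at %u,%u\n", w, h, x, y);
        return 0;
}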
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0c115cc4899f..dd176b7296c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2096 struct drm_crtc *crtc) 2096 struct drm_crtc *crtc)
2097{ 2097{
2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2099 uint32_t pixel_rate, pfit_size; 2099 uint32_t pixel_rate;
2100 2100
2101 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2101 pixel_rate = intel_crtc->config.adjusted_mode.clock;
2102 2102
2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2104 * adjust the pixel_rate here. */ 2104 * adjust the pixel_rate here. */
2105 2105
2106 pfit_size = intel_crtc->config.pch_pfit.size; 2106 if (intel_crtc->config.pch_pfit.enabled) {
2107 if (pfit_size) {
2108 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 2107 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2108 uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
2109 2109
2110 pipe_w = intel_crtc->config.requested_mode.hdisplay; 2110 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2111 pipe_h = intel_crtc->config.requested_mode.vdisplay; 2111 pipe_h = intel_crtc->config.requested_mode.vdisplay;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 85037b9d4934..49482fd5b76c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
788 uint16_t h_sync_offset, v_sync_offset; 788 uint16_t h_sync_offset, v_sync_offset;
789 int mode_clock; 789 int mode_clock;
790 790
791 memset(dtd, 0, sizeof(*dtd));
792
791 width = mode->hdisplay; 793 width = mode->hdisplay;
792 height = mode->vdisplay; 794 height = mode->vdisplay;
793 795
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
830 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 832 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
831 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; 833 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
832 834
833 dtd->part2.sdvo_flags = 0;
834 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; 835 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
835 dtd->part2.reserved = 0;
836} 836}
837 837
838static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, 838static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
839 const struct intel_sdvo_dtd *dtd) 839 const struct intel_sdvo_dtd *dtd)
840{ 840{
841 mode->hdisplay = dtd->part1.h_active; 841 struct drm_display_mode mode = {};
842 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; 842
843 mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; 843 mode.hdisplay = dtd->part1.h_active;
844 mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; 844 mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
845 mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; 845 mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
846 mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; 846 mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
847 mode->htotal = mode->hdisplay + dtd->part1.h_blank; 847 mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
848 mode->htotal += (dtd->part1.h_high & 0xf) << 8; 848 mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
849 849 mode.htotal = mode.hdisplay + dtd->part1.h_blank;
850 mode->vdisplay = dtd->part1.v_active; 850 mode.htotal += (dtd->part1.h_high & 0xf) << 8;
851 mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; 851
852 mode->vsync_start = mode->vdisplay; 852 mode.vdisplay = dtd->part1.v_active;
853 mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; 853 mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
854 mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; 854 mode.vsync_start = mode.vdisplay;
855 mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; 855 mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
856 mode->vsync_end = mode->vsync_start + 856 mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
857 mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
858 mode.vsync_end = mode.vsync_start +
857 (dtd->part2.v_sync_off_width & 0xf); 859 (dtd->part2.v_sync_off_width & 0xf);
858 mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; 860 mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
859 mode->vtotal = mode->vdisplay + dtd->part1.v_blank; 861 mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
860 mode->vtotal += (dtd->part1.v_high & 0xf) << 8; 862 mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
861 863
862 mode->clock = dtd->part1.clock * 10; 864 mode.clock = dtd->part1.clock * 10;
863 865
864 mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
865 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) 866 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
866 mode->flags |= DRM_MODE_FLAG_INTERLACE; 867 mode.flags |= DRM_MODE_FLAG_INTERLACE;
867 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) 868 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
868 mode->flags |= DRM_MODE_FLAG_PHSYNC; 869 mode.flags |= DRM_MODE_FLAG_PHSYNC;
870 else
871 mode.flags |= DRM_MODE_FLAG_NHSYNC;
869 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) 872 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
870 mode->flags |= DRM_MODE_FLAG_PVSYNC; 873 mode.flags |= DRM_MODE_FLAG_PVSYNC;
874 else
875 mode.flags |= DRM_MODE_FLAG_NVSYNC;
876
877 drm_mode_set_crtcinfo(&mode, 0);
878
879 drm_mode_copy(pmode, &mode);
871} 880}
872 881
873static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) 882static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
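The intel_sdvo.c rework zeroes the DTD before filling it, decodes the mode into a zero-initialized local, sets explicit negative-polarity flags, and calls drm_mode_set_crtcinfo() before drm_mode_copy() hands the result back, so nothing stale leaks through from the caller's struct. The split-field timing decode it performs (an 8-bit low byte plus a high nibble) can be sketched as below; the field layout follows the hunk, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Reassemble a 12-bit timing value from the DTD's split encoding: an 8-bit
 * low byte plus the upper nibble of a "high" byte, mirroring
 * mode.hdisplay = h_active + (((h_high >> 4) & 0x0f) << 8) in the hunk. */
static uint16_t dtd_active(uint8_t active_lo, uint8_t high_byte)
{
        return active_lo | (((high_byte >> 4) & 0x0f) << 8);
}

int main(void)
{
        /* 1280 = 0x500: low byte 0x00, upper nibble of the high byte 0x5. */
        printf("%u\n", dtd_active(0x00, 0x50));
        return 0;
}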
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f2c6d7909ae2..dd6f84bf6c22 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
919 /* TV has it's own notion of sync and other mode flags, so clear them. */
919 /* TV has its own notion of sync and other mode flags, so clear them. */
920 pipe_config->adjusted_mode.flags = 0;
921
922 /*
923 * FIXME: We don't check whether the input mode is actually what we want
924 * or whether userspace is doing something stupid.
925 */
926
919 return true; 927 return true;
920} 928}
921 929
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a60584763b61..a0b9d8a95b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
124 124
125 /* reset completed fence seqno, just discard anything pending: */ 125 /* reset completed fence seqno, just discard anything pending: */
126 adreno_gpu->memptrs->fence = gpu->submitted_fence; 126 adreno_gpu->memptrs->fence = gpu->submitted_fence;
127 adreno_gpu->memptrs->rptr = 0;
128 adreno_gpu->memptrs->wptr = 0;
127 129
128 gpu->funcs->pm_resume(gpu); 130 gpu->funcs->pm_resume(gpu);
129 ret = gpu->funcs->hw_init(gpu); 131 ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
229 return; 231 return;
230 } while(time_before(jiffies, t)); 232 } while(time_before(jiffies, t));
231 233
232 DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name); 234 DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
233 235
234 /* TODO maybe we need to reset GPU here to recover from hang? */ 236 /* TODO maybe we need to reset GPU here to recover from hang? */
235} 237}
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
256{ 258{
257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 259 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
258 uint32_t freedwords; 260 uint32_t freedwords;
261 unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
259 do { 262 do {
260 uint32_t size = gpu->rb->size / 4; 263 uint32_t size = gpu->rb->size / 4;
261 uint32_t wptr = get_wptr(gpu->rb); 264 uint32_t wptr = get_wptr(gpu->rb);
262 uint32_t rptr = adreno_gpu->memptrs->rptr; 265 uint32_t rptr = adreno_gpu->memptrs->rptr;
263 freedwords = (rptr + (size - 1) - wptr) % size; 266 freedwords = (rptr + (size - 1) - wptr) % size;
267
268 if (time_after(jiffies, t)) {
269 DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
270 break;
271 }
264 } while(freedwords < ndwords); 272 } while(freedwords < ndwords);
265} 273}
266 274
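adreno_wait_ring() above derives free space from the ring read and write pointers and now gives up with an error after ADRENO_IDLE_TIMEOUT instead of spinning forever. The free-dwords computation is the usual circular-buffer arithmetic; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Free dwords in a ring of 'size' dwords: everything between the write
 * pointer and one slot short of the read pointer, modulo the ring size,
 * as computed in adreno_wait_ring(). */
static uint32_t ring_free_dwords(uint32_t rptr, uint32_t wptr, uint32_t size)
{
        return (rptr + (size - 1) - wptr) % size;
}

int main(void)
{
        /* 1024-dword ring, consumer at 10, producer at 1000: 33 slots left
         * before the producer would catch up with the consumer. */
        printf("%u\n", ring_free_dwords(10, 1000, 1024));
        return 0;
}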
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
index 5db5bbaedae2..bc7fd11ad8be 100644
--- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -19,8 +19,6 @@
19#include "msm_drv.h" 19#include "msm_drv.h"
20#include "mdp4_kms.h" 20#include "mdp4_kms.h"
21 21
22#include <mach/iommu.h>
23
24static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); 22static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
25 23
26static int mdp4_hw_init(struct msm_kms *kms) 24static int mdp4_hw_init(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 864c9773636b..b3a2f1629041 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -18,8 +18,6 @@
18#include "msm_drv.h" 18#include "msm_drv.h"
19#include "msm_gpu.h" 19#include "msm_gpu.h"
20 20
21#include <mach/iommu.h>
22
23static void msm_fb_output_poll_changed(struct drm_device *dev) 21static void msm_fb_output_poll_changed(struct drm_device *dev)
24{ 22{
25 struct msm_drm_private *priv = dev->dev_private; 23 struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
62 int i, ret; 60 int i, ret;
63 61
64 for (i = 0; i < cnt; i++) { 62 for (i = 0; i < cnt; i++) {
63 /* TODO maybe some day msm iommu won't require this hack: */
64 struct device *msm_iommu_get_ctx(const char *ctx_name);
65 struct device *ctx = msm_iommu_get_ctx(names[i]); 65 struct device *ctx = msm_iommu_get_ctx(names[i]);
66 if (!ctx) 66 if (!ctx)
67 continue; 67 continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
199 * imx drm driver on iMX5 199 * imx drm driver on iMX5
200 */ 200 */
201 dev_err(dev->dev, "failed to load kms\n"); 201 dev_err(dev->dev, "failed to load kms\n");
202 ret = PTR_ERR(priv->kms); 202 ret = PTR_ERR(kms);
203 goto fail; 203 goto fail;
204 } 204 }
205 205
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
499 struct timespec *timeout) 499 struct timespec *timeout)
500{ 500{
501 struct msm_drm_private *priv = dev->dev_private; 501 struct msm_drm_private *priv = dev->dev_private;
502 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
503 unsigned long start_jiffies = jiffies;
504 unsigned long remaining_jiffies;
505 int ret; 502 int ret;
506 503
507 if (time_after(start_jiffies, timeout_jiffies)) 504 if (!priv->gpu)
508 remaining_jiffies = 0; 505 return 0;
509 else 506
510 remaining_jiffies = timeout_jiffies - start_jiffies; 507 if (fence > priv->gpu->submitted_fence) {
511 508 DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
512 ret = wait_event_interruptible_timeout(priv->fence_event, 509 fence, priv->gpu->submitted_fence);
513 priv->completed_fence >= fence, 510 return -EINVAL;
514 remaining_jiffies); 511 }
515 if (ret == 0) { 512
516 DBG("timeout waiting for fence: %u (completed: %u)", 513 if (!timeout) {
517 fence, priv->completed_fence); 514 /* no-wait: */
518 ret = -ETIMEDOUT; 515 ret = fence_completed(dev, fence) ? 0 : -EBUSY;
519 } else if (ret != -ERESTARTSYS) { 516 } else {
520 ret = 0; 517 unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
518 unsigned long start_jiffies = jiffies;
519 unsigned long remaining_jiffies;
520
521 if (time_after(start_jiffies, timeout_jiffies))
522 remaining_jiffies = 0;
523 else
524 remaining_jiffies = timeout_jiffies - start_jiffies;
525
526 ret = wait_event_interruptible_timeout(priv->fence_event,
527 fence_completed(dev, fence),
528 remaining_jiffies);
529
530 if (ret == 0) {
531 DBG("timeout waiting for fence: %u (completed: %u)",
532 fence, priv->completed_fence);
533 ret = -ETIMEDOUT;
534 } else if (ret != -ERESTARTSYS) {
535 ret = 0;
536 }
521 } 537 }
522 538
523 return ret; 539 return ret;
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
681 .gem_vm_ops = &vm_ops, 697 .gem_vm_ops = &vm_ops,
682 .dumb_create = msm_gem_dumb_create, 698 .dumb_create = msm_gem_dumb_create,
683 .dumb_map_offset = msm_gem_dumb_map_offset, 699 .dumb_map_offset = msm_gem_dumb_map_offset,
684 .dumb_destroy = msm_gem_dumb_destroy, 700 .dumb_destroy = drm_gem_dumb_destroy,
685#ifdef CONFIG_DEBUG_FS 701#ifdef CONFIG_DEBUG_FS
686 .debugfs_init = msm_debugfs_init, 702 .debugfs_init = msm_debugfs_init,
687 .debugfs_cleanup = msm_debugfs_cleanup, 703 .debugfs_cleanup = msm_debugfs_cleanup,
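msm_wait_fence_interruptable() is restructured above: no GPU means nothing to wait for, a fence beyond anything submitted is rejected with -EINVAL, a NULL timeout becomes a non-blocking completion poll, and otherwise the remaining jiffies are clamped to zero before the interruptible wait. A compile-ready sketch of that decision structure, with the sleep stubbed out and plain integers in place of jiffies:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decision structure of the reworked wait; the interruptible sleep is
 * stubbed out, and has_timeout == false is the no-wait path. */
static int wait_fence(uint32_t fence, uint32_t submitted, uint32_t completed,
                      bool has_gpu, bool has_timeout)
{
        if (!has_gpu)
                return 0;               /* no GPU: nothing to wait for */
        if (fence > submitted)
                return -EINVAL;         /* fence was never issued */
        if (!has_timeout)
                return completed >= fence ? 0 : -EBUSY;
        /* kernel code: wait_event_interruptible_timeout(..., fence_completed()) */
        return completed >= fence ? 0 : -ETIMEDOUT;
}

int main(void)
{
        printf("%d\n", wait_fence(7, 5, 5, true, true));   /* invalid fence */
        printf("%d\n", wait_fence(4, 5, 3, true, false));  /* busy, no-wait */
        printf("%d\n", wait_fence(4, 5, 4, true, true));   /* 0: completed  */
        return 0;
}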
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..df8f1d084bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 153int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
154 struct work_struct *work); 154 struct work_struct *work);
155void msm_gem_move_to_active(struct drm_gem_object *obj, 155void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, uint32_t fence); 156 struct msm_gpu *gpu, bool write, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj); 157void msm_gem_move_to_inactive(struct drm_gem_object *obj);
158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
159 struct timespec *timeout); 159 struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 191#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 192#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
193 193
194static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
195{
196 struct msm_drm_private *priv = dev->dev_private;
197 return priv->completed_fence >= fence;
198}
199
194static inline int align_pitch(int width, int bpp) 200static inline int align_pitch(int width, int bpp)
195{ 201{
196 int bytespp = (bpp + 7) / 8; 202 int bytespp = (bpp + 7) / 8;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..2bae46c66a30 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
40 } 40 }
41 41
42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); 42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
43 if (!msm_obj->sgt) { 43 if (IS_ERR(msm_obj->sgt)) {
44 dev_err(dev->dev, "failed to allocate sgt\n"); 44 dev_err(dev->dev, "failed to allocate sgt\n");
45 return ERR_PTR(-ENOMEM); 45 return ERR_CAST(msm_obj->sgt);
46 } 46 }
47 47
48 msm_obj->pages = p; 48 msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
159out: 159out:
160 switch (ret) { 160 switch (ret) {
161 case -EAGAIN: 161 case -EAGAIN:
162 set_need_resched();
163 case 0: 162 case 0:
164 case -ERESTARTSYS: 163 case -ERESTARTSYS:
165 case -EINTR: 164 case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
320 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); 319 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
321} 320}
322 321
323int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
324 uint32_t handle)
325{
326 /* No special work needed, drop the reference and see what falls out */
327 return drm_gem_handle_delete(file, handle);
328}
329
330int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 322int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
331 uint32_t handle, uint64_t *offset) 323 uint32_t handle, uint64_t *offset)
332{ 324{
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
393} 385}
394 386
395void msm_gem_move_to_active(struct drm_gem_object *obj, 387void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence) 388 struct msm_gpu *gpu, bool write, uint32_t fence)
397{ 389{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj); 390 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu; 391 msm_obj->gpu = gpu;
400 msm_obj->fence = fence; 392 if (write)
393 msm_obj->write_fence = fence;
394 else
395 msm_obj->read_fence = fence;
401 list_del_init(&msm_obj->mm_list); 396 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 397 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403} 398}
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 406 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412 407
413 msm_obj->gpu = NULL; 408 msm_obj->gpu = NULL;
414 msm_obj->fence = 0; 409 msm_obj->read_fence = 0;
410 msm_obj->write_fence = 0;
415 list_del_init(&msm_obj->mm_list); 411 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 412 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417 413
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
433 struct msm_gem_object *msm_obj = to_msm_bo(obj); 429 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0; 430 int ret = 0;
435 431
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) 432 if (is_active(msm_obj)) {
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout); 433 uint32_t fence = 0;
434
435 if (op & MSM_PREP_READ)
436 fence = msm_obj->write_fence;
437 if (op & MSM_PREP_WRITE)
438 fence = max(fence, msm_obj->read_fence);
439 if (op & MSM_PREP_NOSYNC)
440 timeout = NULL;
441
442 ret = msm_wait_fence_interruptable(dev, fence, timeout);
443 }
438 444
439 /* TODO cache maintenance */ 445 /* TODO cache maintenance */
440 446
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
455 uint64_t off = drm_vma_node_start(&obj->vma_node); 461 uint64_t off = drm_vma_node_start(&obj->vma_node);
456 462
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 463 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n", 464 seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 465 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter, 466 msm_obj->read_fence, msm_obj->write_fence,
467 obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size); 468 off, msm_obj->vaddr, obj->size);
462} 469}
463 470
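msm_gem.c now keeps separate read and write fences per object, and msm_gem_cpu_prep() waits on whichever one matters: a CPU read only needs pending GPU writes to finish, while a CPU write must also wait out GPU reads. A compact sketch of that fence selection (the flag values are stand-ins, not the uapi MSM_PREP_* constants):

#include <stdint.h>
#include <stdio.h>

#define PREP_READ  (1u << 0)    /* stand-ins for the MSM_PREP_* flags */
#define PREP_WRITE (1u << 1)

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

/* Which fence must retire before the CPU may touch the buffer? */
static uint32_t fence_to_wait_for(uint32_t op,
                                  uint32_t read_fence, uint32_t write_fence)
{
        uint32_t fence = 0;

        if (op & PREP_READ)
                fence = write_fence;                    /* wait for GPU writes */
        if (op & PREP_WRITE)
                fence = max_u32(fence, read_fence);     /* ...and GPU reads    */
        return fence;
}

int main(void)
{
        printf("%u\n", fence_to_wait_for(PREP_READ, 9, 4));              /* 4 */
        printf("%u\n", fence_to_wait_for(PREP_READ | PREP_WRITE, 9, 4)); /* 9 */
        return 0;
}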
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
36 */ 36 */
37 struct list_head mm_list; 37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */ 38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence; 39 uint32_t read_fence, write_fence;
40 40
41 /* Transiently in the process of submit ioctl, objects associated 41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for 42 * with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e1ef3a00f60..5281d4bc37f7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
78 } 78 }
79 79
80 if (submit_bo.flags & BO_INVALID_FLAGS) { 80 if (submit_bo.flags & BO_INVALID_FLAGS) {
81 DBG("invalid flags: %x", submit_bo.flags); 81 DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
82 ret = -EINVAL; 82 ret = -EINVAL;
83 goto out_unlock; 83 goto out_unlock;
84 } 84 }
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
92 */ 92 */
93 obj = idr_find(&file->object_idr, submit_bo.handle); 93 obj = idr_find(&file->object_idr, submit_bo.handle);
94 if (!obj) { 94 if (!obj) {
95 DBG("invalid handle %u at index %u", submit_bo.handle, i); 95 DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
96 ret = -EINVAL; 96 ret = -EINVAL;
97 goto out_unlock; 97 goto out_unlock;
98 } 98 }
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
100 msm_obj = to_msm_bo(obj); 100 msm_obj = to_msm_bo(obj);
101 101
102 if (!list_empty(&msm_obj->submit_entry)) { 102 if (!list_empty(&msm_obj->submit_entry)) {
103 DBG("handle %u at index %u already on submit list", 103 DRM_ERROR("handle %u at index %u already on submit list\n",
104 submit_bo.handle, i); 104 submit_bo.handle, i);
105 ret = -EINVAL; 105 ret = -EINVAL;
106 goto out_unlock; 106 goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
216 struct msm_gem_object **obj, uint32_t *iova, bool *valid) 216 struct msm_gem_object **obj, uint32_t *iova, bool *valid)
217{ 217{
218 if (idx >= submit->nr_bos) { 218 if (idx >= submit->nr_bos) {
219 DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos); 219 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
220 return EINVAL; 220 idx, submit->nr_bos);
221 return -EINVAL;
221 } 222 }
222 223
223 if (obj) 224 if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
239 int ret; 240 int ret;
240 241
241 if (offset % 4) { 242 if (offset % 4) {
242 DBG("non-aligned cmdstream buffer: %u", offset); 243 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
243 return -EINVAL; 244 return -EINVAL;
244 } 245 }
245 246
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
266 return -EFAULT; 267 return -EFAULT;
267 268
268 if (submit_reloc.submit_offset % 4) { 269 if (submit_reloc.submit_offset % 4) {
269 DBG("non-aligned reloc offset: %u", 270 DRM_ERROR("non-aligned reloc offset: %u\n",
270 submit_reloc.submit_offset); 271 submit_reloc.submit_offset);
271 return -EINVAL; 272 return -EINVAL;
272 } 273 }
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
276 277
277 if ((off >= (obj->base.size / 4)) || 278 if ((off >= (obj->base.size / 4)) ||
278 (off < last_offset)) { 279 (off < last_offset)) {
279 DBG("invalid offset %u at reloc %u", off, i); 280 DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
280 return -EINVAL; 281 return -EINVAL;
281 } 282 }
282 283
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
374 goto out; 375 goto out;
375 376
376 if (submit_cmd.size % 4) { 377 if (submit_cmd.size % 4) {
377 DBG("non-aligned cmdstream buffer size: %u", 378 DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
378 submit_cmd.size); 379 submit_cmd.size);
379 ret = -EINVAL; 380 ret = -EINVAL;
380 goto out; 381 goto out;
381 } 382 }
382 383
383 if (submit_cmd.size >= msm_obj->base.size) { 384 if ((submit_cmd.size + submit_cmd.submit_offset) >=
384 DBG("invalid cmdstream size: %u", submit_cmd.size); 385 msm_obj->base.size) {
386 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
385 ret = -EINVAL; 387 ret = -EINVAL;
386 goto out; 388 goto out;
387 } 389 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..3bab937965d1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -29,13 +29,14 @@
29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) 29static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
30{ 30{
31 struct drm_device *dev = gpu->dev; 31 struct drm_device *dev = gpu->dev;
32 struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; 32 struct kgsl_device_platform_data *pdata;
33 33
34 if (!pdev) { 34 if (!pdev) {
35 dev_err(dev->dev, "could not find dtv pdata\n"); 35 dev_err(dev->dev, "could not find dtv pdata\n");
36 return; 36 return;
37 } 37 }
38 38
39 pdata = pdev->dev.platform_data;
39 if (pdata->bus_scale_table) { 40 if (pdata->bus_scale_table) {
40 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); 41 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
41 DBG("bus scale client: %08x", gpu->bsc); 42 DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
230static void hangcheck_handler(unsigned long data) 231static void hangcheck_handler(unsigned long data)
231{ 232{
232 struct msm_gpu *gpu = (struct msm_gpu *)data; 233 struct msm_gpu *gpu = (struct msm_gpu *)data;
234 struct drm_device *dev = gpu->dev;
235 struct msm_drm_private *priv = dev->dev_private;
233 uint32_t fence = gpu->funcs->last_fence(gpu); 236 uint32_t fence = gpu->funcs->last_fence(gpu);
234 237
235 if (fence != gpu->hangcheck_fence) { 238 if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
237 gpu->hangcheck_fence = fence; 240 gpu->hangcheck_fence = fence;
238 } else if (fence < gpu->submitted_fence) { 241 } else if (fence < gpu->submitted_fence) {
239 /* no progress and not done.. hung! */ 242 /* no progress and not done.. hung! */
240 struct msm_drm_private *priv = gpu->dev->dev_private;
241 gpu->hangcheck_fence = fence; 243 gpu->hangcheck_fence = fence;
244 dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
245 gpu->name);
246 dev_err(dev->dev, "%s: completed fence: %u\n",
247 gpu->name, fence);
248 dev_err(dev->dev, "%s: submitted fence: %u\n",
249 gpu->name, gpu->submitted_fence);
242 queue_work(priv->wq, &gpu->recover_work); 250 queue_work(priv->wq, &gpu->recover_work);
243 } 251 }
244 252
245 /* if still more pending work, reset the hangcheck timer: */ 253 /* if still more pending work, reset the hangcheck timer: */
246 if (gpu->submitted_fence > gpu->hangcheck_fence) 254 if (gpu->submitted_fence > gpu->hangcheck_fence)
247 hangcheck_timer_reset(gpu); 255 hangcheck_timer_reset(gpu);
256
257 /* workaround for missing irq: */
258 queue_work(priv->wq, &gpu->retire_work);
248} 259}
249 260
250/* 261/*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
265 obj = list_first_entry(&gpu->active_list, 276 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list); 277 struct msm_gem_object, mm_list);
267 278
268 if (obj->fence <= fence) { 279 if ((obj->read_fence <= fence) &&
280 (obj->write_fence <= fence)) {
269 /* move to inactive: */ 281 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base); 282 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id); 283 msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
321 submit->gpu->id, &iova); 333 submit->gpu->id, &iova);
322 } 334 }
323 335
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); 336 if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
337 msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
338
339 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
340 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
325 } 341 }
326 hangcheck_timer_reset(gpu); 342 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex); 343 mutex_unlock(&dev->struct_mutex);
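The msm_gpu.c hangcheck handler compares the last retired fence with the value recorded on the previous tick: progress simply re-arms the timer, while no progress with work still queued is reported as a lockup and the recovery worker is scheduled (plus, as a workaround for a missed IRQ, the retire worker is now kicked on every tick). The lockup predicate on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when the GPU made no progress since the previous hangcheck tick even
 * though work is still outstanding -- the condition the hunk reports as a
 * lockup before scheduling the recovery worker. */
static bool gpu_is_hung(uint32_t last_fence, uint32_t prev_seen,
                        uint32_t submitted)
{
        return last_fence == prev_seen && last_fence < submitted;
}

int main(void)
{
        printf("%d\n", gpu_is_hung(3, 3, 5));   /* 1: stuck, fences 4..5 pending */
        printf("%d\n", gpu_is_hung(4, 3, 5));   /* 0: progress was made */
        printf("%d\n", gpu_is_hung(5, 5, 5));   /* 0: idle, nothing pending */
        return 0;
}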
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2e11ea02cf87..57cda2a1437b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -579,8 +579,22 @@ static void
579init_reserved(struct nvbios_init *init) 579init_reserved(struct nvbios_init *init)
580{ 580{
581 u8 opcode = nv_ro08(init->bios, init->offset); 581 u8 opcode = nv_ro08(init->bios, init->offset);
582 trace("RESERVED\t0x%02x\n", opcode); 582 u8 length, i;
583 init->offset += 1; 583
584 switch (opcode) {
585 case 0xaa:
586 length = 4;
587 break;
588 default:
589 length = 1;
590 break;
591 }
592
593 trace("RESERVED 0x%02x\t", opcode);
594 for (i = 1; i < length; i++)
595 cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
596 cont("\n");
597 init->offset += length;
584} 598}
585 599
586/** 600/**
@@ -1437,7 +1451,7 @@ init_configure_mem(struct nvbios_init *init)
1437 data = init_rdvgai(init, 0x03c4, 0x01); 1451 data = init_rdvgai(init, 0x03c4, 0x01);
1438 init_wrvgai(init, 0x03c4, 0x01, data | 0x20); 1452 init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
1439 1453
1440 while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) { 1454 for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
1441 switch (addr) { 1455 switch (addr) {
1442 case 0x10021c: /* CKE_NORMAL */ 1456 case 0x10021c: /* CKE_NORMAL */
1443 case 0x1002d0: /* CMD_REFRESH */ 1457 case 0x1002d0: /* CMD_REFRESH */
@@ -2135,6 +2149,7 @@ static struct nvbios_init_opcode {
2135 [0x99] = { init_zm_auxch }, 2149 [0x99] = { init_zm_auxch },
2136 [0x9a] = { init_i2c_long_if }, 2150 [0x9a] = { init_i2c_long_if },
2137 [0xa9] = { init_gpio_ne }, 2151 [0xa9] = { init_gpio_ne },
2152 [0xaa] = { init_reserved },
2138}; 2153};
2139 2154
2140#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) 2155#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d2712e6e5d31..7848590f5568 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -278,7 +278,6 @@ nouveau_display_create(struct drm_device *dev)
278{ 278{
279 struct nouveau_drm *drm = nouveau_drm(dev); 279 struct nouveau_drm *drm = nouveau_drm(dev);
280 struct nouveau_display *disp; 280 struct nouveau_display *disp;
281 u32 pclass = dev->pdev->class >> 8;
282 int ret, gen; 281 int ret, gen;
283 282
284 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); 283 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -340,29 +339,25 @@ nouveau_display_create(struct drm_device *dev)
340 drm_kms_helper_poll_init(dev); 339 drm_kms_helper_poll_init(dev);
341 drm_kms_helper_poll_disable(dev); 340 drm_kms_helper_poll_disable(dev);
342 341
343 if (nouveau_modeset == 1 || 342 if (drm->vbios.dcb.entries) {
344 (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) { 343 if (nv_device(drm->device)->card_type < NV_50)
345 if (drm->vbios.dcb.entries) { 344 ret = nv04_display_create(dev);
346 if (nv_device(drm->device)->card_type < NV_50) 345 else
347 ret = nv04_display_create(dev); 346 ret = nv50_display_create(dev);
348 else 347 } else {
349 ret = nv50_display_create(dev); 348 ret = 0;
350 } else { 349 }
351 ret = 0;
352 }
353
354 if (ret)
355 goto disp_create_err;
356 350
357 if (dev->mode_config.num_crtc) { 351 if (ret)
358 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 352 goto disp_create_err;
359 if (ret)
360 goto vblank_err;
361 }
362 353
363 nouveau_backlight_init(dev); 354 if (dev->mode_config.num_crtc) {
355 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
356 if (ret)
357 goto vblank_err;
364 } 358 }
365 359
360 nouveau_backlight_init(dev);
366 return 0; 361 return 0;
367 362
368vblank_err: 363vblank_err:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8f6d63d7edd3..a86ecf65c164 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -454,7 +454,8 @@ nouveau_fbcon_init(struct drm_device *dev)
454 int preferred_bpp; 454 int preferred_bpp;
455 int ret; 455 int ret;
456 456
457 if (!dev->mode_config.num_crtc) 457 if (!dev->mode_config.num_crtc ||
458 (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
458 return 0; 459 return 0;
459 460
460 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 461 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ca5492ac2da5..0843ebc910d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -104,9 +104,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
104 else 104 else
105 nvbe->ttm.ttm.func = &nv50_sgdma_backend; 105 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
106 106
107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
108 kfree(nvbe);
109 return NULL; 108 return NULL;
110 }
111 return &nvbe->ttm.ttm; 109 return &nvbe->ttm.ttm;
112} 110}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dfac7965ea28..32923d2f6002 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,8 +707,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
711 radeon_audio) 711 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
712 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
713 else if (radeon_connector->use_digital) 714 else if (radeon_connector->use_digital)
714 return ATOM_ENCODER_MODE_DVI; 715 return ATOM_ENCODER_MODE_DVI;
@@ -718,8 +719,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
718 case DRM_MODE_CONNECTOR_DVID: 719 case DRM_MODE_CONNECTOR_DVID:
719 case DRM_MODE_CONNECTOR_HDMIA: 720 case DRM_MODE_CONNECTOR_HDMIA:
720 default: 721 default:
721 if (drm_detect_hdmi_monitor(radeon_connector->edid) && 722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
722 radeon_audio) 723 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
724 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
723 return ATOM_ENCODER_MODE_HDMI; 725 return ATOM_ENCODER_MODE_HDMI;
724 else 726 else
725 return ATOM_ENCODER_MODE_DVI; 727 return ATOM_ENCODER_MODE_DVI;
@@ -732,8 +734,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
734 return ATOM_ENCODER_MODE_DP; 736 return ATOM_ENCODER_MODE_DP;
735 else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
736 radeon_audio) 738 (drm_detect_hdmi_monitor(radeon_connector->edid) &&
739 (radeon_connector->audio == RADEON_AUDIO_AUTO)))
737 return ATOM_ENCODER_MODE_HDMI; 740 return ATOM_ENCODER_MODE_HDMI;
738 else 741 else
739 return ATOM_ENCODER_MODE_DVI; 742 return ATOM_ENCODER_MODE_DVI;
@@ -1647,8 +1650,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1647 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1650 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
1648 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1651 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1649 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1650 /* some early dce3.2 boards have a bug in their transmitter control table */ 1653 /* some dce3.x boards have a bug in their transmitter control table.
1651 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) 1654 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
1655 * does the same thing and more.
1656 */
1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1658 (rdev->family != CHIP_RS880))
1652 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1653 } 1660 }
1654 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
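The atombios_encoders.c hunks turn the HDMI decision from a single module-wide audio switch into a per-connector tri-state: HDMI is forced when audio is explicitly enabled, EDID detection decides in auto mode, and everything else stays DVI. Sketched as a small predicate (the enum values stand in for the RADEON_AUDIO_* constants):

#include <stdbool.h>
#include <stdio.h>

enum audio_mode { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

/* Should this DVI/HDMI connector be driven in HDMI (audio-capable) mode? */
static bool want_hdmi(enum audio_mode audio, bool monitor_is_hdmi)
{
        if (audio == AUDIO_ENABLE)
                return true;                    /* user forced audio on */
        if (audio == AUDIO_AUTO && monitor_is_hdmi)
                return true;                    /* let the EDID decide  */
        return false;                           /* plain DVI otherwise  */
}

int main(void)
{
        printf("%d\n", want_hdmi(AUDIO_AUTO, true));    /* 1 */
        printf("%d\n", want_hdmi(AUDIO_DISABLE, true)); /* 0 */
        return 0;
}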
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 084e69414fd1..b162e98a2953 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1168 { 25000, 30000, RADEON_SCLK_UP } 1168 { 25000, 30000, RADEON_SCLK_UP }
1169}; 1169};
1170 1170
1171void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1172 u32 *max_clock)
1173{
1174 u32 i, clock = 0;
1175
1176 if ((table == NULL) || (table->count == 0)) {
1177 *max_clock = clock;
1178 return;
1179 }
1180
1181 for (i = 0; i < table->count; i++) {
1182 if (clock < table->entries[i].clk)
1183 clock = table->entries[i].clk;
1184 }
1185 *max_clock = clock;
1186}
1187
1171void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1188void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1172 u32 clock, u16 max_voltage, u16 *voltage) 1189 u32 clock, u16 max_voltage, u16 *voltage)
1173{ 1190{
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2080 bool disable_mclk_switching; 2097 bool disable_mclk_switching;
2081 u32 mclk, sclk; 2098 u32 mclk, sclk;
2082 u16 vddc, vddci; 2099 u16 vddc, vddci;
2100 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2083 2101
2084 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2102 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2085 btc_dpm_vblank_too_short(rdev)) 2103 btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
2121 ps->low.vddci = max_limits->vddci; 2139 ps->low.vddci = max_limits->vddci;
2122 } 2140 }
2123 2141
2142 /* limit clocks to max supported clocks based on voltage dependency tables */
2143 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2144 &max_sclk_vddc);
2145 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2146 &max_mclk_vddci);
2147 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2148 &max_mclk_vddc);
2149
2150 if (max_sclk_vddc) {
2151 if (ps->low.sclk > max_sclk_vddc)
2152 ps->low.sclk = max_sclk_vddc;
2153 if (ps->medium.sclk > max_sclk_vddc)
2154 ps->medium.sclk = max_sclk_vddc;
2155 if (ps->high.sclk > max_sclk_vddc)
2156 ps->high.sclk = max_sclk_vddc;
2157 }
2158 if (max_mclk_vddci) {
2159 if (ps->low.mclk > max_mclk_vddci)
2160 ps->low.mclk = max_mclk_vddci;
2161 if (ps->medium.mclk > max_mclk_vddci)
2162 ps->medium.mclk = max_mclk_vddci;
2163 if (ps->high.mclk > max_mclk_vddci)
2164 ps->high.mclk = max_mclk_vddci;
2165 }
2166 if (max_mclk_vddc) {
2167 if (ps->low.mclk > max_mclk_vddc)
2168 ps->low.mclk = max_mclk_vddc;
2169 if (ps->medium.mclk > max_mclk_vddc)
2170 ps->medium.mclk = max_mclk_vddc;
2171 if (ps->high.mclk > max_mclk_vddc)
2172 ps->high.mclk = max_mclk_vddc;
2173 }
2174
2124 /* XXX validate the min clocks required for display */ 2175 /* XXX validate the min clocks required for display */
2125 2176
2126 if (disable_mclk_switching) { 2177 if (disable_mclk_switching) {
@@ -2340,12 +2391,6 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
2340 return ret; 2391 return ret;
2341 } 2392 }
2342 2393
2343 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2344 if (ret) {
2345 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2346 return ret;
2347 }
2348
2349 return 0; 2394 return 0;
2350} 2395}
2351 2396
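btc_dpm.c gains btc_get_max_clock_from_voltage_dependency_table(), which scans a voltage-dependency table for its highest clock, and each performance level is then clamped to that ceiling so no state requests a clock the voltage tables cannot back; the ci_dpm.c hunk below reuses the same helper. The scan-and-clamp pattern in isolation, with invented table contents:

#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint32_t clk; uint16_t v; };

/* Highest clock listed in a voltage-dependency table; 0 when the table is
 * absent or empty, which callers treat as "no limit known". */
static uint32_t max_clock_from_table(const struct dep_entry *e, uint32_t count)
{
        uint32_t i, clock = 0;

        for (i = 0; i < count; i++)
                if (e[i].clk > clock)
                        clock = e[i].clk;
        return clock;
}

static uint32_t clamp_clock(uint32_t clk, uint32_t limit)
{
        return (limit && clk > limit) ? limit : clk;
}

int main(void)
{
        struct dep_entry sclk_tab[] = {
                { 30000, 900 }, { 60000, 1100 }, { 80000, 1200 },
        };
        uint32_t limit = max_clock_from_table(sclk_tab, 3);

        printf("%u\n", clamp_clock(90000, limit));      /* clamped to 80000 */
        printf("%u\n", clamp_clock(50000, limit));      /* unchanged: 50000 */
        return 0;
}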
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3cce533397c6..51e947a97edf 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
146}; 146};
147 147
148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 148extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
149extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
150 u32 *max_clock);
149extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 151extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
150 u32 arb_freq_src, u32 arb_freq_dest); 152 u32 arb_freq_src, u32 arb_freq_dest);
151extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 153extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
712 struct radeon_clock_and_voltage_limits *max_limits; 714 struct radeon_clock_and_voltage_limits *max_limits;
713 bool disable_mclk_switching; 715 bool disable_mclk_switching;
714 u32 sclk, mclk; 716 u32 sclk, mclk;
717 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
715 int i; 718 int i;
716 719
717 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 720 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
739 } 742 }
740 } 743 }
741 744
745 /* limit clocks to max supported clocks based on voltage dependency tables */
746 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
747 &max_sclk_vddc);
748 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
749 &max_mclk_vddci);
750 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
751 &max_mclk_vddc);
752
753 for (i = 0; i < ps->performance_level_count; i++) {
754 if (max_sclk_vddc) {
755 if (ps->performance_levels[i].sclk > max_sclk_vddc)
756 ps->performance_levels[i].sclk = max_sclk_vddc;
757 }
758 if (max_mclk_vddci) {
759 if (ps->performance_levels[i].mclk > max_mclk_vddci)
760 ps->performance_levels[i].mclk = max_mclk_vddci;
761 }
762 if (max_mclk_vddc) {
763 if (ps->performance_levels[i].mclk > max_mclk_vddc)
764 ps->performance_levels[i].mclk = max_mclk_vddc;
765 }
766 }
767
742 /* XXX validate the min clocks required for display */ 768 /* XXX validate the min clocks required for display */
743 769
744 if (disable_mclk_switching) { 770 if (disable_mclk_switching) {
@@ -4748,12 +4774,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
4748 if (pi->pcie_performance_request) 4774 if (pi->pcie_performance_request)
4749 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 4775 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4750 4776
4751 ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4752 if (ret) {
4753 DRM_ERROR("ci_dpm_force_performance_level failed\n");
4754 return ret;
4755 }
4756
4757 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 4777 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4758 RADEON_CG_BLOCK_MC | 4778 RADEON_CG_BLOCK_MC |
4759 RADEON_CG_BLOCK_SDMA | 4779 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 53b43dd3cf1e..252e10a41cf5 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -47,10 +47,11 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
             u32 smc_start_address,
             const u8 *src, u32 byte_count, u32 limit)
 {
+    unsigned long flags;
     u32 data, original_data;
     u32 addr;
     u32 extra_shift;
-    int ret;
+    int ret = 0;
 
     if (smc_start_address & 3)
         return -EINVAL;
@@ -59,13 +60,14 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
     addr = smc_start_address;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     while (byte_count >= 4) {
         /* SMC address space is BE */
         data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
 
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         WREG32(SMC_IND_DATA_0, data);
 
@@ -80,7 +82,7 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         original_data = RREG32(SMC_IND_DATA_0);
 
@@ -97,11 +99,15 @@ int ci_copy_bytes_to_smc(struct radeon_device *rdev,
 
         ret = ci_set_smc_sram_address(rdev, addr, limit);
         if (ret)
-            return ret;
+            goto done;
 
         WREG32(SMC_IND_DATA_0, data);
     }
-    return 0;
+
+done:
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
+
+    return ret;
 }
 
 void ci_start_smc(struct radeon_device *rdev)
@@ -197,6 +203,7 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
 
 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 {
+    unsigned long flags;
     u32 ucode_start_address;
     u32 ucode_size;
     const u8 *src;
@@ -219,6 +226,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
         return -EINVAL;
 
     src = (const u8 *)rdev->smc_fw->data;
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(SMC_IND_INDEX_0, ucode_start_address);
     WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
     while (ucode_size >= 4) {
@@ -231,6 +239,7 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
         ucode_size -= 4;
     }
     WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
     return 0;
 }
@@ -238,25 +247,29 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 int ci_read_smc_sram_dword(struct radeon_device *rdev,
                u32 smc_address, u32 *value, u32 limit)
 {
+    unsigned long flags;
     int ret;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        *value = RREG32(SMC_IND_DATA_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-    *value = RREG32(SMC_IND_DATA_0);
-    return 0;
+    return ret;
 }
 
 int ci_write_smc_sram_dword(struct radeon_device *rdev,
                u32 smc_address, u32 value, u32 limit)
 {
+    unsigned long flags;
     int ret;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     ret = ci_set_smc_sram_address(rdev, smc_address, limit);
-    if (ret)
-        return ret;
+    if (ret == 0)
+        WREG32(SMC_IND_DATA_0, value);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 
-    WREG32(SMC_IND_DATA_0, value);
-    return 0;
+    return ret;
 }
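
The ci_smc.c hunks above are representative of the whole series: every indexed SMC access is bracketed by rdev->smc_idx_lock, and the early "return ret;" error paths become "goto done;" so the lock is released on every exit. The listing below is a minimal, hypothetical user-space analogue of that pattern (a pthread mutex standing in for spin_lock_irqsave, invented names such as fake_dev and set_sram_address); it is not kernel code, it only illustrates the single-unlock error path.

/* Hypothetical user-space analogue of the locked index/data pattern above:
 * all error paths leave through one label so the lock protecting the shared
 * index/data pair is always released. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dev {
    pthread_mutex_t idx_lock;   /* stands in for rdev->smc_idx_lock */
    uint32_t index;             /* stands in for the index register */
    uint32_t data[64];          /* stands in for the data register backing store */
};

static int set_sram_address(struct fake_dev *dev, uint32_t addr, uint32_t limit)
{
    if ((addr & 3) || addr >= limit)
        return -1;
    dev->index = addr;
    return 0;
}

static int copy_bytes(struct fake_dev *dev, uint32_t addr,
                      const uint8_t *src, uint32_t count, uint32_t limit)
{
    int ret = 0;

    pthread_mutex_lock(&dev->idx_lock);
    while (count >= 4) {
        ret = set_sram_address(dev, addr, limit);
        if (ret)
            goto done;          /* unlock happens exactly once, below */
        dev->data[dev->index / 4] =
            (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
        src += 4;
        addr += 4;
        count -= 4;
    }
done:
    pthread_mutex_unlock(&dev->idx_lock);
    return ret;
}

int main(void)
{
    struct fake_dev dev = { .idx_lock = PTHREAD_MUTEX_INITIALIZER };
    const uint8_t payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    printf("copy ok: %d\n", copy_bytes(&dev, 0, payload, sizeof(payload), 256));
    printf("copy rejected: %d\n", copy_bytes(&dev, 2, payload, sizeof(payload), 256));
    return 0;
}
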
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a3bba0587276..d02fd1c045d5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+                                          bool enable);
 
 /* get temperature in millidegrees */
 int ci_get_temp(struct radeon_device *rdev)
@@ -120,20 +122,27 @@ int kv_get_temp(struct radeon_device *rdev)
  */
 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     r = RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }
 
 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_INDEX, reg);
     (void)RREG32(PCIE_INDEX);
     WREG32(PCIE_DATA, v);
     (void)RREG32(PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 static const u32 spectre_rlc_save_restore_register_list[] =
@@ -2722,7 +2731,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
     } else if ((rdev->pdev->device == 0x1309) ||
            (rdev->pdev->device == 0x130A) ||
           (rdev->pdev->device == 0x130D) ||
-          (rdev->pdev->device == 0x1313)) {
+          (rdev->pdev->device == 0x1313) ||
+          (rdev->pdev->device == 0x131D)) {
         rdev->config.cik.max_cu_per_sh = 6;
         rdev->config.cik.max_backends_per_se = 2;
     } else if ((rdev->pdev->device == 0x1306) ||
@@ -2835,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
         rdev->config.cik.tile_config |= (3 << 0);
         break;
     }
-    if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-        rdev->config.cik.tile_config |= 1 << 4;
-    else
-        rdev->config.cik.tile_config |= 0 << 4;
+    rdev->config.cik.tile_config |=
+        ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
     rdev->config.cik.tile_config |=
         ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
     rdev->config.cik.tile_config |=
@@ -4013,6 +4021,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
 {
     int r;
 
+    cik_enable_gui_idle_interrupt(rdev, false);
+
     r = cik_cp_load_microcode(rdev);
     if (r)
         return r;
@@ -4024,6 +4034,8 @@ static int cik_cp_resume(struct radeon_device *rdev)
     if (r)
         return r;
 
+    cik_enable_gui_idle_interrupt(rdev, true);
+
     return 0;
 }
 
@@ -4442,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
     rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
     rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
     /* size in MB on si */
-    rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-    rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+    rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+    rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
     rdev->mc.visible_vram_size = rdev->mc.aper_size;
     si_vram_gtt_location(rdev, &rdev->mc);
     radeon_update_bandwidth_info(rdev);
@@ -4721,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
     u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
     u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
     u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-    char *block = (char *)&mc_client;
+    char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+        (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-    printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+    printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
            protections, vmid, addr,
            (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-           block, mc_id);
+           block, mc_client, mc_id);
 }
 
 /**
@@ -5376,7 +5389,9 @@ static void cik_enable_hdp_ls(struct radeon_device *rdev,
 void cik_update_cg(struct radeon_device *rdev,
            u32 block, bool enable)
 {
+
     if (block & RADEON_CG_BLOCK_GFX) {
+        cik_enable_gui_idle_interrupt(rdev, false);
         /* order matters! */
         if (enable) {
             cik_enable_mgcg(rdev, true);
@@ -5385,6 +5400,7 @@ void cik_update_cg(struct radeon_device *rdev,
             cik_enable_cgcg(rdev, false);
             cik_enable_mgcg(rdev, false);
         }
+        cik_enable_gui_idle_interrupt(rdev, true);
     }
 
     if (block & RADEON_CG_BLOCK_MC) {
@@ -5541,7 +5557,7 @@ static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
 {
     u32 data, orig;
 
-    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+    if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
         orig = data = RREG32(RLC_PG_CNTL);
         data |= GFX_PG_ENABLE;
         if (orig != data)
@@ -5805,7 +5821,7 @@ static void cik_init_pg(struct radeon_device *rdev)
     if (rdev->pg_flags) {
         cik_enable_sck_slowdown_on_pu(rdev, true);
         cik_enable_sck_slowdown_on_pd(rdev, true);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_init_gfx_cgpg(rdev);
             cik_enable_cp_pg(rdev, true);
             cik_enable_gds_pg(rdev, true);
@@ -5819,7 +5835,7 @@ static void cik_fini_pg(struct radeon_device *rdev)
 {
     if (rdev->pg_flags) {
         cik_update_gfx_pg(rdev, false);
-        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+        if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
             cik_enable_cp_pg(rdev, false);
             cik_enable_gds_pg(rdev, false);
         }
@@ -5895,7 +5911,9 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
     u32 tmp;
 
     /* gfx ring */
-    WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    tmp = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    WREG32(CP_INT_CNTL_RING0, tmp);
     /* sdma */
     tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
     WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
@@ -6036,8 +6054,7 @@ static int cik_irq_init(struct radeon_device *rdev)
  */
 int cik_irq_set(struct radeon_device *rdev)
 {
-    u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE |
-        PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+    u32 cp_int_cntl;
     u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
     u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
     u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
@@ -6058,6 +6075,10 @@ int cik_irq_set(struct radeon_device *rdev)
         return 0;
     }
 
+    cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
+        (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+    cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
+
     hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 95a66db08d9b..91bb470de0a3 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2014,12 +2014,6 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
     if (eg_pi->pcie_performance_request)
         cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
 
-    ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("rv770_dpm_force_performance_level failed\n");
-        return ret;
-    }
-
     return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 8953255e894b..85a69d2ea3d2 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -28,22 +28,30 @@
 static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
                   u32 block_offset, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
+
     return r;
 }
 
 static void dce6_endpoint_wreg(struct radeon_device *rdev,
                    u32 block_offset, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->end_idx_lock, flags);
     if (ASIC_IS_DCE8(rdev))
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
     else
         WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
                AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
     WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+    spin_unlock_irqrestore(&rdev->end_idx_lock, flags);
 }
 
 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
@@ -86,12 +94,12 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
     u32 offset = dig->afmt->offset;
-    u32 id = dig->afmt->pin->id;
 
     if (!dig->afmt->pin)
         return;
 
-    WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+    WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+           AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ecd60809db4e..71399065db04 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -40,6 +40,7 @@ static int kv_calculate_dpm_settings(struct radeon_device *rdev);
 static void kv_enable_new_levels(struct radeon_device *rdev);
 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
                        struct radeon_ps *new_rps);
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
 static int kv_set_enabled_levels(struct radeon_device *rdev);
 static int kv_force_dpm_highest(struct radeon_device *rdev);
 static int kv_force_dpm_lowest(struct radeon_device *rdev);
@@ -519,7 +520,7 @@ static int kv_set_dpm_boot_state(struct radeon_device *rdev)
 
 static void kv_program_vc(struct radeon_device *rdev)
 {
-    WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+    WREG32_SMC(CG_FTV_0, 0x3FFFC100);
 }
 
 static void kv_clear_vc(struct radeon_device *rdev)
@@ -638,7 +639,10 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
 
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-    return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    if (rdev->family == CHIP_KABINI)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+    else
+        return kv_set_enabled_levels(rdev);
 }
 
 static int kv_update_sclk_t(struct radeon_device *rdev)
@@ -667,9 +671,8 @@ static int kv_program_bootup_state(struct radeon_device *rdev)
         &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
 
     if (table && table->count) {
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].clk == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].clk == pi->boot_pl.sclk)
                 break;
         }
 
@@ -682,9 +685,8 @@
         if (table->num_max_dpm_entries == 0)
             return -EINVAL;
 
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
-                (i == 0))
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
                 break;
         }
 
@@ -1078,6 +1080,13 @@ static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
                     PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
 }
 
+static void kv_reset_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    pi->acp_boot_level = 0xff;
+}
+
 static void kv_update_current_ps(struct radeon_device *rdev,
                  struct radeon_ps *rps)
 {
@@ -1100,6 +1109,18 @@ static void kv_update_requested_ps(struct radeon_device *rdev,
     pi->requested_rps.ps_priv = &pi->requested_ps;
 }
 
+void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    int ret;
+
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, enable);
+        if (ret)
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+    }
+}
+
 int kv_dpm_enable(struct radeon_device *rdev)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1192,6 +1213,8 @@ int kv_dpm_enable(struct radeon_device *rdev)
         return ret;
     }
 
+    kv_reset_acp_boot_level(rdev);
+
     if (rdev->irq.installed &&
         r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
         ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
@@ -1203,6 +1226,12 @@ int kv_dpm_enable(struct radeon_device *rdev)
         radeon_irq_set(rdev);
     }
 
+    ret = kv_smc_bapm_enable(rdev, false);
+    if (ret) {
+        DRM_ERROR("kv_smc_bapm_enable failed\n");
+        return ret;
+    }
+
     /* powerdown unused blocks for now */
     kv_dpm_powergate_acp(rdev, true);
     kv_dpm_powergate_samu(rdev, true);
@@ -1226,6 +1255,8 @@ void kv_dpm_disable(struct radeon_device *rdev)
               RADEON_CG_BLOCK_BIF |
               RADEON_CG_BLOCK_HDP), false);
 
+    kv_smc_bapm_enable(rdev, false);
+
     /* powerup blocks */
     kv_dpm_powergate_acp(rdev, false);
     kv_dpm_powergate_samu(rdev, false);
@@ -1450,6 +1481,39 @@ static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
     return kv_enable_samu_dpm(rdev, !gate);
 }
 
+static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
+{
+    u8 i;
+    struct radeon_clock_voltage_dependency_table *table =
+        &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+    for (i = 0; i < table->count; i++) {
+        if (table->entries[i].clk >= 0) /* XXX */
+            break;
+    }
+
+    if (i >= table->count)
+        i = table->count - 1;
+
+    return i;
+}
+
+static void kv_update_acp_boot_level(struct radeon_device *rdev)
+{
+    struct kv_power_info *pi = kv_get_pi(rdev);
+    u8 acp_boot_level;
+
+    if (!pi->caps_stable_p_state) {
+        acp_boot_level = kv_get_acp_boot_level(rdev);
+        if (acp_boot_level != pi->acp_boot_level) {
+            pi->acp_boot_level = acp_boot_level;
+            kv_send_msg_to_smc_with_parameter(rdev,
+                              PPSMC_MSG_ACPDPM_SetEnabledMask,
+                              (1 << pi->acp_boot_level));
+        }
+    }
+}
+
 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1461,7 +1525,7 @@ static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
         if (pi->caps_stable_p_state)
             pi->acp_boot_level = table->count - 1;
         else
-            pi->acp_boot_level = 0;
+            pi->acp_boot_level = kv_get_acp_boot_level(rdev);
 
         ret = kv_copy_bytes_to_smc(rdev,
                        pi->dpm_table_start +
@@ -1588,13 +1652,11 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
             }
         }
 
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
-            if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
-                (i == 0)) {
-                pi->highest_valid = i;
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
+            if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
                 break;
-            }
         }
+        pi->highest_valid = i;
 
         if (pi->lowest_valid > pi->highest_valid) {
             if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
@@ -1615,14 +1677,12 @@
             }
         }
 
-        for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+        for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
             if (table->entries[i].sclk_frequency <=
-                new_ps->levels[new_ps->num_levels - 1].sclk ||
-                i == 0) {
-                pi->highest_valid = i;
+                new_ps->levels[new_ps->num_levels - 1].sclk)
                 break;
-            }
         }
+        pi->highest_valid = i;
 
         if (pi->lowest_valid > pi->highest_valid) {
             if ((new_ps->levels[0].sclk -
@@ -1724,6 +1784,14 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
               RADEON_CG_BLOCK_BIF |
               RADEON_CG_BLOCK_HDP), false);
 
+    if (pi->bapm_enable) {
+        ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
+        if (ret) {
+            DRM_ERROR("kv_smc_bapm_enable failed\n");
+            return ret;
+        }
+    }
+
     if (rdev->family == CHIP_KABINI) {
         if (pi->enable_dpm) {
             kv_set_valid_clock_range(rdev, new_ps);
@@ -1775,6 +1843,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
                 return ret;
             }
 #endif
+            kv_update_acp_boot_level(rdev);
             kv_update_sclk_t(rdev);
             kv_enable_nb_dpm(rdev);
         }
@@ -1785,7 +1854,6 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
               RADEON_CG_BLOCK_BIF |
              RADEON_CG_BLOCK_HDP), true);
 
-    rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
     return 0;
 }
 
@@ -1806,12 +1874,23 @@ void kv_dpm_setup_asic(struct radeon_device *rdev)
 
 void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
-    kv_force_lowest_valid(rdev);
-    kv_init_graphics_levels(rdev);
-    kv_program_bootup_state(rdev);
-    kv_upload_dpm_settings(rdev);
-    kv_force_lowest_valid(rdev);
-    kv_unforce_levels(rdev);
+    struct kv_power_info *pi = kv_get_pi(rdev);
+
+    if (rdev->family == CHIP_KABINI) {
+        kv_force_lowest_valid(rdev);
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_upload_dpm_settings(rdev);
+        kv_force_lowest_valid(rdev);
+        kv_unforce_levels(rdev);
+    } else {
+        kv_init_graphics_levels(rdev);
+        kv_program_bootup_state(rdev);
+        kv_freeze_sclk_dpm(rdev, true);
+        kv_upload_dpm_settings(rdev);
+        kv_freeze_sclk_dpm(rdev, false);
+        kv_set_enabled_level(rdev, pi->graphics_boot_level);
+    }
 }
 
 //XXX use sumo_dpm_display_configuration_changed
@@ -1871,12 +1950,15 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
     if (ret)
         return ret;
 
-    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) {
+    for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
         if (enable_mask & (1 << i))
             break;
     }
 
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }
 
 static int kv_force_dpm_lowest(struct radeon_device *rdev)
@@ -1893,7 +1975,10 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
             break;
     }
 
-    return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    if (rdev->family == CHIP_KABINI)
+        return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+    else
+        return kv_set_enabled_level(rdev, i);
 }
 
 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
@@ -1911,9 +1996,9 @@ static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
     if (!pi->caps_sclk_ds)
         return 0;
 
-    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
+    for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
         temp = sclk / sumo_get_sleep_divider_from_id(i);
-        if ((temp >= min) || (i == 0))
+        if (temp >= min)
             break;
     }
 
@@ -2039,12 +2124,12 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
         ps->dpmx_nb_ps_lo = 0x1;
         ps->dpmx_nb_ps_hi = 0x0;
     } else {
-        ps->dpm0_pg_nb_ps_lo = 0x1;
+        ps->dpm0_pg_nb_ps_lo = 0x3;
         ps->dpm0_pg_nb_ps_hi = 0x0;
-        ps->dpmx_nb_ps_lo = 0x2;
-        ps->dpmx_nb_ps_hi = 0x1;
+        ps->dpmx_nb_ps_lo = 0x3;
+        ps->dpmx_nb_ps_hi = 0x0;
 
-        if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+        if (pi->sys_info.nb_dpm_enable) {
             force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
                 pi->disable_nb_ps3_in_battery;
@@ -2210,6 +2295,15 @@ static void kv_enable_new_levels(struct radeon_device *rdev)
     }
 }
 
+static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
+{
+    u32 new_mask = (1 << level);
+
+    return kv_send_msg_to_smc_with_parameter(rdev,
+                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                         new_mask);
+}
+
 static int kv_set_enabled_levels(struct radeon_device *rdev)
 {
     struct kv_power_info *pi = kv_get_pi(rdev);
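
Several kv_dpm.c loops above change from "for (i = count - 1; i >= 0; i--)" with an explicit "(i == 0)" escape (and, in kv_get_sleep_divider_id_from_clock, a mistyped "i <= 0" bound) to "for (i = count - 1; i > 0; i--)", leaving index 0 as the implicit fallback once the loop falls through. The short, self-contained sketch below shows that idiom in isolation; the array, values and function name are invented for illustration and are not part of the driver.

/* Illustrative only: with an unsigned index, "i >= 0" is always true, so the
 * safe countdown stops at 1 and index 0 acts as the fallback after the loop. */
#include <stdint.h>
#include <stdio.h>

/* Find the highest entry not exceeding 'target'; entry 0 is the fallback. */
static uint8_t highest_not_above(const uint32_t *clk, uint8_t count, uint32_t target)
{
    uint8_t i;

    for (i = count - 1; i > 0; i--) {
        if (clk[i] <= target)
            break;
    }
    return i;   /* i == 0 when no other entry matched */
}

int main(void)
{
    const uint32_t clocks[] = { 200, 300, 450, 600 };

    printf("%u\n", (unsigned)highest_not_above(clocks, 4, 500)); /* -> 2 */
    printf("%u\n", (unsigned)highest_not_above(clocks, 4, 100)); /* -> 0 */
    return 0;
}
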
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
index 32bb079572d7..8cef7525d7a8 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.h
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -192,6 +192,7 @@ int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
 int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
                u32 *value, u32 limit);
 int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable);
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
              u32 smc_start_address,
              const u8 *src, u32 byte_count, u32 limit);
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 34a226d7e34a..0000b59a6d05 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -107,6 +107,14 @@ int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
         return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
 }
 
+int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable)
+{
+    if (enable)
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
+    else
+        return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
+}
+
 int kv_copy_bytes_to_smc(struct radeon_device *rdev,
              u32 smc_start_address,
              const u8 *src, u32 byte_count, u32 limit)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index f7b625c9e0e9..f26339028154 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
     bool disable_mclk_switching;
     u32 mclk, sclk;
     u16 vddc, vddci;
+    u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
     int i;
 
     if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
         }
     }
 
+    /* limit clocks to max supported clocks based on voltage dependency tables */
+    btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                            &max_sclk_vddc);
+    btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                            &max_mclk_vddci);
+    btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                            &max_mclk_vddc);
+
+    for (i = 0; i < ps->performance_level_count; i++) {
+        if (max_sclk_vddc) {
+            if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                ps->performance_levels[i].sclk = max_sclk_vddc;
+        }
+        if (max_mclk_vddci) {
+            if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                ps->performance_levels[i].mclk = max_mclk_vddci;
+        }
+        if (max_mclk_vddc) {
+            if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                ps->performance_levels[i].mclk = max_mclk_vddc;
+        }
+    }
+
     /* XXX validate the min clocks required for display */
 
     if (disable_mclk_switching) {
@@ -3865,12 +3889,6 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
         return ret;
     }
 
-    ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
-    if (ret) {
-        DRM_ERROR("ni_dpm_force_performance_level failed\n");
-        return ret;
-    }
-
     return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 682842804bce..5670b8291285 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -163,6 +163,8 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_VCEPowerON                  ((uint32_t) 0x10f)
 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
 #define PPSMC_MSG_DCE_AllowVoltageAdjustment  ((uint32_t) 0x11e)
+#define PPSMC_MSG_EnableBAPM                  ((uint32_t) 0x120)
+#define PPSMC_MSG_DisableBAPM                 ((uint32_t) 0x121)
 #define PPSMC_MSG_UVD_DPM_Config              ((uint32_t) 0x124)
 
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9fc61dd68bc0..d71333033b2b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2853,21 +2853,28 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
 
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t data;
 
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
     r100_pll_errata_after_index(rdev);
     data = RREG32(RADEON_CLOCK_CNTL_DATA);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
     return data;
 }
 
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pll_idx_lock, flags);
     WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
     r100_pll_errata_after_index(rdev);
     WREG32(RADEON_CLOCK_CNTL_DATA, v);
     r100_pll_errata_after_data(rdev);
+    spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }
 
 static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -2926,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
     seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
     seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
     seq_printf(m, "%u dwords in ring\n", count);
-    for (j = 0; j <= count; j++) {
-        i = (rdp + j) & ring->ptr_mask;
-        seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+    if (ring->ready) {
+        for (j = 0; j <= count; j++) {
+            i = (rdp + j) & ring->ptr_mask;
+            seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+        }
     }
     return 0;
 }
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4e796ecf9ea4..6edf2b3a52b4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -160,18 +160,25 @@ void r420_pipes_init(struct radeon_device *rdev)
 
 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
     r = RREG32(R_0001FC_MC_IND_DATA);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }
 
 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
            S_0001F8_MC_IND_WR_EN(1));
     WREG32(R_0001FC_MC_IND_DATA, v);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r420_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea4d3734e6d9..2a1b1876b431 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
     return rdev->clock.spll.reference_freq;
 }
 
+int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+    return 0;
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
@@ -1045,20 +1050,27 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
 
 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t r;
 
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
     r = RREG32(R_0028FC_MC_DATA);
     WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
     return r;
 }
 
 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->mc_idx_lock, flags);
     WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
            S_0028F8_MC_IND_WR_EN(1));
     WREG32(R_0028FC_MC_DATA, v);
     WREG32(R_0028F8_MC_INDEX, 0x7F);
+    spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 }
 
 static void r600_mc_program(struct radeon_device *rdev)
@@ -2092,20 +2104,27 @@ static void r600_gpu_init(struct radeon_device *rdev)
  */
 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     r = RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
     return r;
 }
 
 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
     WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
     (void)RREG32(PCIE_PORT_INDEX);
     WREG32(PCIE_PORT_DATA, (v));
     (void)RREG32(PCIE_PORT_DATA);
+    spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa0de46fcc0d..5513d8f06252 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
             rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
             rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-                le16_to_cpu(limits->entries[i].usVoltage);
+                le16_to_cpu(entry->usVoltage);
             entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
         }
@@ -1219,30 +1219,20 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
 
 void r600_free_extended_power_table(struct radeon_device *rdev)
 {
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
-        kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
-    if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
-    if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
-    if (rdev->pm.dpm.dyn_state.ppm_table)
-        kfree(rdev->pm.dpm.dyn_state.ppm_table);
-    if (rdev->pm.dpm.dyn_state.cac_tdp_table)
-        kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
-    if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
-    if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
-        kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
+    struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
+
+    kfree(dyn_state->vddc_dependency_on_sclk.entries);
+    kfree(dyn_state->vddci_dependency_on_mclk.entries);
+    kfree(dyn_state->vddc_dependency_on_mclk.entries);
+    kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+    kfree(dyn_state->cac_leakage_table.entries);
+    kfree(dyn_state->phase_shedding_limits_table.entries);
+    kfree(dyn_state->ppm_table);
+    kfree(dyn_state->cac_tdp_table);
+    kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+    kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
 }
 
 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
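
The r600_free_extended_power_table() rewrite above also drops the per-pointer NULL checks, which is safe because kfree() ignores a NULL argument. The fragment below is a stand-alone illustration of the same point using the C library's free(), which gives the same guarantee; the struct and field names here are hypothetical and not part of the driver.

/* free(NULL) is defined to do nothing, so "if (p) free(p);" guards add no safety. */
#include <stdlib.h>
#include <string.h>

struct tables {
    int *vddc_on_sclk;
    int *vddci_on_mclk;
    int *cac_leakage;
};

static void free_tables(struct tables *t)
{
    free(t->vddc_on_sclk);   /* safe even if the allocation never happened */
    free(t->vddci_on_mclk);
    free(t->cac_leakage);
    memset(t, 0, sizeof(*t));
}

int main(void)
{
    struct tables t = { .vddc_on_sclk = malloc(16) };

    free_tables(&t);         /* two of the three pointers are NULL here */
    return 0;
}
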
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f443010ce90b..b0fa6002af3e 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
      * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
      * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
      */
-    if (ASIC_IS_DCE3(rdev)) {
-        /* according to the reg specs, this should DCE3.2 only, but in
-         * practice it seems to cover DCE3.0 as well.
-         */
+    if (ASIC_IS_DCE32(rdev)) {
         if (dig->dig_encoder == 0) {
             dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
             dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
             WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
             WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
         }
+    } else if (ASIC_IS_DCE3(rdev)) {
+        /* according to the reg specs, this should DCE3.2 only, but in
+         * practice it seems to cover DCE3.0/3.1 as well.
+         */
+        if (dig->dig_encoder == 0) {
+            WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+            WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+            WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+        } else {
+            WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+            WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+            WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+        }
     } else {
-        /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+        /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
         WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
                AUDIO_DTO_MODULE(clock / 10));
     }
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 454f90a849e4..e673fe26ea84 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1040,7 +1040,7 @@
 #       define HDMI0_AVI_INFO_CONT   (1 << 1)
 #       define HDMI0_AUDIO_INFO_SEND (1 << 4)
 #       define HDMI0_AUDIO_INFO_CONT (1 << 5)
-#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
 #       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
 #       define HDMI0_MPEG_INFO_SEND  (1 << 8)
 #       define HDMI0_MPEG_INFO_CONT  (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ff8b564ce2b2..a400ac1c4147 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -181,7 +181,7 @@ extern int radeon_aspm;
 #define RADEON_CG_SUPPORT_HDP_MGCG      (1 << 16)
 
 /* PG flags */
-#define RADEON_PG_SUPPORT_GFX_CG        (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_PG        (1 << 0)
 #define RADEON_PG_SUPPORT_GFX_SMG       (1 << 1)
 #define RADEON_PG_SUPPORT_GFX_DMG       (1 << 2)
 #define RADEON_PG_SUPPORT_UVD           (1 << 3)
@@ -1778,6 +1778,7 @@ struct radeon_asic {
         int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
         bool (*vblank_too_short)(struct radeon_device *rdev);
         void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
+        void (*enable_bapm)(struct radeon_device *rdev, bool enable);
     } dpm;
     /* pageflipping */
     struct {
@@ -2110,6 +2111,28 @@ struct radeon_device {
     resource_size_t         rmmio_size;
     /* protects concurrent MM_INDEX/DATA based register access */
     spinlock_t mmio_idx_lock;
+    /* protects concurrent SMC based register access */
+    spinlock_t smc_idx_lock;
+    /* protects concurrent PLL register access */
+    spinlock_t pll_idx_lock;
+    /* protects concurrent MC register access */
+    spinlock_t mc_idx_lock;
+    /* protects concurrent PCIE register access */
+    spinlock_t pcie_idx_lock;
+    /* protects concurrent PCIE_PORT register access */
+    spinlock_t pciep_idx_lock;
+    /* protects concurrent PIF register access */
+    spinlock_t pif_idx_lock;
+    /* protects concurrent CG register access */
+    spinlock_t cg_idx_lock;
+    /* protects concurrent UVD register access */
+    spinlock_t uvd_idx_lock;
+    /* protects concurrent RCU register access */
+    spinlock_t rcu_idx_lock;
+    /* protects concurrent DIDT register access */
+    spinlock_t didt_idx_lock;
+    /* protects concurrent ENDPOINT (audio) register access */
+    spinlock_t end_idx_lock;
     void __iomem            *rmmio;
     radeon_rreg_t           mc_rreg;
     radeon_wreg_t           mc_wreg;
@@ -2277,123 +2300,179 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
  */
 static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+    unsigned long flags;
     uint32_t r;
 
+    spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
     WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
     r = RREG32(RADEON_PCIE_DATA);
+    spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
     return r;
 }
 
 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
     WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
     WREG32(RADEON_PCIE_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
 }
 
 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(TN_SMC_IND_INDEX_0, (reg));
     r = RREG32(TN_SMC_IND_DATA_0);
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
     return r;
 }
 
 static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->smc_idx_lock, flags);
     WREG32(TN_SMC_IND_INDEX_0, (reg));
     WREG32(TN_SMC_IND_DATA_0, (v));
+    spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
     WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
     r = RREG32(R600_RCU_DATA);
+    spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
     return r;
 }
 
 static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
     WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
     WREG32(R600_RCU_DATA, (v));
+    spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
 }
 
 static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->cg_idx_lock, flags);
     WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_CG_IND_DATA);
+    spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
     return r;
 }
 
 static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->cg_idx_lock, flags);
     WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
     WREG32(EVERGREEN_CG_IND_DATA, (v));
+    spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
     return r;
 }
 
 static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
     WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
     r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
     return r;
 }
 
 static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 {
+    unsigned long flags;
+
+    spin_lock_irqsave(&rdev->pif_idx_lock, flags);
     WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
     WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
+    spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
 }
 
 static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
 {
+    unsigned long flags;
     u32 r;
 
+    spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
     WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
     r = RREG32(R600_UVD_CTX_DATA);
+    spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2374 return r; 2442 return r;
2375} 2443}
2376 2444
2377static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2445static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2378{ 2446{
2447 unsigned long flags;
2448
2449 spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
2379 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2450 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
2380 WREG32(R600_UVD_CTX_DATA, (v)); 2451 WREG32(R600_UVD_CTX_DATA, (v));
2452 spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
2381} 2453}
2382 2454
2383 2455
2384static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 2456static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
2385{ 2457{
2458 unsigned long flags;
2386 u32 r; 2459 u32 r;
2387 2460
2461 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2388 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2462 WREG32(CIK_DIDT_IND_INDEX, (reg));
2389 r = RREG32(CIK_DIDT_IND_DATA); 2463 r = RREG32(CIK_DIDT_IND_DATA);
2464 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2390 return r; 2465 return r;
2391} 2466}
2392 2467
2393static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2468static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2394{ 2469{
2470 unsigned long flags;
2471
2472 spin_lock_irqsave(&rdev->didt_idx_lock, flags);
2395 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2473 WREG32(CIK_DIDT_IND_INDEX, (reg));
2396 WREG32(CIK_DIDT_IND_DATA, (v)); 2474 WREG32(CIK_DIDT_IND_DATA, (v));
2475 spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
2397} 2476}
2398 2477
2399void r100_pll_errata_after_index(struct radeon_device *rdev); 2478void r100_pll_errata_after_index(struct radeon_device *rdev);
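All of the helpers in the hunk above follow the same indexed-access pattern: a register offset is written to an INDEX register and the value is then read from or written to the companion DATA register. Without a lock, two concurrent accesses can interleave the INDEX and DATA steps and hit the wrong register, which is why each INDEX/DATA pair now gets its own spinlock taken with spin_lock_irqsave(). A minimal sketch of the pattern, using a hypothetical lock and register pair rather than any specific one from the patch:

static inline u32 indexed_rreg(struct radeon_device *rdev, u32 reg)
{
        unsigned long flags;
        u32 r;

        /* serialize the INDEX write and the DATA read as a single unit */
        spin_lock_irqsave(&rdev->example_idx_lock, flags);     /* hypothetical lock */
        WREG32(EXAMPLE_INDEX, reg);                             /* hypothetical index register */
        r = RREG32(EXAMPLE_DATA);                               /* hypothetical data register */
        spin_unlock_irqrestore(&rdev->example_idx_lock, flags);
        return r;
}

The matching spin_lock_init() calls are added in radeon_device_init() further down in this patch.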
@@ -2569,6 +2648,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
2569#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) 2648#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2570#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) 2649#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2571#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) 2650#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2651#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
2572 2652
2573/* Common functions */ 2653/* Common functions */
2574/* AGP */ 2654/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 630853b96841..8f7e04538fd6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
1004 .wait_for_vblank = &avivo_wait_for_vblank, 1004 .wait_for_vblank = &avivo_wait_for_vblank,
1005 .set_backlight_level = &atombios_set_backlight_level, 1005 .set_backlight_level = &atombios_set_backlight_level,
1006 .get_backlight_level = &atombios_get_backlight_level, 1006 .get_backlight_level = &atombios_get_backlight_level,
1007 .hdmi_enable = &r600_hdmi_enable,
1008 .hdmi_setmode = &r600_hdmi_setmode,
1007 }, 1009 },
1008 .copy = { 1010 .copy = {
1009 .blit = &r600_copy_cpdma, 1011 .blit = &r600_copy_cpdma,
@@ -1037,6 +1039,7 @@ static struct radeon_asic rv6xx_asic = {
1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1039 .set_pcie_lanes = &r600_set_pcie_lanes,
1038 .set_clock_gating = NULL, 1040 .set_clock_gating = NULL,
1039 .get_temperature = &rv6xx_get_temp, 1041 .get_temperature = &rv6xx_get_temp,
1042 .set_uvd_clocks = &r600_set_uvd_clocks,
1040 }, 1043 },
1041 .dpm = { 1044 .dpm = {
1042 .init = &rv6xx_dpm_init, 1045 .init = &rv6xx_dpm_init,
@@ -1126,6 +1129,7 @@ static struct radeon_asic rs780_asic = {
1126 .set_pcie_lanes = NULL, 1129 .set_pcie_lanes = NULL,
1127 .set_clock_gating = NULL, 1130 .set_clock_gating = NULL,
1128 .get_temperature = &rv6xx_get_temp, 1131 .get_temperature = &rv6xx_get_temp,
1132 .set_uvd_clocks = &r600_set_uvd_clocks,
1129 }, 1133 },
1130 .dpm = { 1134 .dpm = {
1131 .init = &rs780_dpm_init, 1135 .init = &rs780_dpm_init,
@@ -1141,6 +1145,7 @@ static struct radeon_asic rs780_asic = {
1141 .get_mclk = &rs780_dpm_get_mclk, 1145 .get_mclk = &rs780_dpm_get_mclk,
1142 .print_power_state = &rs780_dpm_print_power_state, 1146 .print_power_state = &rs780_dpm_print_power_state,
1143 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1147 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
1148 .force_performance_level = &rs780_dpm_force_performance_level,
1144 }, 1149 },
1145 .pflip = { 1150 .pflip = {
1146 .pre_page_flip = &rs600_pre_page_flip, 1151 .pre_page_flip = &rs600_pre_page_flip,
@@ -1791,6 +1796,7 @@ static struct radeon_asic trinity_asic = {
1791 .print_power_state = &trinity_dpm_print_power_state, 1796 .print_power_state = &trinity_dpm_print_power_state,
1792 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1797 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
1793 .force_performance_level = &trinity_dpm_force_performance_level, 1798 .force_performance_level = &trinity_dpm_force_performance_level,
1799 .enable_bapm = &trinity_dpm_enable_bapm,
1794 }, 1800 },
1795 .pflip = { 1801 .pflip = {
1796 .pre_page_flip = &evergreen_pre_page_flip, 1802 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2166,6 +2172,7 @@ static struct radeon_asic kv_asic = {
2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2172 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
2167 .force_performance_level = &kv_dpm_force_performance_level, 2173 .force_performance_level = &kv_dpm_force_performance_level,
2168 .powergate_uvd = &kv_dpm_powergate_uvd, 2174 .powergate_uvd = &kv_dpm_powergate_uvd,
2175 .enable_bapm = &kv_dpm_enable_bapm,
2169 }, 2176 },
2170 .pflip = { 2177 .pflip = {
2171 .pre_page_flip = &evergreen_pre_page_flip, 2178 .pre_page_flip = &evergreen_pre_page_flip,
@@ -2390,7 +2397,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2390 RADEON_CG_SUPPORT_HDP_LS | 2397 RADEON_CG_SUPPORT_HDP_LS |
2391 RADEON_CG_SUPPORT_HDP_MGCG; 2398 RADEON_CG_SUPPORT_HDP_MGCG;
2392 rdev->pg_flags = 0 | 2399 rdev->pg_flags = 0 |
2393 /*RADEON_PG_SUPPORT_GFX_CG | */ 2400 /*RADEON_PG_SUPPORT_GFX_PG | */
2394 RADEON_PG_SUPPORT_SDMA; 2401 RADEON_PG_SUPPORT_SDMA;
2395 break; 2402 break;
2396 case CHIP_OLAND: 2403 case CHIP_OLAND:
@@ -2479,7 +2486,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2479 RADEON_CG_SUPPORT_HDP_LS | 2486 RADEON_CG_SUPPORT_HDP_LS |
2480 RADEON_CG_SUPPORT_HDP_MGCG; 2487 RADEON_CG_SUPPORT_HDP_MGCG;
2481 rdev->pg_flags = 0; 2488 rdev->pg_flags = 0;
2482 /*RADEON_PG_SUPPORT_GFX_CG | 2489 /*RADEON_PG_SUPPORT_GFX_PG |
2483 RADEON_PG_SUPPORT_GFX_SMG | 2490 RADEON_PG_SUPPORT_GFX_SMG |
2484 RADEON_PG_SUPPORT_GFX_DMG | 2491 RADEON_PG_SUPPORT_GFX_DMG |
2485 RADEON_PG_SUPPORT_UVD | 2492 RADEON_PG_SUPPORT_UVD |
@@ -2507,7 +2514,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2507 RADEON_CG_SUPPORT_HDP_LS | 2514 RADEON_CG_SUPPORT_HDP_LS |
2508 RADEON_CG_SUPPORT_HDP_MGCG; 2515 RADEON_CG_SUPPORT_HDP_MGCG;
2509 rdev->pg_flags = 0; 2516 rdev->pg_flags = 0;
2510 /*RADEON_PG_SUPPORT_GFX_CG | 2517 /*RADEON_PG_SUPPORT_GFX_PG |
2511 RADEON_PG_SUPPORT_GFX_SMG | 2518 RADEON_PG_SUPPORT_GFX_SMG |
2512 RADEON_PG_SUPPORT_UVD | 2519 RADEON_PG_SUPPORT_UVD |
2513 RADEON_PG_SUPPORT_VCE | 2520 RADEON_PG_SUPPORT_VCE |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 818bbe6b884b..70c29d5e080d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -389,6 +389,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
389u32 r600_get_xclk(struct radeon_device *rdev); 389u32 r600_get_xclk(struct radeon_device *rdev);
390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 390uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
391int rv6xx_get_temp(struct radeon_device *rdev); 391int rv6xx_get_temp(struct radeon_device *rdev);
392int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
392int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 393int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
393void r600_dpm_post_set_power_state(struct radeon_device *rdev); 394void r600_dpm_post_set_power_state(struct radeon_device *rdev);
394/* r600 dma */ 395/* r600 dma */
@@ -428,6 +429,8 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
428 struct radeon_ps *ps); 429 struct radeon_ps *ps);
429void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 430void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
430 struct seq_file *m); 431 struct seq_file *m);
432int rs780_dpm_force_performance_level(struct radeon_device *rdev,
433 enum radeon_dpm_forced_level level);
431 434
432/* 435/*
433 * rv770,rv730,rv710,rv740 436 * rv770,rv730,rv710,rv740
@@ -625,6 +628,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
625 struct seq_file *m); 628 struct seq_file *m);
626int trinity_dpm_force_performance_level(struct radeon_device *rdev, 629int trinity_dpm_force_performance_level(struct radeon_device *rdev,
627 enum radeon_dpm_forced_level level); 630 enum radeon_dpm_forced_level level);
631void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
628 632
629/* DCE6 - SI */ 633/* DCE6 - SI */
630void dce6_bandwidth_update(struct radeon_device *rdev); 634void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -781,6 +785,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
781int kv_dpm_force_performance_level(struct radeon_device *rdev, 785int kv_dpm_force_performance_level(struct radeon_device *rdev,
782 enum radeon_dpm_forced_level level); 786 enum radeon_dpm_forced_level level);
783void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 787void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
788void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
784 789
785/* uvd v1.0 */ 790/* uvd v1.0 */
786uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, 791uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 404e25d285ba..f79ee184ffd5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); 1367 int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
1368 uint16_t data_offset, size; 1368 uint16_t data_offset, size;
1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1369 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
1370 struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
1370 uint8_t frev, crev; 1371 uint8_t frev, crev;
1371 int i, num_indices; 1372 int i, num_indices;
1372 1373
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1378 1379
1379 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1380 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1380 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); 1381 sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
1381 1382 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1383 ((u8 *)&ss_info->asSS_Info[0]);
1382 for (i = 0; i < num_indices; i++) { 1384 for (i = 0; i < num_indices; i++) {
1383 if (ss_info->asSS_Info[i].ucSS_Id == id) { 1385 if (ss_assign->ucSS_Id == id) {
1384 ss->percentage = 1386 ss->percentage =
1385 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); 1387 le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
1386 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; 1388 ss->type = ss_assign->ucSpreadSpectrumType;
1387 ss->step = ss_info->asSS_Info[i].ucSS_Step; 1389 ss->step = ss_assign->ucSS_Step;
1388 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1390 ss->delay = ss_assign->ucSS_Delay;
1389 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1391 ss->range = ss_assign->ucSS_Range;
1390 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1392 ss->refdiv = ss_assign->ucRecommendedRef_Div;
1391 return true; 1393 return true;
1392 } 1394 }
1395 ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
1396 ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
1393 } 1397 }
1394 } 1398 }
1395 return false; 1399 return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
1477 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; 1481 struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
1478}; 1482};
1479 1483
1484union asic_ss_assignment {
1485 struct _ATOM_ASIC_SS_ASSIGNMENT v1;
1486 struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
1487 struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
1488};
1489
1480bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 1490bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1481 struct radeon_atom_ss *ss, 1491 struct radeon_atom_ss *ss,
1482 int id, u32 clock) 1492 int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1485 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 1495 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
1486 uint16_t data_offset, size; 1496 uint16_t data_offset, size;
1487 union asic_ss_info *ss_info; 1497 union asic_ss_info *ss_info;
1498 union asic_ss_assignment *ss_assign;
1488 uint8_t frev, crev; 1499 uint8_t frev, crev;
1489 int i, num_indices; 1500 int i, num_indices;
1490 1501
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1509 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1520 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1510 sizeof(ATOM_ASIC_SS_ASSIGNMENT); 1521 sizeof(ATOM_ASIC_SS_ASSIGNMENT);
1511 1522
1523 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
1512 for (i = 0; i < num_indices; i++) { 1524 for (i = 0; i < num_indices; i++) {
1513 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1525 if ((ss_assign->v1.ucClockIndication == id) &&
1514 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { 1526 (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
1515 ss->percentage = 1527 ss->percentage =
1516 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1528 le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
1517 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1529 ss->type = ss_assign->v1.ucSpreadSpectrumMode;
1518 ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); 1530 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
1519 return true; 1531 return true;
1520 } 1532 }
1533 ss_assign = (union asic_ss_assignment *)
1534 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
1521 } 1535 }
1522 break; 1536 break;
1523 case 2: 1537 case 2:
1524 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1538 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1525 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1539 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1540 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
1526 for (i = 0; i < num_indices; i++) { 1541 for (i = 0; i < num_indices; i++) {
1527 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1542 if ((ss_assign->v2.ucClockIndication == id) &&
1528 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { 1543 (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
1529 ss->percentage = 1544 ss->percentage =
1530 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1545 le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
1531 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1546 ss->type = ss_assign->v2.ucSpreadSpectrumMode;
1532 ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1547 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
1533 if ((crev == 2) && 1548 if ((crev == 2) &&
1534 ((id == ASIC_INTERNAL_ENGINE_SS) || 1549 ((id == ASIC_INTERNAL_ENGINE_SS) ||
1535 (id == ASIC_INTERNAL_MEMORY_SS))) 1550 (id == ASIC_INTERNAL_MEMORY_SS)))
1536 ss->rate /= 100; 1551 ss->rate /= 100;
1537 return true; 1552 return true;
1538 } 1553 }
1554 ss_assign = (union asic_ss_assignment *)
1555 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
1539 } 1556 }
1540 break; 1557 break;
1541 case 3: 1558 case 3:
1542 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / 1559 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
1543 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1560 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1561 ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
1544 for (i = 0; i < num_indices; i++) { 1562 for (i = 0; i < num_indices; i++) {
1545 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1563 if ((ss_assign->v3.ucClockIndication == id) &&
1546 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { 1564 (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
1547 ss->percentage = 1565 ss->percentage =
1548 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1566 le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
1549 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1567 ss->type = ss_assign->v3.ucSpreadSpectrumMode;
1550 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1568 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
1551 if ((id == ASIC_INTERNAL_ENGINE_SS) || 1569 if ((id == ASIC_INTERNAL_ENGINE_SS) ||
1552 (id == ASIC_INTERNAL_MEMORY_SS)) 1570 (id == ASIC_INTERNAL_MEMORY_SS))
1553 ss->rate /= 100; 1571 ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1555 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); 1573 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1556 return true; 1574 return true;
1557 } 1575 }
1576 ss_assign = (union asic_ss_assignment *)
1577 ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
1558 } 1578 }
1559 break; 1579 break;
1560 default: 1580 default:
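The reworked loops above address a stride problem: the ATOM spread-spectrum tables pack records whose size depends on the table revision, so indexing a fixed asSS_Info[]/asSpreadSpectrum[] array can advance by the wrong amount. The replacement walks the table with a byte pointer and advances it by the record size matching the detected revision. Schematically, reusing the revision-2 names from the hunk (a traversal sketch, not the full matching logic):

        u8 *rec = (u8 *)&ss_info->info_2.asSpreadSpectrum[0];

        for (i = 0; i < num_indices; i++) {
                ATOM_ASIC_SS_ASSIGNMENT_V2 *a = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)rec;

                if (a->ucClockIndication == id)
                        break;  /* found the entry for this clock */

                rec += sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);      /* step by the per-revision record size */
        }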
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2399f25ec037..79159b5da05b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -396,6 +396,21 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
396 } 396 }
397 } 397 }
398 398
399 if (property == rdev->mode_info.audio_property) {
400 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
401 /* need to find digital encoder on connector */
402 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
403 if (!encoder)
404 return 0;
405
406 radeon_encoder = to_radeon_encoder(encoder);
407
408 if (radeon_connector->audio != val) {
409 radeon_connector->audio = val;
410 radeon_property_change_mode(&radeon_encoder->base);
411 }
412 }
413
399 if (property == rdev->mode_info.underscan_property) { 414 if (property == rdev->mode_info.underscan_property) {
400 /* need to find digital encoder on connector */ 415 /* need to find digital encoder on connector */
401 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -1420,7 +1435,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1420 if (radeon_dp_getdpcd(radeon_connector)) 1435 if (radeon_dp_getdpcd(radeon_connector))
1421 ret = connector_status_connected; 1436 ret = connector_status_connected;
1422 } else { 1437 } else {
1423 /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */ 1438 /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
1424 if (radeon_ddc_probe(radeon_connector, false)) 1439 if (radeon_ddc_probe(radeon_connector, false))
1425 ret = connector_status_connected; 1440 ret = connector_status_connected;
1426 } 1441 }
@@ -1489,6 +1504,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1489 .force = radeon_dvi_force, 1504 .force = radeon_dvi_force,
1490}; 1505};
1491 1506
1507static const struct drm_connector_funcs radeon_edp_connector_funcs = {
1508 .dpms = drm_helper_connector_dpms,
1509 .detect = radeon_dp_detect,
1510 .fill_modes = drm_helper_probe_single_connector_modes,
1511 .set_property = radeon_lvds_set_property,
1512 .destroy = radeon_dp_connector_destroy,
1513 .force = radeon_dvi_force,
1514};
1515
1516static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
1517 .dpms = drm_helper_connector_dpms,
1518 .detect = radeon_dp_detect,
1519 .fill_modes = drm_helper_probe_single_connector_modes,
1520 .set_property = radeon_lvds_set_property,
1521 .destroy = radeon_dp_connector_destroy,
1522 .force = radeon_dvi_force,
1523};
1524
1492void 1525void
1493radeon_add_atom_connector(struct drm_device *dev, 1526radeon_add_atom_connector(struct drm_device *dev,
1494 uint32_t connector_id, 1527 uint32_t connector_id,
@@ -1580,8 +1613,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1580 goto failed; 1613 goto failed;
1581 radeon_dig_connector->igp_lane_info = igp_lane_info; 1614 radeon_dig_connector->igp_lane_info = igp_lane_info;
1582 radeon_connector->con_priv = radeon_dig_connector; 1615 radeon_connector->con_priv = radeon_dig_connector;
1583 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1584 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1585 if (i2c_bus->valid) { 1616 if (i2c_bus->valid) {
1586 /* add DP i2c bus */ 1617 /* add DP i2c bus */
1587 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1618 if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1598,6 +1629,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1598 case DRM_MODE_CONNECTOR_VGA: 1629 case DRM_MODE_CONNECTOR_VGA:
1599 case DRM_MODE_CONNECTOR_DVIA: 1630 case DRM_MODE_CONNECTOR_DVIA:
1600 default: 1631 default:
1632 drm_connector_init(dev, &radeon_connector->base,
1633 &radeon_dp_connector_funcs, connector_type);
1634 drm_connector_helper_add(&radeon_connector->base,
1635 &radeon_dp_connector_helper_funcs);
1601 connector->interlace_allowed = true; 1636 connector->interlace_allowed = true;
1602 connector->doublescan_allowed = true; 1637 connector->doublescan_allowed = true;
1603 radeon_connector->dac_load_detect = true; 1638 radeon_connector->dac_load_detect = true;
@@ -1610,6 +1645,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1610 case DRM_MODE_CONNECTOR_HDMIA: 1645 case DRM_MODE_CONNECTOR_HDMIA:
1611 case DRM_MODE_CONNECTOR_HDMIB: 1646 case DRM_MODE_CONNECTOR_HDMIB:
1612 case DRM_MODE_CONNECTOR_DisplayPort: 1647 case DRM_MODE_CONNECTOR_DisplayPort:
1648 drm_connector_init(dev, &radeon_connector->base,
1649 &radeon_dp_connector_funcs, connector_type);
1650 drm_connector_helper_add(&radeon_connector->base,
1651 &radeon_dp_connector_helper_funcs);
1613 drm_object_attach_property(&radeon_connector->base.base, 1652 drm_object_attach_property(&radeon_connector->base.base,
1614 rdev->mode_info.underscan_property, 1653 rdev->mode_info.underscan_property,
1615 UNDERSCAN_OFF); 1654 UNDERSCAN_OFF);
@@ -1619,6 +1658,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1619 drm_object_attach_property(&radeon_connector->base.base, 1658 drm_object_attach_property(&radeon_connector->base.base,
1620 rdev->mode_info.underscan_vborder_property, 1659 rdev->mode_info.underscan_vborder_property,
1621 0); 1660 0);
1661 drm_object_attach_property(&radeon_connector->base.base,
1662 rdev->mode_info.audio_property,
1663 RADEON_AUDIO_DISABLE);
1622 subpixel_order = SubPixelHorizontalRGB; 1664 subpixel_order = SubPixelHorizontalRGB;
1623 connector->interlace_allowed = true; 1665 connector->interlace_allowed = true;
1624 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1666 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1634,6 +1676,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1634 break; 1676 break;
1635 case DRM_MODE_CONNECTOR_LVDS: 1677 case DRM_MODE_CONNECTOR_LVDS:
1636 case DRM_MODE_CONNECTOR_eDP: 1678 case DRM_MODE_CONNECTOR_eDP:
1679 drm_connector_init(dev, &radeon_connector->base,
1680 &radeon_lvds_bridge_connector_funcs, connector_type);
1681 drm_connector_helper_add(&radeon_connector->base,
1682 &radeon_dp_connector_helper_funcs);
1637 drm_object_attach_property(&radeon_connector->base.base, 1683 drm_object_attach_property(&radeon_connector->base.base,
1638 dev->mode_config.scaling_mode_property, 1684 dev->mode_config.scaling_mode_property,
1639 DRM_MODE_SCALE_FULLSCREEN); 1685 DRM_MODE_SCALE_FULLSCREEN);
@@ -1708,6 +1754,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1708 rdev->mode_info.underscan_vborder_property, 1754 rdev->mode_info.underscan_vborder_property,
1709 0); 1755 0);
1710 } 1756 }
1757 if (ASIC_IS_DCE2(rdev)) {
1758 drm_object_attach_property(&radeon_connector->base.base,
1759 rdev->mode_info.audio_property,
1760 RADEON_AUDIO_DISABLE);
1761 }
1711 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1762 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1712 radeon_connector->dac_load_detect = true; 1763 radeon_connector->dac_load_detect = true;
1713 drm_object_attach_property(&radeon_connector->base.base, 1764 drm_object_attach_property(&radeon_connector->base.base,
@@ -1748,6 +1799,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1748 rdev->mode_info.underscan_vborder_property, 1799 rdev->mode_info.underscan_vborder_property,
1749 0); 1800 0);
1750 } 1801 }
1802 if (ASIC_IS_DCE2(rdev)) {
1803 drm_object_attach_property(&radeon_connector->base.base,
1804 rdev->mode_info.audio_property,
1805 RADEON_AUDIO_DISABLE);
1806 }
1751 subpixel_order = SubPixelHorizontalRGB; 1807 subpixel_order = SubPixelHorizontalRGB;
1752 connector->interlace_allowed = true; 1808 connector->interlace_allowed = true;
1753 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1809 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1787,6 +1843,11 @@ radeon_add_atom_connector(struct drm_device *dev,
1787 rdev->mode_info.underscan_vborder_property, 1843 rdev->mode_info.underscan_vborder_property,
1788 0); 1844 0);
1789 } 1845 }
1846 if (ASIC_IS_DCE2(rdev)) {
1847 drm_object_attach_property(&radeon_connector->base.base,
1848 rdev->mode_info.audio_property,
1849 RADEON_AUDIO_DISABLE);
1850 }
1790 connector->interlace_allowed = true; 1851 connector->interlace_allowed = true;
1791 /* in theory with a DP to VGA converter... */ 1852 /* in theory with a DP to VGA converter... */
1792 connector->doublescan_allowed = false; 1853 connector->doublescan_allowed = false;
@@ -1797,7 +1858,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1797 goto failed; 1858 goto failed;
1798 radeon_dig_connector->igp_lane_info = igp_lane_info; 1859 radeon_dig_connector->igp_lane_info = igp_lane_info;
1799 radeon_connector->con_priv = radeon_dig_connector; 1860 radeon_connector->con_priv = radeon_dig_connector;
1800 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1861 drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
1801 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1862 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1802 if (i2c_bus->valid) { 1863 if (i2c_bus->valid) {
1803 /* add DP i2c bus */ 1864 /* add DP i2c bus */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a56084410372..66c222836631 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -28,6 +28,7 @@
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_trace.h"
31 32
32static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 33static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
33{ 34{
@@ -80,10 +81,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
80 p->relocs[i].lobj.bo = p->relocs[i].robj; 81 p->relocs[i].lobj.bo = p->relocs[i].robj;
81 p->relocs[i].lobj.written = !!r->write_domain; 82 p->relocs[i].lobj.written = !!r->write_domain;
82 83
83 /* the first reloc of an UVD job is the 84 /* the first reloc of an UVD job is the msg and that must be in
 84 msg and that must be in VRAM */ 85 VRAM, also put everything into VRAM on AGP cards to avoid
85 if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) { 86 image corruptions */
86 /* TODO: is this still needed for NI+ ? */ 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 p->rdev->family < CHIP_PALM &&
89 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
90
87 p->relocs[i].lobj.domain = 91 p->relocs[i].lobj.domain =
88 RADEON_GEM_DOMAIN_VRAM; 92 RADEON_GEM_DOMAIN_VRAM;
89 93
@@ -559,6 +563,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
559 return r; 563 return r;
560 } 564 }
561 565
566 trace_radeon_cs(&parser);
567
562 r = radeon_cs_ib_chunk(rdev, &parser); 568 r = radeon_cs_ib_chunk(rdev, &parser);
563 if (r) { 569 if (r) {
564 goto out; 570 goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 16cb8792b1e6..841d0e09be3e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1249,6 +1249,17 @@ int radeon_device_init(struct radeon_device *rdev,
1249 /* Registers mapping */ 1249 /* Registers mapping */
1250 /* TODO: block userspace mapping of io register */ 1250 /* TODO: block userspace mapping of io register */
1251 spin_lock_init(&rdev->mmio_idx_lock); 1251 spin_lock_init(&rdev->mmio_idx_lock);
1252 spin_lock_init(&rdev->smc_idx_lock);
1253 spin_lock_init(&rdev->pll_idx_lock);
1254 spin_lock_init(&rdev->mc_idx_lock);
1255 spin_lock_init(&rdev->pcie_idx_lock);
1256 spin_lock_init(&rdev->pciep_idx_lock);
1257 spin_lock_init(&rdev->pif_idx_lock);
1258 spin_lock_init(&rdev->cg_idx_lock);
1259 spin_lock_init(&rdev->uvd_idx_lock);
1260 spin_lock_init(&rdev->rcu_idx_lock);
1261 spin_lock_init(&rdev->didt_idx_lock);
1262 spin_lock_init(&rdev->end_idx_lock);
1252 if (rdev->family >= CHIP_BONAIRE) { 1263 if (rdev->family >= CHIP_BONAIRE) {
1253 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1264 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1254 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1265 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@@ -1309,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
1309 return r; 1320 return r;
1310 } 1321 }
1311 if ((radeon_testing & 1)) { 1322 if ((radeon_testing & 1)) {
1312 radeon_test_moves(rdev); 1323 if (rdev->accel_working)
1324 radeon_test_moves(rdev);
1325 else
1326 DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1313 } 1327 }
1314 if ((radeon_testing & 2)) { 1328 if ((radeon_testing & 2)) {
1315 radeon_test_syncing(rdev); 1329 if (rdev->accel_working)
1330 radeon_test_syncing(rdev);
1331 else
1332 DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1316 } 1333 }
1317 if (radeon_benchmarking) { 1334 if (radeon_benchmarking) {
1318 radeon_benchmark(rdev, radeon_benchmarking); 1335 if (rdev->accel_working)
1336 radeon_benchmark(rdev, radeon_benchmarking);
1337 else
1338 DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1319 } 1339 }
1320 return 0; 1340 return 0;
1321} 1341}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b055bddaa94c..0d1aa050d41d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1172,6 +1172,12 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
1172 { UNDERSCAN_AUTO, "auto" }, 1172 { UNDERSCAN_AUTO, "auto" },
1173}; 1173};
1174 1174
1175static struct drm_prop_enum_list radeon_audio_enum_list[] =
1176{ { RADEON_AUDIO_DISABLE, "off" },
1177 { RADEON_AUDIO_ENABLE, "on" },
1178 { RADEON_AUDIO_AUTO, "auto" },
1179};
1180
1175static int radeon_modeset_create_props(struct radeon_device *rdev) 1181static int radeon_modeset_create_props(struct radeon_device *rdev)
1176{ 1182{
1177 int sz; 1183 int sz;
@@ -1222,6 +1228,12 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
1222 if (!rdev->mode_info.underscan_vborder_property) 1228 if (!rdev->mode_info.underscan_vborder_property)
1223 return -ENOMEM; 1229 return -ENOMEM;
1224 1230
1231 sz = ARRAY_SIZE(radeon_audio_enum_list);
1232 rdev->mode_info.audio_property =
1233 drm_property_create_enum(rdev->ddev, 0,
1234 "audio",
1235 radeon_audio_enum_list, sz);
1236
1225 return 0; 1237 return 0;
1226} 1238}
1227 1239
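Together with the connector changes earlier in this patch, this completes the usual three-step lifecycle of a DRM enum property: it is created once at modeset init, attached to each eligible connector with a default value, and then handled in the connector's set_property callback. In outline, reusing the calls from this patch:

        /* created once, in radeon_modeset_create_props() */
        rdev->mode_info.audio_property =
                drm_property_create_enum(rdev->ddev, 0, "audio",
                                         radeon_audio_enum_list,
                                         ARRAY_SIZE(radeon_audio_enum_list));

        /* attached per connector, with RADEON_AUDIO_DISABLE as the default */
        drm_object_attach_property(&radeon_connector->base.base,
                                   rdev->mode_info.audio_property,
                                   RADEON_AUDIO_DISABLE);

Changes made by userspace then arrive in radeon_connector_set_property(), which stores the new value and triggers a mode change so the encoder reprograms audio.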
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb4445f55a96..cdd12dcd988b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
153int radeon_testing = 0; 153int radeon_testing = 0;
154int radeon_connector_table = 0; 154int radeon_connector_table = 0;
155int radeon_tv = 1; 155int radeon_tv = 1;
156int radeon_audio = 0; 156int radeon_audio = 1;
157int radeon_disp_priority = 0; 157int radeon_disp_priority = 0;
158int radeon_hw_i2c = 0; 158int radeon_hw_i2c = 0;
159int radeon_pcie_gen2 = -1; 159int radeon_pcie_gen2 = -1;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d908d8d68f6b..ef63d3f00b2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -247,6 +247,8 @@ struct radeon_mode_info {
247 struct drm_property *underscan_property; 247 struct drm_property *underscan_property;
248 struct drm_property *underscan_hborder_property; 248 struct drm_property *underscan_hborder_property;
249 struct drm_property *underscan_vborder_property; 249 struct drm_property *underscan_vborder_property;
250 /* audio */
251 struct drm_property *audio_property;
250 /* hardcoded DFP edid from BIOS */ 252 /* hardcoded DFP edid from BIOS */
251 struct edid *bios_hardcoded_edid; 253 struct edid *bios_hardcoded_edid;
252 int bios_hardcoded_edid_size; 254 int bios_hardcoded_edid_size;
@@ -471,6 +473,12 @@ struct radeon_router {
471 u8 cd_mux_state; 473 u8 cd_mux_state;
472}; 474};
473 475
476enum radeon_connector_audio {
477 RADEON_AUDIO_DISABLE = 0,
478 RADEON_AUDIO_ENABLE = 1,
479 RADEON_AUDIO_AUTO = 2
480};
481
474struct radeon_connector { 482struct radeon_connector {
475 struct drm_connector base; 483 struct drm_connector base;
476 uint32_t connector_id; 484 uint32_t connector_id;
@@ -489,6 +497,7 @@ struct radeon_connector {
489 struct radeon_hpd hpd; 497 struct radeon_hpd hpd;
490 struct radeon_router router; 498 struct radeon_router router;
491 struct radeon_i2c_chan *router_bus; 499 struct radeon_i2c_chan *router_bus;
500 enum radeon_connector_audio audio;
492}; 501};
493 502
494struct radeon_framebuffer { 503struct radeon_framebuffer {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d7555369a3e5..ac07ad1d4f8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -67,7 +67,16 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
67 67
68void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 68void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69{ 69{
70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 70 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71 mutex_lock(&rdev->pm.mutex);
72 if (power_supply_is_system_supplied() > 0)
73 rdev->pm.dpm.ac_power = true;
74 else
75 rdev->pm.dpm.ac_power = false;
76 if (rdev->asic->dpm.enable_bapm)
77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
78 mutex_unlock(&rdev->pm.mutex);
79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
71 if (rdev->pm.profile == PM_PROFILE_AUTO) { 80 if (rdev->pm.profile == PM_PROFILE_AUTO) {
72 mutex_lock(&rdev->pm.mutex); 81 mutex_lock(&rdev->pm.mutex);
73 radeon_pm_update_profile(rdev); 82 radeon_pm_update_profile(rdev);
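With the new DPM branch, an ACPI AC-adapter plug/unplug event now updates rdev->pm.dpm.ac_power from the power-supply class and, when the ASIC provides the hook, toggles BAPM (bidirectional application power management, which lets APUs share power budget between CPU and GPU) to match. The optional-callback guard is the part worth noting; roughly:

        bool ac = power_supply_is_system_supplied() > 0;        /* > 0 means running on AC */

        mutex_lock(&rdev->pm.mutex);
        rdev->pm.dpm.ac_power = ac;
        if (rdev->asic->dpm.enable_bapm)        /* only some ASICs implement this */
                radeon_dpm_enable_bapm(rdev, ac);
        mutex_unlock(&rdev->pm.mutex);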
@@ -333,7 +342,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
333 struct device_attribute *attr, 342 struct device_attribute *attr,
334 char *buf) 343 char *buf)
335{ 344{
336 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 345 struct drm_device *ddev = dev_get_drvdata(dev);
337 struct radeon_device *rdev = ddev->dev_private; 346 struct radeon_device *rdev = ddev->dev_private;
338 int cp = rdev->pm.profile; 347 int cp = rdev->pm.profile;
339 348
@@ -349,7 +358,7 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
349 const char *buf, 358 const char *buf,
350 size_t count) 359 size_t count)
351{ 360{
352 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 361 struct drm_device *ddev = dev_get_drvdata(dev);
353 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
354 363
355 mutex_lock(&rdev->pm.mutex); 364 mutex_lock(&rdev->pm.mutex);
@@ -383,7 +392,7 @@ static ssize_t radeon_get_pm_method(struct device *dev,
383 struct device_attribute *attr, 392 struct device_attribute *attr,
384 char *buf) 393 char *buf)
385{ 394{
386 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 395 struct drm_device *ddev = dev_get_drvdata(dev);
387 struct radeon_device *rdev = ddev->dev_private; 396 struct radeon_device *rdev = ddev->dev_private;
388 int pm = rdev->pm.pm_method; 397 int pm = rdev->pm.pm_method;
389 398
@@ -397,7 +406,7 @@ static ssize_t radeon_set_pm_method(struct device *dev,
397 const char *buf, 406 const char *buf,
398 size_t count) 407 size_t count)
399{ 408{
400 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 409 struct drm_device *ddev = dev_get_drvdata(dev);
401 struct radeon_device *rdev = ddev->dev_private; 410 struct radeon_device *rdev = ddev->dev_private;
402 411
403 /* we don't support the legacy modes with dpm */ 412 /* we don't support the legacy modes with dpm */
@@ -433,7 +442,7 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
433 struct device_attribute *attr, 442 struct device_attribute *attr,
434 char *buf) 443 char *buf)
435{ 444{
436 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 445 struct drm_device *ddev = dev_get_drvdata(dev);
437 struct radeon_device *rdev = ddev->dev_private; 446 struct radeon_device *rdev = ddev->dev_private;
438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
439 448
@@ -447,7 +456,7 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
447 const char *buf, 456 const char *buf,
448 size_t count) 457 size_t count)
449{ 458{
450 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 459 struct drm_device *ddev = dev_get_drvdata(dev);
451 struct radeon_device *rdev = ddev->dev_private; 460 struct radeon_device *rdev = ddev->dev_private;
452 461
453 mutex_lock(&rdev->pm.mutex); 462 mutex_lock(&rdev->pm.mutex);
@@ -472,7 +481,7 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
472 struct device_attribute *attr, 481 struct device_attribute *attr,
473 char *buf) 482 char *buf)
474{ 483{
475 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 484 struct drm_device *ddev = dev_get_drvdata(dev);
476 struct radeon_device *rdev = ddev->dev_private; 485 struct radeon_device *rdev = ddev->dev_private;
477 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
478 487
@@ -486,7 +495,7 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
486 const char *buf, 495 const char *buf,
487 size_t count) 496 size_t count)
488{ 497{
489 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 498 struct drm_device *ddev = dev_get_drvdata(dev);
490 struct radeon_device *rdev = ddev->dev_private; 499 struct radeon_device *rdev = ddev->dev_private;
491 enum radeon_dpm_forced_level level; 500 enum radeon_dpm_forced_level level;
492 int ret = 0; 501 int ret = 0;
@@ -524,7 +533,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
524 struct device_attribute *attr, 533 struct device_attribute *attr,
525 char *buf) 534 char *buf)
526{ 535{
527 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 536 struct drm_device *ddev = dev_get_drvdata(dev);
528 struct radeon_device *rdev = ddev->dev_private; 537 struct radeon_device *rdev = ddev->dev_private;
529 int temp; 538 int temp;
530 539
@@ -536,6 +545,23 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
536 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 545 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
537} 546}
538 547
548static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
549 struct device_attribute *attr,
550 char *buf)
551{
552 struct drm_device *ddev = dev_get_drvdata(dev);
553 struct radeon_device *rdev = ddev->dev_private;
554 int hyst = to_sensor_dev_attr(attr)->index;
555 int temp;
556
557 if (hyst)
558 temp = rdev->pm.dpm.thermal.min_temp;
559 else
560 temp = rdev->pm.dpm.thermal.max_temp;
561
562 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
563}
564
539static ssize_t radeon_hwmon_show_name(struct device *dev, 565static ssize_t radeon_hwmon_show_name(struct device *dev,
540 struct device_attribute *attr, 566 struct device_attribute *attr,
541 char *buf) 567 char *buf)
@@ -544,16 +570,37 @@ static ssize_t radeon_hwmon_show_name(struct device *dev,
544} 570}
545 571
546static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 572static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
573static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
574static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
547static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 575static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
548 576
549static struct attribute *hwmon_attributes[] = { 577static struct attribute *hwmon_attributes[] = {
550 &sensor_dev_attr_temp1_input.dev_attr.attr, 578 &sensor_dev_attr_temp1_input.dev_attr.attr,
579 &sensor_dev_attr_temp1_crit.dev_attr.attr,
580 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
551 &sensor_dev_attr_name.dev_attr.attr, 581 &sensor_dev_attr_name.dev_attr.attr,
552 NULL 582 NULL
553}; 583};
554 584
585static umode_t hwmon_attributes_visible(struct kobject *kobj,
586 struct attribute *attr, int index)
587{
588 struct device *dev = container_of(kobj, struct device, kobj);
589 struct drm_device *ddev = dev_get_drvdata(dev);
590 struct radeon_device *rdev = ddev->dev_private;
591
592 /* Skip limit attributes if DPM is not enabled */
593 if (rdev->pm.pm_method != PM_METHOD_DPM &&
594 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
595 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
596 return 0;
597
598 return attr->mode;
599}
600
555static const struct attribute_group hwmon_attrgroup = { 601static const struct attribute_group hwmon_attrgroup = {
556 .attrs = hwmon_attributes, 602 .attrs = hwmon_attributes,
603 .is_visible = hwmon_attributes_visible,
557}; 604};
558 605
559static int radeon_hwmon_init(struct radeon_device *rdev) 606static int radeon_hwmon_init(struct radeon_device *rdev)
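The two new threshold attributes only make sense when DPM supplies thermal limits, so instead of registering separate attribute lists the group gains an .is_visible callback: sysfs calls it for every attribute at registration time, and returning 0 omits that file while returning attr->mode exposes it with its declared permissions. A stripped-down sketch of the mechanism, with the gating condition left hypothetical:

static umode_t example_attrs_visible(struct kobject *kobj,
                                     struct attribute *attr, int index)
{
        /* hide the limit attribute when the hypothetical feature_enabled flag is false */
        if (!feature_enabled && attr == &sensor_dev_attr_temp1_crit.dev_attr.attr)
                return 0;

        return attr->mode;      /* otherwise keep the declared permissions */
}

static const struct attribute_group example_attrgroup = {
        .attrs          = hwmon_attributes,
        .is_visible     = example_attrs_visible,
};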
@@ -870,10 +917,13 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
870 917
871 radeon_dpm_post_set_power_state(rdev); 918 radeon_dpm_post_set_power_state(rdev);
872 919
873 /* force low perf level for thermal */ 920 if (rdev->asic->dpm.force_performance_level) {
874 if (rdev->pm.dpm.thermal_active && 921 if (rdev->pm.dpm.thermal_active)
875 rdev->asic->dpm.force_performance_level) { 922 /* force low perf level for thermal */
876 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 923 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
924 else
925 /* otherwise, enable auto */
926 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
877 } 927 }
878 928
879done: 929done:
@@ -952,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
952{ 1002{
953 /* set up the default clocks if the MC ucode is loaded */ 1003 /* set up the default clocks if the MC ucode is loaded */
954 if ((rdev->family >= CHIP_BARTS) && 1004 if ((rdev->family >= CHIP_BARTS) &&
955 (rdev->family <= CHIP_HAINAN) && 1005 (rdev->family <= CHIP_CAYMAN) &&
956 rdev->mc_fw) { 1006 rdev->mc_fw) {
957 if (rdev->pm.default_vddc) 1007 if (rdev->pm.default_vddc)
958 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1008 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -996,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
996 if (ret) { 1046 if (ret) {
997 DRM_ERROR("radeon: dpm resume failed\n"); 1047 DRM_ERROR("radeon: dpm resume failed\n");
998 if ((rdev->family >= CHIP_BARTS) && 1048 if ((rdev->family >= CHIP_BARTS) &&
999 (rdev->family <= CHIP_HAINAN) && 1049 (rdev->family <= CHIP_CAYMAN) &&
1000 rdev->mc_fw) { 1050 rdev->mc_fw) {
1001 if (rdev->pm.default_vddc) 1051 if (rdev->pm.default_vddc)
1002 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1052 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1047,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1047 radeon_pm_init_profile(rdev); 1097 radeon_pm_init_profile(rdev);
1048 /* set up the default clocks if the MC ucode is loaded */ 1098 /* set up the default clocks if the MC ucode is loaded */
1049 if ((rdev->family >= CHIP_BARTS) && 1099 if ((rdev->family >= CHIP_BARTS) &&
1050 (rdev->family <= CHIP_HAINAN) && 1100 (rdev->family <= CHIP_CAYMAN) &&
1051 rdev->mc_fw) { 1101 rdev->mc_fw) {
1052 if (rdev->pm.default_vddc) 1102 if (rdev->pm.default_vddc)
1053 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1103 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1102,9 +1152,10 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1102{ 1152{
1103 int ret; 1153 int ret;
1104 1154
1105 /* default to performance state */ 1155 /* default to balanced state */
1106 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1156 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1107 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1157 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1158 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1108 rdev->pm.default_sclk = rdev->clock.default_sclk; 1159 rdev->pm.default_sclk = rdev->clock.default_sclk;
1109 rdev->pm.default_mclk = rdev->clock.default_mclk; 1160 rdev->pm.default_mclk = rdev->clock.default_mclk;
1110 rdev->pm.current_sclk = rdev->clock.default_sclk; 1161 rdev->pm.current_sclk = rdev->clock.default_sclk;
@@ -1132,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1132 if (ret) { 1183 if (ret) {
1133 rdev->pm.dpm_enabled = false; 1184 rdev->pm.dpm_enabled = false;
1134 if ((rdev->family >= CHIP_BARTS) && 1185 if ((rdev->family >= CHIP_BARTS) &&
1135 (rdev->family <= CHIP_HAINAN) && 1186 (rdev->family <= CHIP_CAYMAN) &&
1136 rdev->mc_fw) { 1187 rdev->mc_fw) {
1137 if (rdev->pm.default_vddc) 1188 if (rdev->pm.default_vddc)
1138 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1189 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 46a25f037b84..18254e1c3e71 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
839 * packet that is the root issue 839 * packet that is the root issue
840 */ 840 */
841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; 841 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
842 for (j = 0; j <= (count + 32); j++) { 842 if (ring->ready) {
843 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); 843 for (j = 0; j <= (count + 32); j++) {
844 i = (i + 1) & ring->ptr_mask; 844 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
845 i = (i + 1) & ring->ptr_mask;
846 }
845 } 847 }
846 return 0; 848 return 0;
847} 849}
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index eafd8160a155..f7e367815964 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -27,6 +27,26 @@ TRACE_EVENT(radeon_bo_create,
27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
28); 28);
29 29
30TRACE_EVENT(radeon_cs,
31 TP_PROTO(struct radeon_cs_parser *p),
32 TP_ARGS(p),
33 TP_STRUCT__entry(
34 __field(u32, ring)
35 __field(u32, dw)
36 __field(u32, fences)
37 ),
38
39 TP_fast_assign(
40 __entry->ring = p->ring;
41 __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
42 __entry->fences = radeon_fence_count_emitted(
43 p->rdev, p->ring);
44 ),
45 TP_printk("ring=%u, dw=%u, fences=%u",
46 __entry->ring, __entry->dw,
47 __entry->fences)
48);
49
30DECLARE_EVENT_CLASS(radeon_fence_request, 50DECLARE_EVENT_CLASS(radeon_fence_request,
31 51
32 TP_PROTO(struct drm_device *dev, u32 seqno), 52 TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -53,13 +73,6 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
53 TP_ARGS(dev, seqno) 73 TP_ARGS(dev, seqno)
54); 74);
55 75
56DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
57
58 TP_PROTO(struct drm_device *dev, u32 seqno),
59
60 TP_ARGS(dev, seqno)
61);
62
63DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, 76DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
64 77
65 TP_PROTO(struct drm_device *dev, u32 seqno), 78 TP_PROTO(struct drm_device *dev, u32 seqno),
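For context, radeon_trace.h is a standard ftrace event header: each TRACE_EVENT() between the multi-read include guards expands into the trace_radeon_cs()-style static inline called from radeon_cs.c above, plus the ring-buffer plumbing behind it. The pre-existing, unchanged tail of such a header usually looks roughly like this and is what actually instantiates the events:

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE radeon_trace
#include <trace/define_trace.h>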
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1a01bbff9bfa..a0f11856ddde 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 /* TODO: is this still necessary on NI+ ? */ 479 if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
480 if ((cmd == 0 || cmd == 0x3) &&
481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 480 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 481 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
483 start, end); 482 start, end);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b8074a8ec75a..9566b5940a5a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -274,19 +274,26 @@ static void rs400_mc_init(struct radeon_device *rdev)
274 274
275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 275uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
276{ 276{
277 unsigned long flags;
277 uint32_t r; 278 uint32_t r;
278 279
280 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
279 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 281 WREG32(RS480_NB_MC_INDEX, reg & 0xff);
280 r = RREG32(RS480_NB_MC_DATA); 282 r = RREG32(RS480_NB_MC_DATA);
281 WREG32(RS480_NB_MC_INDEX, 0xff); 283 WREG32(RS480_NB_MC_INDEX, 0xff);
284 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
282 return r; 285 return r;
283} 286}
284 287
285void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 288void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
286{ 289{
290 unsigned long flags;
291
292 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
287 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 293 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
288 WREG32(RS480_NB_MC_DATA, (v)); 294 WREG32(RS480_NB_MC_DATA, (v));
289 WREG32(RS480_NB_MC_INDEX, 0xff); 295 WREG32(RS480_NB_MC_INDEX, 0xff);
296 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
290} 297}
291 298
292#if defined(CONFIG_DEBUG_FS) 299#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 670b555d2ca2..6acba8017b9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -847,16 +847,26 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
847 847
848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 848uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 849{
850 unsigned long flags;
851 u32 r;
852
853 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
850 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 854 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
851 S_000070_MC_IND_CITF_ARB0(1)); 855 S_000070_MC_IND_CITF_ARB0(1));
852 return RREG32(R_000074_MC_IND_DATA); 856 r = RREG32(R_000074_MC_IND_DATA);
857 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
858 return r;
853} 859}
854 860
855void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 861void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
856{ 862{
863 unsigned long flags;
864
865 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
857 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 866 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
858 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 867 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
859 WREG32(R_000074_MC_IND_DATA, v); 868 WREG32(R_000074_MC_IND_DATA, v);
869 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
860} 870}
861 871
862static void rs600_debugfs(struct radeon_device *rdev) 872static void rs600_debugfs(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index d8ddfb34545d..1447d794c22a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -631,20 +631,27 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
631 631
632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 632uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
633{ 633{
634 unsigned long flags;
634 uint32_t r; 635 uint32_t r;
635 636
637 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
636 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 638 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
637 r = RREG32(R_00007C_MC_DATA); 639 r = RREG32(R_00007C_MC_DATA);
638 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 640 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
641 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
639 return r; 642 return r;
640} 643}
641 644
642void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 645void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
643{ 646{
647 unsigned long flags;
648
649 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
644 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 650 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
645 S_000078_MC_IND_WR_EN(1)); 651 S_000078_MC_IND_WR_EN(1));
646 WREG32(R_00007C_MC_DATA, v); 652 WREG32(R_00007C_MC_DATA, v);
647 WREG32(R_000078_MC_INDEX, 0x7F); 653 WREG32(R_000078_MC_INDEX, 0x7F);
654 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
648} 655}
649 656
650static void rs690_mc_program(struct radeon_device *rdev) 657static void rs690_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index d1a1ce73bd45..6af8505cf4d2 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -62,9 +62,7 @@ static void rs780_get_pm_mode_parameters(struct radeon_device *rdev)
62 radeon_crtc = to_radeon_crtc(crtc); 62 radeon_crtc = to_radeon_crtc(crtc);
63 pi->crtc_id = radeon_crtc->crtc_id; 63 pi->crtc_id = radeon_crtc->crtc_id;
64 if (crtc->mode.htotal && crtc->mode.vtotal) 64 if (crtc->mode.htotal && crtc->mode.vtotal)
65 pi->refresh_rate = 65 pi->refresh_rate = drm_mode_vrefresh(&crtc->mode);
66 (crtc->mode.clock * 1000) /
67 (crtc->mode.htotal * crtc->mode.vtotal);
68 break; 66 break;
69 } 67 }
70 } 68 }
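
The open-coded refresh-rate computation is replaced by drm_mode_vrefresh(), which does the same pixel-clock / (htotal * vtotal) arithmetic (mode->clock is in kHz, hence the factor of 1000) and, unlike the removed lines, also rounds and accounts for interlaced, doublescan and vscan timings. Worked with the standard CEA 1080p60 timing as an illustration:

/* Illustration only: the standard CEA 1080p60 timing. */
static unsigned int vrefresh_1080p60_example(void)
{
	unsigned int clock  = 148500;	/* kHz, as in drm_display_mode.clock */
	unsigned int htotal = 2200, vtotal = 1125;

	return (clock * 1000) / (htotal * vtotal);	/* 148500000 / 2475000 = 60 */
}
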
@@ -376,9 +374,8 @@ static void rs780_disable_vbios_powersaving(struct radeon_device *rdev)
376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 374 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000);
377} 375}
378 376
379static void rs780_force_voltage_to_high(struct radeon_device *rdev) 377static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage)
380{ 378{
381 struct igp_power_info *pi = rs780_get_pi(rdev);
382 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 379 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
383 380
384 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && 381 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) &&
@@ -390,7 +387,7 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
390 udelay(1); 387 udelay(1);
391 388
392 WREG32_P(FVTHROT_PWM_CTRL_REG0, 389 WREG32_P(FVTHROT_PWM_CTRL_REG0,
393 STARTING_PWM_HIGHTIME(pi->max_voltage), 390 STARTING_PWM_HIGHTIME(voltage),
394 ~STARTING_PWM_HIGHTIME_MASK); 391 ~STARTING_PWM_HIGHTIME_MASK);
395 392
396 WREG32_P(FVTHROT_PWM_CTRL_REG0, 393 WREG32_P(FVTHROT_PWM_CTRL_REG0,
@@ -404,6 +401,26 @@ static void rs780_force_voltage_to_high(struct radeon_device *rdev)
404 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 401 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
405} 402}
406 403
404static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div)
405{
406 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps);
407
408 if (current_state->sclk_low == current_state->sclk_high)
409 return;
410
411 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL);
412
413 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div),
414 ~FORCED_FEEDBACK_DIV_MASK);
415 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div),
416 ~STARTING_FEEDBACK_DIV_MASK);
417 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
418
419 udelay(100);
420
421 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL);
422}
423
407static int rs780_set_engine_clock_scaling(struct radeon_device *rdev, 424static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
408 struct radeon_ps *new_ps, 425 struct radeon_ps *new_ps,
409 struct radeon_ps *old_ps) 426 struct radeon_ps *old_ps)
@@ -432,17 +449,13 @@ static int rs780_set_engine_clock_scaling(struct radeon_device *rdev,
432 if (ret) 449 if (ret)
433 return ret; 450 return ret;
434 451
435 WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 452 if ((min_dividers.ref_div != max_dividers.ref_div) ||
436 453 (min_dividers.post_div != max_dividers.post_div) ||
437 WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), 454 (max_dividers.ref_div != current_max_dividers.ref_div) ||
438 ~FORCED_FEEDBACK_DIV_MASK); 455 (max_dividers.post_div != current_max_dividers.post_div))
439 WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), 456 return -EINVAL;
440 ~STARTING_FEEDBACK_DIV_MASK);
441 WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV);
442
443 udelay(100);
444 457
445 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 458 rs780_force_fbdiv(rdev, max_dividers.fb_div);
446 459
447 if (max_dividers.fb_div > min_dividers.fb_div) { 460 if (max_dividers.fb_div > min_dividers.fb_div) {
448 WREG32_P(FVTHROT_FBDIV_REG0, 461 WREG32_P(FVTHROT_FBDIV_REG0,
@@ -486,6 +499,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
486 (new_state->sclk_low == old_state->sclk_low)) 499 (new_state->sclk_low == old_state->sclk_low))
487 return; 500 return;
488 501
502 if (new_state->sclk_high == new_state->sclk_low)
503 return;
504
489 rs780_clk_scaling_enable(rdev, true); 505 rs780_clk_scaling_enable(rdev, true);
490} 506}
491 507
@@ -649,7 +665,7 @@ int rs780_dpm_set_power_state(struct radeon_device *rdev)
649 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 665 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
650 666
651 if (pi->voltage_control) { 667 if (pi->voltage_control) {
652 rs780_force_voltage_to_high(rdev); 668 rs780_force_voltage(rdev, pi->max_voltage);
653 mdelay(5); 669 mdelay(5);
654 } 670 }
655 671
@@ -717,14 +733,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
717 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 733 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
718 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 734 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
719 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 735 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
720 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
721 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
722 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
723 } else { 736 } else {
724 rps->vclk = 0; 737 rps->vclk = 0;
725 rps->dclk = 0; 738 rps->dclk = 0;
726 } 739 }
727 740
741 if (r600_is_uvd_state(rps->class, rps->class2)) {
742 if ((rps->vclk == 0) || (rps->dclk == 0)) {
743 rps->vclk = RS780_DEFAULT_VCLK_FREQ;
744 rps->dclk = RS780_DEFAULT_DCLK_FREQ;
745 }
746 }
747
728 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 748 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
729 rdev->pm.dpm.boot_ps = rps; 749 rdev->pm.dpm.boot_ps = rps;
730 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 750 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
@@ -986,3 +1006,55 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 1006 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
987 ps->sclk_high, ps->max_voltage); 1007 ps->sclk_high, ps->max_voltage);
988} 1008}
1009
1010int rs780_dpm_force_performance_level(struct radeon_device *rdev,
1011 enum radeon_dpm_forced_level level)
1012{
1013 struct igp_power_info *pi = rs780_get_pi(rdev);
1014 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
1015 struct igp_ps *ps = rs780_get_ps(rps);
1016 struct atom_clock_dividers dividers;
1017 int ret;
1018
1019 rs780_clk_scaling_enable(rdev, false);
1020 rs780_voltage_scaling_enable(rdev, false);
1021
1022 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1023 if (pi->voltage_control)
1024 rs780_force_voltage(rdev, pi->max_voltage);
1025
1026 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1027 ps->sclk_high, false, &dividers);
1028 if (ret)
1029 return ret;
1030
1031 rs780_force_fbdiv(rdev, dividers.fb_div);
1032 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1033 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1034 ps->sclk_low, false, &dividers);
1035 if (ret)
1036 return ret;
1037
1038 rs780_force_fbdiv(rdev, dividers.fb_div);
1039
1040 if (pi->voltage_control)
1041 rs780_force_voltage(rdev, pi->min_voltage);
1042 } else {
1043 if (pi->voltage_control)
1044 rs780_force_voltage(rdev, pi->max_voltage);
1045
1046 if (ps->sclk_high != ps->sclk_low) {
1047 WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV);
1048 rs780_clk_scaling_enable(rdev, true);
1049 }
1050
1051 if (pi->voltage_control) {
1052 rs780_voltage_scaling_enable(rdev, true);
1053 rs780_enable_voltage_scaling(rdev, rps);
1054 }
1055 }
1056
1057 rdev->pm.dpm.forced_level = level;
1058
1059 return 0;
1060}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8ea1573ae820..873eb4b193b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -209,19 +209,27 @@ static void rv515_mc_init(struct radeon_device *rdev)
209 209
210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 210uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
211{ 211{
212 unsigned long flags;
212 uint32_t r; 213 uint32_t r;
213 214
215 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
214 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 216 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
215 r = RREG32(MC_IND_DATA); 217 r = RREG32(MC_IND_DATA);
216 WREG32(MC_IND_INDEX, 0); 218 WREG32(MC_IND_INDEX, 0);
219 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
220
217 return r; 221 return r;
218} 222}
219 223
220void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 224void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
221{ 225{
226 unsigned long flags;
227
228 spin_lock_irqsave(&rdev->mc_idx_lock, flags);
222 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 229 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
223 WREG32(MC_IND_DATA, (v)); 230 WREG32(MC_IND_DATA, (v));
224 WREG32(MC_IND_INDEX, 0); 231 WREG32(MC_IND_INDEX, 0);
232 spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
225} 233}
226 234
227#if defined(CONFIG_DEBUG_FS) 235#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index ab1f2016f21e..5811d277a36a 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1758,8 +1758,6 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
1758 1758
1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1760 1760
1761 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1762
1763 return 0; 1761 return 0;
1764} 1762}
1765 1763
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 8cbb85dae5aa..913b025ae9b3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2064,12 +2064,6 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); 2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2066 2066
2067 ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
2068 if (ret) {
2069 DRM_ERROR("rv770_dpm_force_performance_level failed\n");
2070 return ret;
2071 }
2072
2073 return 0; 2067 return 0;
2074} 2068}
2075 2069
@@ -2147,14 +2141,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2141 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2142 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2143 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2150 } else if (r600_is_uvd_state(rps->class, rps->class2)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 } else { 2144 } else {
2154 rps->vclk = 0; 2145 rps->vclk = 0;
2155 rps->dclk = 0; 2146 rps->dclk = 0;
2156 } 2147 }
2157 2148
2149 if (r600_is_uvd_state(rps->class, rps->class2)) {
2150 if ((rps->vclk == 0) || (rps->dclk == 0)) {
2151 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2152 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2153 }
2154 }
2155
2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 2156 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
2159 rdev->pm.dpm.boot_ps = rps; 2157 rdev->pm.dpm.boot_ps = rps;
2160 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2158 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index ab95da570215..b2a224407365 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -274,8 +274,8 @@ static const u8 cayman_smc_int_vectors[] =
274 0x08, 0x72, 0x08, 0x72 274 0x08, 0x72, 0x08, 0x72
275}; 275};
276 276
277int rv770_set_smc_sram_address(struct radeon_device *rdev, 277static int rv770_set_smc_sram_address(struct radeon_device *rdev,
278 u16 smc_address, u16 limit) 278 u16 smc_address, u16 limit)
279{ 279{
280 u32 addr; 280 u32 addr;
281 281
@@ -296,9 +296,10 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
296 u16 smc_start_address, const u8 *src, 296 u16 smc_start_address, const u8 *src,
297 u16 byte_count, u16 limit) 297 u16 byte_count, u16 limit)
298{ 298{
299 unsigned long flags;
299 u32 data, original_data, extra_shift; 300 u32 data, original_data, extra_shift;
300 u16 addr; 301 u16 addr;
301 int ret; 302 int ret = 0;
302 303
303 if (smc_start_address & 3) 304 if (smc_start_address & 3)
304 return -EINVAL; 305 return -EINVAL;
@@ -307,13 +308,14 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
307 308
308 addr = smc_start_address; 309 addr = smc_start_address;
309 310
311 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
310 while (byte_count >= 4) { 312 while (byte_count >= 4) {
311 /* SMC address space is BE */ 313 /* SMC address space is BE */
312 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
313 315
314 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 ret = rv770_set_smc_sram_address(rdev, addr, limit);
315 if (ret) 317 if (ret)
316 return ret; 318 goto done;
317 319
318 WREG32(SMC_SRAM_DATA, data); 320 WREG32(SMC_SRAM_DATA, data);
319 321
@@ -328,7 +330,7 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
328 330
329 ret = rv770_set_smc_sram_address(rdev, addr, limit); 331 ret = rv770_set_smc_sram_address(rdev, addr, limit);
330 if (ret) 332 if (ret)
331 return ret; 333 goto done;
332 334
333 original_data = RREG32(SMC_SRAM_DATA); 335 original_data = RREG32(SMC_SRAM_DATA);
334 336
@@ -346,12 +348,15 @@ int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
346 348
347 ret = rv770_set_smc_sram_address(rdev, addr, limit); 349 ret = rv770_set_smc_sram_address(rdev, addr, limit);
348 if (ret) 350 if (ret)
349 return ret; 351 goto done;
350 352
351 WREG32(SMC_SRAM_DATA, data); 353 WREG32(SMC_SRAM_DATA, data);
352 } 354 }
353 355
354 return 0; 356done:
357 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
358
359 return ret;
355} 360}
356 361
357static int rv770_program_interrupt_vectors(struct radeon_device *rdev, 362static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
@@ -461,12 +466,15 @@ PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
461 466
462static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 467static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
463{ 468{
469 unsigned long flags;
464 u16 i; 470 u16 i;
465 471
472 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
466 for (i = 0; i < limit; i += 4) { 473 for (i = 0; i < limit; i += 4) {
467 rv770_set_smc_sram_address(rdev, i, limit); 474 rv770_set_smc_sram_address(rdev, i, limit);
468 WREG32(SMC_SRAM_DATA, 0); 475 WREG32(SMC_SRAM_DATA, 0);
469 } 476 }
477 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
470} 478}
471 479
472int rv770_load_smc_ucode(struct radeon_device *rdev, 480int rv770_load_smc_ucode(struct radeon_device *rdev,
@@ -595,27 +603,29 @@ int rv770_load_smc_ucode(struct radeon_device *rdev,
595int rv770_read_smc_sram_dword(struct radeon_device *rdev, 603int rv770_read_smc_sram_dword(struct radeon_device *rdev,
596 u16 smc_address, u32 *value, u16 limit) 604 u16 smc_address, u32 *value, u16 limit)
597{ 605{
606 unsigned long flags;
598 int ret; 607 int ret;
599 608
609 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
600 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 610 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
601 if (ret) 611 if (ret == 0)
602 return ret; 612 *value = RREG32(SMC_SRAM_DATA);
603 613 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
604 *value = RREG32(SMC_SRAM_DATA);
605 614
606 return 0; 615 return ret;
607} 616}
608 617
609int rv770_write_smc_sram_dword(struct radeon_device *rdev, 618int rv770_write_smc_sram_dword(struct radeon_device *rdev,
610 u16 smc_address, u32 value, u16 limit) 619 u16 smc_address, u32 value, u16 limit)
611{ 620{
621 unsigned long flags;
612 int ret; 622 int ret;
613 623
624 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
614 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 625 ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
615 if (ret) 626 if (ret == 0)
616 return ret; 627 WREG32(SMC_SRAM_DATA, value);
628 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
617 629
618 WREG32(SMC_SRAM_DATA, value); 630 return ret;
619
620 return 0;
621} 631}
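
rv770_set_smc_sram_address() becomes static, and every SMC SRAM access now holds the new rdev->smc_idx_lock across the whole index-then-data sequence; the early "return ret" exits become "goto done" so the lock is always dropped on error paths. The same treatment is applied to si_smc.c further down. A condensed sketch of the single-exit-under-lock shape, with smc_set_addr() and SMC_DATA as placeholders for the per-ASIC helpers:

/*
 * Sketch only: smc_set_addr()/SMC_DATA stand in for the per-ASIC helpers;
 * smc_idx_lock is the spinlock this series adds to struct radeon_device.
 */
static int smc_copy_words(struct radeon_device *rdev, u16 addr,
			  const u32 *src, unsigned int count, u16 limit)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	while (count--) {
		ret = smc_set_addr(rdev, addr, limit);
		if (ret)
			goto done;	/* bail out, but still unlock */
		WREG32(SMC_DATA, *src++);
		addr += 4;
	}
done:
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return ret;
}
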
diff --git a/drivers/gpu/drm/radeon/rv770_smc.h b/drivers/gpu/drm/radeon/rv770_smc.h
index f78d92a4b325..3b2c963c4880 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.h
+++ b/drivers/gpu/drm/radeon/rv770_smc.h
@@ -187,8 +187,6 @@ typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 187#define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C
188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 188#define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0
189 189
190int rv770_set_smc_sram_address(struct radeon_device *rdev,
191 u16 smc_address, u16 limit);
192int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 190int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
193 u16 smc_start_address, const u8 *src, 191 u16 smc_start_address, const u8 *src,
194 u16 byte_count, u16 limit); 192 u16 byte_count, u16 limit);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9fe60e542922..1ae277152cc7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -852,7 +852,7 @@
852#define AFMT_VBI_PACKET_CONTROL 0x7608 852#define AFMT_VBI_PACKET_CONTROL 0x7608
853# define AFMT_GENERIC0_UPDATE (1 << 2) 853# define AFMT_GENERIC0_UPDATE (1 << 2)
854#define AFMT_INFOFRAME_CONTROL0 0x760c 854#define AFMT_INFOFRAME_CONTROL0 0x760c
855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 855# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
856# define AFMT_AUDIO_INFO_UPDATE (1 << 7) 856# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
857# define AFMT_MPEG_INFO_UPDATE (1 << 10) 857# define AFMT_MPEG_INFO_UPDATE (1 << 10)
858#define AFMT_GENERIC0_7 0x7610 858#define AFMT_GENERIC0_7 0x7610
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3e23b757dcfa..c354c1094967 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -83,6 +83,8 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
83 uint64_t pe, 83 uint64_t pe,
84 uint64_t addr, unsigned count, 84 uint64_t addr, unsigned count,
85 uint32_t incr, uint32_t flags); 85 uint32_t incr, uint32_t flags);
86static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
87 bool enable);
86 88
87static const u32 verde_rlc_save_restore_register_list[] = 89static const u32 verde_rlc_save_restore_register_list[] =
88{ 90{
@@ -3386,6 +3388,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3386 u32 rb_bufsz; 3388 u32 rb_bufsz;
3387 int r; 3389 int r;
3388 3390
3391 si_enable_gui_idle_interrupt(rdev, false);
3392
3389 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3393 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3390 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3394 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3391 3395
@@ -3501,6 +3505,8 @@ static int si_cp_resume(struct radeon_device *rdev)
3501 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3505 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3502 } 3506 }
3503 3507
3508 si_enable_gui_idle_interrupt(rdev, true);
3509
3504 return 0; 3510 return 0;
3505} 3511}
3506 3512
@@ -4888,7 +4894,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
4888{ 4894{
4889 u32 tmp; 4895 u32 tmp;
4890 4896
4891 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 4897 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4898 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
4893 WREG32(RLC_TTOP_D, tmp); 4899 WREG32(RLC_TTOP_D, tmp);
4894 4900
@@ -5250,6 +5256,7 @@ void si_update_cg(struct radeon_device *rdev,
5250 u32 block, bool enable) 5256 u32 block, bool enable)
5251{ 5257{
5252 if (block & RADEON_CG_BLOCK_GFX) { 5258 if (block & RADEON_CG_BLOCK_GFX) {
5259 si_enable_gui_idle_interrupt(rdev, false);
5253 /* order matters! */ 5260 /* order matters! */
5254 if (enable) { 5261 if (enable) {
5255 si_enable_mgcg(rdev, true); 5262 si_enable_mgcg(rdev, true);
@@ -5258,6 +5265,7 @@ void si_update_cg(struct radeon_device *rdev,
5258 si_enable_cgcg(rdev, false); 5265 si_enable_cgcg(rdev, false);
5259 si_enable_mgcg(rdev, false); 5266 si_enable_mgcg(rdev, false);
5260 } 5267 }
5268 si_enable_gui_idle_interrupt(rdev, true);
5261 } 5269 }
5262 5270
5263 if (block & RADEON_CG_BLOCK_MC) { 5271 if (block & RADEON_CG_BLOCK_MC) {
@@ -5408,7 +5416,7 @@ static void si_init_pg(struct radeon_device *rdev)
5408 si_init_dma_pg(rdev); 5416 si_init_dma_pg(rdev);
5409 } 5417 }
5410 si_init_ao_cu_mask(rdev); 5418 si_init_ao_cu_mask(rdev);
5411 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5419 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5412 si_init_gfx_cgpg(rdev); 5420 si_init_gfx_cgpg(rdev);
5413 } 5421 }
5414 si_enable_dma_pg(rdev, true); 5422 si_enable_dma_pg(rdev, true);
@@ -5560,7 +5568,9 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
5560{ 5568{
5561 u32 tmp; 5569 u32 tmp;
5562 5570
5563 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5571 tmp = RREG32(CP_INT_CNTL_RING0) &
5572 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5573 WREG32(CP_INT_CNTL_RING0, tmp);
5564 WREG32(CP_INT_CNTL_RING1, 0); 5574 WREG32(CP_INT_CNTL_RING1, 0);
5565 WREG32(CP_INT_CNTL_RING2, 0); 5575 WREG32(CP_INT_CNTL_RING2, 0);
5566 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5576 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5685,7 +5695,7 @@ static int si_irq_init(struct radeon_device *rdev)
5685 5695
5686int si_irq_set(struct radeon_device *rdev) 5696int si_irq_set(struct radeon_device *rdev)
5687{ 5697{
5688 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 5698 u32 cp_int_cntl;
5689 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5699 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
5690 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5700 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5691 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; 5701 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
@@ -5706,6 +5716,9 @@ int si_irq_set(struct radeon_device *rdev)
5706 return 0; 5716 return 0;
5707 } 5717 }
5708 5718
5719 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
5720 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5721
5709 if (!ASIC_IS_NODCE(rdev)) { 5722 if (!ASIC_IS_NODCE(rdev)) {
5710 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 5723 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
5711 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 5724 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
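
Two related changes in si.c: the GUI idle interrupt is switched off around CP ring bring-up and around clock-gating reconfiguration, and si_disable_interrupt_state()/si_irq_set() no longer force CNTX_BUSY_INT_ENABLE and CNTX_EMPTY_INT_ENABLE on; they read CP_INT_CNTL_RING0 and preserve whatever state those two bits already have, which appears to be managed by the newly wired-in si_enable_gui_idle_interrupt(). The difference, as a sketch:

/* Sketch only; CNTX_*_INT_ENABLE and CP_INT_CNTL_RING0 are from sid.h. */
static u32 si_cp_int_cntl_base_sketch(struct radeon_device *rdev)
{
	u32 keep = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;

	/*
	 * Old: "return keep;" unconditionally re-enabled both bits.
	 * New: preserve only their current state and clear everything else;
	 * the bits themselves are toggled elsewhere.
	 */
	return RREG32(CP_INT_CNTL_RING0) & keep;
}
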
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 5be9b4e72350..9ace28702c76 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2910 bool disable_sclk_switching = false; 2910 bool disable_sclk_switching = false;
2911 u32 mclk, sclk; 2911 u32 mclk, sclk;
2912 u16 vddc, vddci; 2912 u16 vddc, vddci;
2913 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2913 int i; 2914 int i;
2914 2915
2915 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2916 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2943 } 2944 }
2944 } 2945 }
2945 2946
2947 /* limit clocks to max supported clocks based on voltage dependency tables */
2948 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2949 &max_sclk_vddc);
2950 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2951 &max_mclk_vddci);
2952 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2953 &max_mclk_vddc);
2954
2955 for (i = 0; i < ps->performance_level_count; i++) {
2956 if (max_sclk_vddc) {
2957 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2958 ps->performance_levels[i].sclk = max_sclk_vddc;
2959 }
2960 if (max_mclk_vddci) {
2961 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2962 ps->performance_levels[i].mclk = max_mclk_vddci;
2963 }
2964 if (max_mclk_vddc) {
2965 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2966 ps->performance_levels[i].mclk = max_mclk_vddc;
2967 }
2968 }
2969
2946 /* XXX validate the min clocks required for display */ 2970 /* XXX validate the min clocks required for display */
2947 2971
2948 if (disable_mclk_switching) { 2972 if (disable_mclk_switching) {
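
The new block caps each performance level's sclk/mclk at the highest clock that has an entry in the corresponding voltage dependency table, so the driver never programs a clock for which no validated voltage exists; a cap of 0 means the table is absent and the clamp is skipped. btc_get_max_clock_from_voltage_dependency_table() presumably lives in btc_dpm.c, which this series also touches. A simple-minded sketch of what such a lookup amounts to, assuming the radeon_clock_voltage_dependency_table layout from radeon.h (the real helper may instead rely on the table being sorted and take its last entry):

/* Sketch only; see btc_dpm.c for the real helper. */
static u32 max_clock_in_dep_table(struct radeon_clock_voltage_dependency_table *t)
{
	u32 max = 0;
	u32 i;

	for (i = 0; i < t->count; i++)
		if (t->entries[i].clk > max)
			max = t->entries[i].clk;

	return max;	/* 0 means "no table"; the caller then skips the cap */
}
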
@@ -6075,12 +6099,6 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
6075 return ret; 6099 return ret;
6076 } 6100 }
6077 6101
6078 ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
6079 if (ret) {
6080 DRM_ERROR("si_dpm_force_performance_level failed\n");
6081 return ret;
6082 }
6083
6084 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 6102 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
6085 RADEON_CG_BLOCK_MC | 6103 RADEON_CG_BLOCK_MC |
6086 RADEON_CG_BLOCK_SDMA | 6104 RADEON_CG_BLOCK_SDMA |
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 5f524c0a541e..d422a1cbf727 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -29,8 +29,8 @@
29#include "ppsmc.h" 29#include "ppsmc.h"
30#include "radeon_ucode.h" 30#include "radeon_ucode.h"
31 31
32int si_set_smc_sram_address(struct radeon_device *rdev, 32static int si_set_smc_sram_address(struct radeon_device *rdev,
33 u32 smc_address, u32 limit) 33 u32 smc_address, u32 limit)
34{ 34{
35 if (smc_address & 3) 35 if (smc_address & 3)
36 return -EINVAL; 36 return -EINVAL;
@@ -47,7 +47,8 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
47 u32 smc_start_address, 47 u32 smc_start_address,
48 const u8 *src, u32 byte_count, u32 limit) 48 const u8 *src, u32 byte_count, u32 limit)
49{ 49{
50 int ret; 50 unsigned long flags;
51 int ret = 0;
51 u32 data, original_data, addr, extra_shift; 52 u32 data, original_data, addr, extra_shift;
52 53
53 if (smc_start_address & 3) 54 if (smc_start_address & 3)
@@ -57,13 +58,14 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
57 58
58 addr = smc_start_address; 59 addr = smc_start_address;
59 60
61 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
60 while (byte_count >= 4) { 62 while (byte_count >= 4) {
61 /* SMC address space is BE */ 63 /* SMC address space is BE */
62 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
63 65
64 ret = si_set_smc_sram_address(rdev, addr, limit); 66 ret = si_set_smc_sram_address(rdev, addr, limit);
65 if (ret) 67 if (ret)
66 return ret; 68 goto done;
67 69
68 WREG32(SMC_IND_DATA_0, data); 70 WREG32(SMC_IND_DATA_0, data);
69 71
@@ -78,7 +80,7 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
78 80
79 ret = si_set_smc_sram_address(rdev, addr, limit); 81 ret = si_set_smc_sram_address(rdev, addr, limit);
80 if (ret) 82 if (ret)
81 return ret; 83 goto done;
82 84
83 original_data = RREG32(SMC_IND_DATA_0); 85 original_data = RREG32(SMC_IND_DATA_0);
84 86
@@ -96,11 +98,15 @@ int si_copy_bytes_to_smc(struct radeon_device *rdev,
96 98
97 ret = si_set_smc_sram_address(rdev, addr, limit); 99 ret = si_set_smc_sram_address(rdev, addr, limit);
98 if (ret) 100 if (ret)
99 return ret; 101 goto done;
100 102
101 WREG32(SMC_IND_DATA_0, data); 103 WREG32(SMC_IND_DATA_0, data);
102 } 104 }
103 return 0; 105
106done:
107 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
108
109 return ret;
104} 110}
105 111
106void si_start_smc(struct radeon_device *rdev) 112void si_start_smc(struct radeon_device *rdev)
@@ -203,6 +209,7 @@ PPSMC_Result si_wait_for_smc_inactive(struct radeon_device *rdev)
203 209
204int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 210int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
205{ 211{
212 unsigned long flags;
206 u32 ucode_start_address; 213 u32 ucode_start_address;
207 u32 ucode_size; 214 u32 ucode_size;
208 const u8 *src; 215 const u8 *src;
@@ -241,6 +248,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
241 return -EINVAL; 248 return -EINVAL;
242 249
243 src = (const u8 *)rdev->smc_fw->data; 250 src = (const u8 *)rdev->smc_fw->data;
251 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
244 WREG32(SMC_IND_INDEX_0, ucode_start_address); 252 WREG32(SMC_IND_INDEX_0, ucode_start_address);
245 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 253 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
246 while (ucode_size >= 4) { 254 while (ucode_size >= 4) {
@@ -253,6 +261,7 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
253 ucode_size -= 4; 261 ucode_size -= 4;
254 } 262 }
255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 263 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
264 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
256 265
257 return 0; 266 return 0;
258} 267}
@@ -260,25 +269,29 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
260int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 269int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
261 u32 *value, u32 limit) 270 u32 *value, u32 limit)
262{ 271{
272 unsigned long flags;
263 int ret; 273 int ret;
264 274
275 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
265 ret = si_set_smc_sram_address(rdev, smc_address, limit); 276 ret = si_set_smc_sram_address(rdev, smc_address, limit);
266 if (ret) 277 if (ret == 0)
267 return ret; 278 *value = RREG32(SMC_IND_DATA_0);
279 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
268 280
269 *value = RREG32(SMC_IND_DATA_0); 281 return ret;
270 return 0;
271} 282}
272 283
273int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 284int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
274 u32 value, u32 limit) 285 u32 value, u32 limit)
275{ 286{
287 unsigned long flags;
276 int ret; 288 int ret;
277 289
290 spin_lock_irqsave(&rdev->smc_idx_lock, flags);
278 ret = si_set_smc_sram_address(rdev, smc_address, limit); 291 ret = si_set_smc_sram_address(rdev, smc_address, limit);
279 if (ret) 292 if (ret == 0)
280 return ret; 293 WREG32(SMC_IND_DATA_0, value);
294 spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
281 295
282 WREG32(SMC_IND_DATA_0, value); 296 return ret;
283 return 0;
284} 297}
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 864761c0120e..96ea6db8bf57 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1319,8 +1319,6 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
1319 if (pi->enable_dpm) 1319 if (pi->enable_dpm)
1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1321 1321
1322 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1323
1324 return 0; 1322 return 0;
1325} 1323}
1326 1324
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index b07b7b8f1aff..7f998bf1cc9d 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1068,6 +1068,17 @@ static void trinity_update_requested_ps(struct radeon_device *rdev,
1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1068 pi->requested_rps.ps_priv = &pi->requested_ps;
1069} 1069}
1070 1070
1071void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
1072{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev);
1074
1075 if (pi->enable_bapm) {
1076 trinity_acquire_mutex(rdev);
1077 trinity_dpm_bapm_enable(rdev, enable);
1078 trinity_release_mutex(rdev);
1079 }
1080}
1081
1071int trinity_dpm_enable(struct radeon_device *rdev) 1082int trinity_dpm_enable(struct radeon_device *rdev)
1072{ 1083{
1073 struct trinity_power_info *pi = trinity_get_pi(rdev); 1084 struct trinity_power_info *pi = trinity_get_pi(rdev);
@@ -1091,6 +1102,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
1091 trinity_program_sclk_dpm(rdev); 1102 trinity_program_sclk_dpm(rdev);
1092 trinity_start_dpm(rdev); 1103 trinity_start_dpm(rdev);
1093 trinity_wait_for_dpm_enabled(rdev); 1104 trinity_wait_for_dpm_enabled(rdev);
1105 trinity_dpm_bapm_enable(rdev, false);
1094 trinity_release_mutex(rdev); 1106 trinity_release_mutex(rdev);
1095 1107
1096 if (rdev->irq.installed && 1108 if (rdev->irq.installed &&
@@ -1116,6 +1128,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
1116 trinity_release_mutex(rdev); 1128 trinity_release_mutex(rdev);
1117 return; 1129 return;
1118 } 1130 }
1131 trinity_dpm_bapm_enable(rdev, false);
1119 trinity_disable_clock_power_gating(rdev); 1132 trinity_disable_clock_power_gating(rdev);
1120 sumo_clear_vc(rdev); 1133 sumo_clear_vc(rdev);
1121 trinity_wait_for_level_0(rdev); 1134 trinity_wait_for_level_0(rdev);
@@ -1212,6 +1225,8 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1212 1225
1213 trinity_acquire_mutex(rdev); 1226 trinity_acquire_mutex(rdev);
1214 if (pi->enable_dpm) { 1227 if (pi->enable_dpm) {
1228 if (pi->enable_bapm)
1229 trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power);
1215 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1230 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1216 trinity_enable_power_level_0(rdev); 1231 trinity_enable_power_level_0(rdev);
1217 trinity_force_level_0(rdev); 1232 trinity_force_level_0(rdev);
@@ -1221,7 +1236,6 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
1221 trinity_force_level_0(rdev); 1236 trinity_force_level_0(rdev);
1222 trinity_unforce_levels(rdev); 1237 trinity_unforce_levels(rdev);
1223 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1238 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
1224 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1225 } 1239 }
1226 trinity_release_mutex(rdev); 1240 trinity_release_mutex(rdev);
1227 1241
@@ -1854,6 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1854 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1855 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1856 1870
1871 pi->enable_bapm = true;
1857 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1858 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1859 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
index e82df071f8b3..c261657750ca 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.h
+++ b/drivers/gpu/drm/radeon/trinity_dpm.h
@@ -108,6 +108,7 @@ struct trinity_power_info {
108 bool enable_auto_thermal_throttling; 108 bool enable_auto_thermal_throttling;
109 bool enable_dpm; 109 bool enable_dpm;
110 bool enable_sclk_ds; 110 bool enable_sclk_ds;
111 bool enable_bapm;
111 bool uvd_dpm; 112 bool uvd_dpm;
112 struct radeon_ps current_rps; 113 struct radeon_ps current_rps;
113 struct trinity_ps current_ps; 114 struct trinity_ps current_ps;
@@ -118,6 +119,7 @@ struct trinity_power_info {
118#define TRINITY_AT_DFLT 30 119#define TRINITY_AT_DFLT 30
119 120
120/* trinity_smc.c */ 121/* trinity_smc.c */
122int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
121int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123int trinity_dpm_config(struct radeon_device *rdev, bool enable);
122int trinity_uvd_dpm_config(struct radeon_device *rdev); 124int trinity_uvd_dpm_config(struct radeon_device *rdev);
123int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); 125int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index a42d89f1830c..9672bcbc7312 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
56 return 0; 56 return 0;
57} 57}
58 58
59int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
60{
61 if (enable)
62 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
63 else
64 return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
65}
66
59int trinity_dpm_config(struct radeon_device *rdev, bool enable) 67int trinity_dpm_config(struct radeon_device *rdev, bool enable)
60{ 68{
61 if (enable) 69 if (enable)
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 7266805d9786..3100fa9cb52f 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC */ 215 /* enable UMC and NC0 */
216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
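
The UVD start sequence now enables NC0 as well as the UMC: WREG32_P() is the radeon driver's read-modify-write helper, where the mask selects the bits to keep, so a mask of ~((1 << 8) | (1 << 13)) drops bits 8 and 13 and the value 1 << 13 sets bit 13 back, leaving bit 8 cleared (the "enable UMC" part of the comment) and every other bit of UVD_LMI_CTRL2 untouched. Roughly what the macro in radeon.h amounts to:

/* Sketch of WREG32_P(reg, val, mask) as used above; see radeon.h. */
static void wreg32_p_sketch(struct radeon_device *rdev, u32 reg, u32 val, u32 mask)
{
	u32 tmp = RREG32(reg);

	tmp &= mask;		/* keep only the bits selected by the mask    */
	tmp |= val & ~mask;	/* set the requested bits among those cleared */
	WREG32(reg, tmp);
}
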
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 58a5f3261c0b..a868176c258a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -218,7 +218,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
218 uint32_t key) 218 uint32_t key)
219{ 219{
220 struct ttm_object_device *tdev = tfile->tdev; 220 struct ttm_object_device *tdev = tfile->tdev;
221 struct ttm_base_object *base; 221 struct ttm_base_object *uninitialized_var(base);
222 struct drm_hash_item *hash; 222 struct drm_hash_item *hash;
223 int ret; 223 int ret;
224 224
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5e93a52d4f2c..210d50365162 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
170 ttm_tt_unbind(ttm); 170 ttm_tt_unbind(ttm);
171 } 171 }
172 172
173 if (likely(ttm->pages != NULL)) { 173 if (ttm->state == tt_unbound) {
174 ttm->bdev->driver->ttm_tt_unpopulate(ttm); 174 ttm->bdev->driver->ttm_tt_unpopulate(ttm);
175 } 175 }
176 176
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8dbe9d0ae9a7..8bf646183bac 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -97,7 +97,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); 97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
98 switch (ret) { 98 switch (ret) {
99 case -EAGAIN: 99 case -EAGAIN:
100 set_need_resched();
101 case 0: 100 case 0:
102 case -ERESTARTSYS: 101 case -ERESTARTSYS:
103 return VM_FAULT_NOPAGE; 102 return VM_FAULT_NOPAGE;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ae88a97f976e..b8470b1a10fe 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -94,7 +94,6 @@ EXPORT_SYMBOL_GPL(hid_register_report);
94static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) 94static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
95{ 95{
96 struct hid_field *field; 96 struct hid_field *field;
97 int i;
98 97
99 if (report->maxfield == HID_MAX_FIELDS) { 98 if (report->maxfield == HID_MAX_FIELDS) {
100 hid_err(report->device, "too many fields in report\n"); 99 hid_err(report->device, "too many fields in report\n");
@@ -113,9 +112,6 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
113 field->value = (s32 *)(field->usage + usages); 112 field->value = (s32 *)(field->usage + usages);
114 field->report = report; 113 field->report = report;
115 114
116 for (i = 0; i < usages; i++)
117 field->usage[i].usage_index = i;
118
119 return field; 115 return field;
120} 116}
121 117
@@ -226,9 +222,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
226{ 222{
227 struct hid_report *report; 223 struct hid_report *report;
228 struct hid_field *field; 224 struct hid_field *field;
229 int usages; 225 unsigned usages;
230 unsigned offset; 226 unsigned offset;
231 int i; 227 unsigned i;
232 228
233 report = hid_register_report(parser->device, report_type, parser->global.report_id); 229 report = hid_register_report(parser->device, report_type, parser->global.report_id);
234 if (!report) { 230 if (!report) {
@@ -255,7 +251,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
255 if (!parser->local.usage_index) /* Ignore padding fields */ 251 if (!parser->local.usage_index) /* Ignore padding fields */
256 return 0; 252 return 0;
257 253
258 usages = max_t(int, parser->local.usage_index, parser->global.report_count); 254 usages = max_t(unsigned, parser->local.usage_index,
255 parser->global.report_count);
259 256
260 field = hid_register_field(report, usages, parser->global.report_count); 257 field = hid_register_field(report, usages, parser->global.report_count);
261 if (!field) 258 if (!field)
@@ -266,13 +263,14 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
266 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 263 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
267 264
268 for (i = 0; i < usages; i++) { 265 for (i = 0; i < usages; i++) {
269 int j = i; 266 unsigned j = i;
270 /* Duplicate the last usage we parsed if we have excess values */ 267 /* Duplicate the last usage we parsed if we have excess values */
271 if (i >= parser->local.usage_index) 268 if (i >= parser->local.usage_index)
272 j = parser->local.usage_index - 1; 269 j = parser->local.usage_index - 1;
273 field->usage[i].hid = parser->local.usage[j]; 270 field->usage[i].hid = parser->local.usage[j];
274 field->usage[i].collection_index = 271 field->usage[i].collection_index =
275 parser->local.collection_index[j]; 272 parser->local.collection_index[j];
273 field->usage[i].usage_index = i;
276 } 274 }
277 275
278 field->maxusage = usages; 276 field->maxusage = usages;
@@ -801,6 +799,64 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
801} 799}
802EXPORT_SYMBOL_GPL(hid_parse_report); 800EXPORT_SYMBOL_GPL(hid_parse_report);
803 801
802static const char * const hid_report_names[] = {
803 "HID_INPUT_REPORT",
804 "HID_OUTPUT_REPORT",
805 "HID_FEATURE_REPORT",
806};
807/**
808 * hid_validate_values - validate existing device report's value indexes
809 *
810 * @device: hid device
811 * @type: which report type to examine
812 * @id: which report ID to examine (0 for first)
813 * @field_index: which report field to examine
814 * @report_counts: expected number of values
815 *
816 * Validate the number of values in a given field of a given report, after
817 * parsing.
818 */
819struct hid_report *hid_validate_values(struct hid_device *hid,
820 unsigned int type, unsigned int id,
821 unsigned int field_index,
822 unsigned int report_counts)
823{
824 struct hid_report *report;
825
826 if (type > HID_FEATURE_REPORT) {
827 hid_err(hid, "invalid HID report type %u\n", type);
828 return NULL;
829 }
830
831 if (id >= HID_MAX_IDS) {
832 hid_err(hid, "invalid HID report id %u\n", id);
833 return NULL;
834 }
835
836 /*
837 * Explicitly not using hid_get_report() here since it depends on
838 * ->numbered being checked, which may not always be the case when
839 * drivers go to access report values.
840 */
841 report = hid->report_enum[type].report_id_hash[id];
842 if (!report) {
843 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
844 return NULL;
845 }
846 if (report->maxfield <= field_index) {
847 hid_err(hid, "not enough fields in %s %u\n",
848 hid_report_names[type], id);
849 return NULL;
850 }
851 if (report->field[field_index]->report_count < report_counts) {
852 hid_err(hid, "not enough values in %s %u field %u\n",
853 hid_report_names[type], id, field_index);
854 return NULL;
855 }
856 return report;
857}
858EXPORT_SYMBOL_GPL(hid_validate_values);
859
804/** 860/**
805 * hid_open_report - open a driver-specific device report 861 * hid_open_report - open a driver-specific device report
806 * 862 *
@@ -1296,7 +1352,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1296 goto out; 1352 goto out;
1297 } 1353 }
1298 1354
1299 if (hid->claimed != HID_CLAIMED_HIDRAW) { 1355 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
1300 for (a = 0; a < report->maxfield; a++) 1356 for (a = 0; a < report->maxfield; a++)
1301 hid_input_field(hid, report->field[a], cdata, interrupt); 1357 hid_input_field(hid, report->field[a], cdata, interrupt);
1302 hdrv = hid->driver; 1358 hdrv = hid->driver;
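
hid_validate_values() centralizes the sanity checks that the Logitech force-feedback drivers below used to open-code before dereferencing report->field[i]->value[j]: the report of the requested type and id must exist, must have more than field_index fields, and that field must carry at least report_counts values. A sketch of the intended call pattern from a driver's init path; the id, field index and value count here are placeholders, the lg2ff/lg3ff/lg4ff/lgff hunks below pass their own:

/* Sketch only: report id 0, field 0 and a need for 7 values are placeholders. */
static int example_ff_init(struct hid_device *hid)
{
	struct hid_report *report;

	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
	if (!report)
		return -ENODEV;		/* missing or malformed report */

	/* Safe now: field 0 is known to hold at least 7 values. */
	report->field[0]->value[0] = 0x00;

	return 0;
}
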
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b420f4a0fd28..8741d953dcc8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -485,6 +485,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
485 if (field->flags & HID_MAIN_ITEM_CONSTANT) 485 if (field->flags & HID_MAIN_ITEM_CONSTANT)
486 goto ignore; 486 goto ignore;
487 487
488 /* Ignore if report count is out of bounds. */
489 if (field->report_count < 1)
490 goto ignore;
491
488 /* only LED usages are supported in output fields */ 492 /* only LED usages are supported in output fields */
489 if (field->report_type == HID_OUTPUT_REPORT && 493 if (field->report_type == HID_OUTPUT_REPORT &&
490 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) { 494 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
@@ -1236,7 +1240,11 @@ static void report_features(struct hid_device *hid)
1236 1240
1237 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1241 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1238 list_for_each_entry(rep, &rep_enum->report_list, list) 1242 list_for_each_entry(rep, &rep_enum->report_list, list)
1239 for (i = 0; i < rep->maxfield; i++) 1243 for (i = 0; i < rep->maxfield; i++) {
1244 /* Ignore if report count is out of bounds. */
1245 if (rep->field[i]->report_count < 1)
1246 continue;
1247
1240 for (j = 0; j < rep->field[i]->maxusage; j++) { 1248 for (j = 0; j < rep->field[i]->maxusage; j++) {
1241 /* Verify if Battery Strength feature is available */ 1249 /* Verify if Battery Strength feature is available */
1242 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]); 1250 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
@@ -1245,6 +1253,7 @@ static void report_features(struct hid_device *hid)
1245 drv->feature_mapping(hid, rep->field[i], 1253 drv->feature_mapping(hid, rep->field[i],
1246 rep->field[i]->usage + j); 1254 rep->field[i]->usage + j);
1247 } 1255 }
1256 }
1248} 1257}
1249 1258
1250static struct hid_input *hidinput_allocate(struct hid_device *hid) 1259static struct hid_input *hidinput_allocate(struct hid_device *hid)
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index 07837f5a4eb8..31cf29a6ba17 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -339,7 +339,15 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
339 struct tpkbd_data_pointer *data_pointer; 339 struct tpkbd_data_pointer *data_pointer;
340 size_t name_sz = strlen(dev_name(dev)) + 16; 340 size_t name_sz = strlen(dev_name(dev)) + 16;
341 char *name_mute, *name_micmute; 341 char *name_mute, *name_micmute;
342 int ret; 342 int i, ret;
343
344 /* Validate required reports. */
345 for (i = 0; i < 4; i++) {
346 if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
347 return -ENODEV;
348 }
349 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
350 return -ENODEV;
343 351
344 if (sysfs_create_group(&hdev->dev.kobj, 352 if (sysfs_create_group(&hdev->dev.kobj,
345 &tpkbd_attr_group_pointer)) { 353 &tpkbd_attr_group_pointer)) {
@@ -406,22 +414,27 @@ static int tpkbd_probe(struct hid_device *hdev,
406 ret = hid_parse(hdev); 414 ret = hid_parse(hdev);
407 if (ret) { 415 if (ret) {
408 hid_err(hdev, "hid_parse failed\n"); 416 hid_err(hdev, "hid_parse failed\n");
409 goto err_free; 417 goto err;
410 } 418 }
411 419
412 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 420 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
413 if (ret) { 421 if (ret) {
414 hid_err(hdev, "hid_hw_start failed\n"); 422 hid_err(hdev, "hid_hw_start failed\n");
415 goto err_free; 423 goto err;
416 } 424 }
417 425
418 uhdev = (struct usbhid_device *) hdev->driver_data; 426 uhdev = (struct usbhid_device *) hdev->driver_data;
419 427
420 if (uhdev->ifnum == 1) 428 if (uhdev->ifnum == 1) {
421 return tpkbd_probe_tp(hdev); 429 ret = tpkbd_probe_tp(hdev);
430 if (ret)
431 goto err_hid;
432 }
422 433
423 return 0; 434 return 0;
424err_free: 435err_hid:
436 hid_hw_stop(hdev);
437err:
425 return ret; 438 return ret;
426} 439}
427 440
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index b3cd1507dda2..1a42eaa6ca02 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
64 struct hid_report *report; 64 struct hid_report *report;
65 struct hid_input *hidinput = list_entry(hid->inputs.next, 65 struct hid_input *hidinput = list_entry(hid->inputs.next,
66 struct hid_input, list); 66 struct hid_input, list);
67 struct list_head *report_list =
68 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
69 struct input_dev *dev = hidinput->input; 67 struct input_dev *dev = hidinput->input;
70 int error; 68 int error;
71 69
72 if (list_empty(report_list)) { 70 /* Check that the report looks ok */
73 hid_err(hid, "no output report found\n"); 71 report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
72 if (!report)
74 return -ENODEV; 73 return -ENODEV;
75 }
76
77 report = list_entry(report_list->next, struct hid_report, list);
78
79 if (report->maxfield < 1) {
80 hid_err(hid, "output report is empty\n");
81 return -ENODEV;
82 }
83 if (report->field[0]->report_count < 7) {
84 hid_err(hid, "not enough values in the field\n");
85 return -ENODEV;
86 }
87 74
88 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL); 75 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
89 if (!lg2ff) 76 if (!lg2ff)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index e52f181f6aa1..8c2da183d3bc 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
66 int x, y; 66 int x, y;
67 67
68/* 68/*
69 * Maxusage should always be 63 (maximum fields) 69 * Available values in the field should always be 63, but we only use up to
70 * likely a better way to ensure this data is clean 70 * 35. Instead, clear the entire area, however big it is.
71 */ 71 */
72 memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); 72 memset(report->field[0]->value, 0,
73 sizeof(__s32) * report->field[0]->report_count);
73 74
74 switch (effect->type) { 75 switch (effect->type) {
75 case FF_CONSTANT: 76 case FF_CONSTANT:
@@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
129int lg3ff_init(struct hid_device *hid) 130int lg3ff_init(struct hid_device *hid)
130{ 131{
131 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 132 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
132 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
133 struct input_dev *dev = hidinput->input; 133 struct input_dev *dev = hidinput->input;
134 struct hid_report *report;
135 struct hid_field *field;
136 const signed short *ff_bits = ff3_joystick_ac; 134 const signed short *ff_bits = ff3_joystick_ac;
137 int error; 135 int error;
138 int i; 136 int i;
139 137
140 /* Find the report to use */
141 if (list_empty(report_list)) {
142 hid_err(hid, "No output report found\n");
143 return -1;
144 }
145
146 /* Check that the report looks ok */ 138 /* Check that the report looks ok */
147 report = list_entry(report_list->next, struct hid_report, list); 139 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
148 if (!report) { 140 return -ENODEV;
149 hid_err(hid, "NULL output report\n");
150 return -1;
151 }
152
153 field = report->field[0];
154 if (!field) {
155 hid_err(hid, "NULL field\n");
156 return -1;
157 }
158 141
159 /* Assume single fixed device G940 */ 142 /* Assume single fixed device G940 */
160 for (i = 0; ff_bits[i] >= 0; i++) 143 for (i = 0; ff_bits[i] >= 0; i++)
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 0ddae2a00d59..8782fe1aaa07 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
484int lg4ff_init(struct hid_device *hid) 484int lg4ff_init(struct hid_device *hid)
485{ 485{
486 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 486 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
487 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
488 struct input_dev *dev = hidinput->input; 487 struct input_dev *dev = hidinput->input;
489 struct hid_report *report;
490 struct hid_field *field;
491 struct lg4ff_device_entry *entry; 488 struct lg4ff_device_entry *entry;
492 struct lg_drv_data *drv_data; 489 struct lg_drv_data *drv_data;
493 struct usb_device_descriptor *udesc; 490 struct usb_device_descriptor *udesc;
494 int error, i, j; 491 int error, i, j;
495 __u16 bcdDevice, rev_maj, rev_min; 492 __u16 bcdDevice, rev_maj, rev_min;
496 493
497 /* Find the report to use */
498 if (list_empty(report_list)) {
499 hid_err(hid, "No output report found\n");
500 return -1;
501 }
502
503 /* Check that the report looks ok */ 494 /* Check that the report looks ok */
504 report = list_entry(report_list->next, struct hid_report, list); 495 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
505 if (!report) {
506 hid_err(hid, "NULL output report\n");
507 return -1; 496 return -1;
508 }
509
510 field = report->field[0];
511 if (!field) {
512 hid_err(hid, "NULL field\n");
513 return -1;
514 }
515 497
516 /* Check what wheel has been connected */ 498 /* Check what wheel has been connected */
517 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) { 499 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index d7ea8c845b40..e1394af0ae7b 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
128int lgff_init(struct hid_device* hid) 128int lgff_init(struct hid_device* hid)
129{ 129{
130 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 130 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
131 struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
132 struct input_dev *dev = hidinput->input; 131 struct input_dev *dev = hidinput->input;
133 struct hid_report *report;
134 struct hid_field *field;
135 const signed short *ff_bits = ff_joystick; 132 const signed short *ff_bits = ff_joystick;
136 int error; 133 int error;
137 int i; 134 int i;
138 135
139 /* Find the report to use */
140 if (list_empty(report_list)) {
141 hid_err(hid, "No output report found\n");
142 return -1;
143 }
144
145 /* Check that the report looks ok */ 136 /* Check that the report looks ok */
146 report = list_entry(report_list->next, struct hid_report, list); 137 if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
147 field = report->field[0]; 138 return -ENODEV;
148 if (!field) {
149 hid_err(hid, "NULL field\n");
150 return -1;
151 }
152 139
153 for (i = 0; i < ARRAY_SIZE(devices); i++) { 140 for (i = 0; i < ARRAY_SIZE(devices); i++) {
154 if (dev->id.vendor == devices[i].idVendor && 141 if (dev->id.vendor == devices[i].idVendor &&
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 7800b1410562..2e5302462efb 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -461,7 +461,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
461 struct hid_report *report; 461 struct hid_report *report;
462 struct hid_report_enum *output_report_enum; 462 struct hid_report_enum *output_report_enum;
463 u8 *data = (u8 *)(&dj_report->device_index); 463 u8 *data = (u8 *)(&dj_report->device_index);
464 int i; 464 unsigned int i;
465 465
466 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; 466 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
467 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; 467 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
@@ -471,7 +471,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
471 return -ENODEV; 471 return -ENODEV;
472 } 472 }
473 473
474 for (i = 0; i < report->field[0]->report_count; i++) 474 for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
475 report->field[0]->value[i] = data[i]; 475 report->field[0]->value[i] = data[i];
476 476
477 hid_hw_request(hdev, report, HID_REQ_SET_REPORT); 477 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
791 goto hid_parse_fail; 791 goto hid_parse_fail;
792 } 792 }
793 793
794 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
795 0, DJREPORT_SHORT_LENGTH - 1)) {
796 retval = -ENODEV;
797 goto hid_parse_fail;
798 }
799
794 /* Starts the usb device and connects to upper interfaces hiddev and 800 /* Starts the usb device and connects to upper interfaces hiddev and
795 * hidraw */ 801 * hidraw */
796 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 802 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ac28f08c3866..5e5fe1b8eebb 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -101,9 +101,9 @@ struct mt_device {
101 unsigned last_slot_field; /* the last field of a slot */ 101 unsigned last_slot_field; /* the last field of a slot */
102 unsigned mt_report_id; /* the report ID of the multitouch device */ 102 unsigned mt_report_id; /* the report ID of the multitouch device */
103 unsigned pen_report_id; /* the report ID of the pen device */ 103 unsigned pen_report_id; /* the report ID of the pen device */
104 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ 104 __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
105 __s8 inputmode_index; /* InputMode HID feature index in the report */ 105 __s16 inputmode_index; /* InputMode HID feature index in the report */
106 __s8 maxcontact_report_id; /* Maximum Contact Number HID feature, 106 __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
107 -1 if non-existent */ 107 -1 if non-existent */
108 __u8 num_received; /* how many contacts we received */ 108 __u8 num_received; /* how many contacts we received */
109 __u8 num_expected; /* expected last contact index */ 109 __u8 num_expected; /* expected last contact index */
@@ -312,20 +312,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
312 struct hid_field *field, struct hid_usage *usage) 312 struct hid_field *field, struct hid_usage *usage)
313{ 313{
314 struct mt_device *td = hid_get_drvdata(hdev); 314 struct mt_device *td = hid_get_drvdata(hdev);
315 int i;
316 315
317 switch (usage->hid) { 316 switch (usage->hid) {
318 case HID_DG_INPUTMODE: 317 case HID_DG_INPUTMODE:
319 td->inputmode = field->report->id; 318 /* Ignore if value index is out of bounds. */
320 td->inputmode_index = 0; /* has to be updated below */ 319 if (usage->usage_index >= field->report_count) {
321 320 dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
322 for (i=0; i < field->maxusage; i++) { 321 break;
323 if (field->usage[i].hid == usage->hid) {
324 td->inputmode_index = i;
325 break;
326 }
327 } 322 }
328 323
324 td->inputmode = field->report->id;
325 td->inputmode_index = usage->usage_index;
326
329 break; 327 break;
330 case HID_DG_CONTACTMAX: 328 case HID_DG_CONTACTMAX:
331 td->maxcontact_report_id = field->report->id; 329 td->maxcontact_report_id = field->report->id;
@@ -511,6 +509,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
511 mt_store_field(usage, td, hi); 509 mt_store_field(usage, td, hi);
512 return 1; 510 return 1;
513 case HID_DG_CONTACTCOUNT: 511 case HID_DG_CONTACTCOUNT:
512 /* Ignore if indexes are out of bounds. */
513 if (field->index >= field->report->maxfield ||
514 usage->usage_index >= field->report_count)
515 return 1;
514 td->cc_index = field->index; 516 td->cc_index = field->index;
515 td->cc_value_index = usage->usage_index; 517 td->cc_value_index = usage->usage_index;
516 return 1; 518 return 1;
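[Note] Both hid-multitouch hunks above add the same defensive pattern: an index taken from the report descriptor is checked against the size of the array it will later index before being cached. A sketch of the rule inside the feature-mapping callback, using the names from the INPUTMODE hunk (illustrative, not part of the patch):

	/* usage->usage_index selects an entry in field->value[], which has
	 * field->report_count entries, so reject anything out of range
	 * before caching it for later SET_REPORT writes. */
	if (usage->usage_index >= field->report_count) {
		dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
		return;
	}
	td->inputmode       = field->report->id;
	td->inputmode_index = usage->usage_index;
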
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 30dbb6b40bbf..b18320db5f7d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -537,6 +537,10 @@ static int buzz_init(struct hid_device *hdev)
537 drv_data = hid_get_drvdata(hdev); 537 drv_data = hid_get_drvdata(hdev);
538 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER)); 538 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
539 539
540 /* Validate expected report characteristics. */
541 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
542 return -ENODEV;
543
540 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL); 544 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
541 if (!buzz) { 545 if (!buzz) {
542 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n"); 546 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d16491192112..29f328f411fb 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
249 goto err_free; 249 goto err_free;
250 } 250 }
251 251
252 if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
253 ret = -ENODEV;
254 goto err_free;
255 }
256
252 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 257 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
253 if (ret) { 258 if (ret) {
254 hid_err(hdev, "hw start failed\n"); 259 hid_err(hdev, "hw start failed\n");
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 6ec28a37c146..a29756c6ca02 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -68,21 +68,13 @@ static int zpff_init(struct hid_device *hid)
68 struct hid_report *report; 68 struct hid_report *report;
69 struct hid_input *hidinput = list_entry(hid->inputs.next, 69 struct hid_input *hidinput = list_entry(hid->inputs.next,
70 struct hid_input, list); 70 struct hid_input, list);
71 struct list_head *report_list =
72 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
73 struct input_dev *dev = hidinput->input; 71 struct input_dev *dev = hidinput->input;
74 int error; 72 int i, error;
75 73
76 if (list_empty(report_list)) { 74 for (i = 0; i < 4; i++) {
77 hid_err(hid, "no output report found\n"); 75 report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
78 return -ENODEV; 76 if (!report)
79 } 77 return -ENODEV;
80
81 report = list_entry(report_list->next, struct hid_report, list);
82
83 if (report->maxfield < 4) {
84 hid_err(hid, "not enough fields in report\n");
85 return -ENODEV;
86 } 78 }
87 79
88 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL); 80 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8f4743ab5fb2..936093e0271e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,7 @@ int vmbus_connect(void)
195 195
196 do { 196 do {
197 ret = vmbus_negotiate_version(msginfo, version); 197 ret = vmbus_negotiate_version(msginfo, version);
198 if (ret) 198 if (ret == -ETIMEDOUT)
199 goto cleanup; 199 goto cleanup;
200 200
201 if (vmbus_connection.conn_state == CONNECTED) 201 if (vmbus_connection.conn_state == CONNECTED)
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 28b03325b872..09988b289622 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -32,13 +32,17 @@
32/* 32/*
33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) 33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
34 */ 34 */
35#define WS2008_SRV_MAJOR 1
36#define WS2008_SRV_MINOR 0
37#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
38
35#define WIN7_SRV_MAJOR 3 39#define WIN7_SRV_MAJOR 3
36#define WIN7_SRV_MINOR 0 40#define WIN7_SRV_MINOR 0
37#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) 41#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
38 42
39#define WIN8_SRV_MAJOR 4 43#define WIN8_SRV_MAJOR 4
40#define WIN8_SRV_MINOR 0 44#define WIN8_SRV_MINOR 0
41#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
42 46
43/* 47/*
44 * Global state maintained for transaction that is being processed. 48 * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
587 591
588 struct icmsg_hdr *icmsghdrp; 592 struct icmsg_hdr *icmsghdrp;
589 struct icmsg_negotiate *negop = NULL; 593 struct icmsg_negotiate *negop = NULL;
594 int util_fw_version;
595 int kvp_srv_version;
590 596
591 if (kvp_transaction.active) { 597 if (kvp_transaction.active) {
592 /* 598 /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
606 612
607 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 613 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
608 /* 614 /*
609 * We start with win8 version and if the host cannot 615 * Based on the host, select appropriate
610 * support that we use the previous version. 616 * framework and service versions we will
617 * negotiate.
611 */ 618 */
612 if (vmbus_prep_negotiate_resp(icmsghdrp, negop, 619 switch (vmbus_proto_version) {
613 recv_buffer, UTIL_FW_MAJOR_MINOR, 620 case (VERSION_WS2008):
614 WIN8_SRV_MAJOR_MINOR)) 621 util_fw_version = UTIL_WS2K8_FW_VERSION;
615 goto done; 622 kvp_srv_version = WS2008_SRV_VERSION;
616 623 break;
624 case (VERSION_WIN7):
625 util_fw_version = UTIL_FW_VERSION;
626 kvp_srv_version = WIN7_SRV_VERSION;
627 break;
628 default:
629 util_fw_version = UTIL_FW_VERSION;
630 kvp_srv_version = WIN8_SRV_VERSION;
631 }
617 vmbus_prep_negotiate_resp(icmsghdrp, negop, 632 vmbus_prep_negotiate_resp(icmsghdrp, negop,
618 recv_buffer, UTIL_FW_MAJOR_MINOR, 633 recv_buffer, util_fw_version,
619 WIN7_SRV_MAJOR_MINOR); 634 kvp_srv_version);
620 635
621 } else { 636 } else {
622 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 637 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
649 return; 664 return;
650 665
651 } 666 }
652done:
653 667
654 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION 668 icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
655 | ICMSGHDRFLAG_RESPONSE; 669 | ICMSGHDRFLAG_RESPONSE;
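[Note] Reassembled from the hv_kvp hunk above for readability: the negotiate path now selects the framework and KVP service versions from the host protocol version instead of always offering the win8 pair and falling back through the removed "done" label. The names are the macros introduced earlier in this file's hunk:

	int util_fw_version, kvp_srv_version;

	switch (vmbus_proto_version) {
	case VERSION_WS2008:
		util_fw_version = UTIL_WS2K8_FW_VERSION;
		kvp_srv_version = WS2008_SRV_VERSION;
		break;
	case VERSION_WIN7:
		util_fw_version = UTIL_FW_VERSION;
		kvp_srv_version = WIN7_SRV_VERSION;
		break;
	default:
		util_fw_version = UTIL_FW_VERSION;
		kvp_srv_version = WIN8_SRV_VERSION;
	}
	vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer,
				  util_fw_version, kvp_srv_version);
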
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e4572f3f2834..0c3546224376 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -26,7 +26,7 @@
26 26
27#define VSS_MAJOR 5 27#define VSS_MAJOR 5
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31
32 32
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
190 190
191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 191 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
192 vmbus_prep_negotiate_resp(icmsghdrp, negop, 192 vmbus_prep_negotiate_resp(icmsghdrp, negop,
193 recv_buffer, UTIL_FW_MAJOR_MINOR, 193 recv_buffer, UTIL_FW_VERSION,
194 VSS_MAJOR_MINOR); 194 VSS_VERSION);
195 } else { 195 } else {
196 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 196 vss_msg = (struct hv_vss_msg *)&recv_buffer[
197 sizeof(struct vmbuspipe_hdr) + 197 sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index cb82233541b1..273e3ddb3a20 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,17 +28,32 @@
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#define SHUTDOWN_MAJOR 3
32#define SHUTDOWN_MINOR 0
33#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
34 31
35#define TIMESYNCH_MAJOR 3 32#define SD_MAJOR 3
36#define TIMESYNCH_MINOR 0 33#define SD_MINOR 0
37#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) 34#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
38 35
39#define HEARTBEAT_MAJOR 3 36#define SD_WS2008_MAJOR 1
40#define HEARTBEAT_MINOR 0 37#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
41#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) 38
39#define TS_MAJOR 3
40#define TS_MINOR 0
41#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
42
43#define TS_WS2008_MAJOR 1
44#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
45
46#define HB_MAJOR 3
47#define HB_MINOR 0
48#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
49
50#define HB_WS2008_MAJOR 1
51#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
52
53static int sd_srv_version;
54static int ts_srv_version;
55static int hb_srv_version;
56static int util_fw_version;
42 57
43static void shutdown_onchannelcallback(void *context); 58static void shutdown_onchannelcallback(void *context);
44static struct hv_util_service util_shutdown = { 59static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
99 114
100 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 115 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
101 vmbus_prep_negotiate_resp(icmsghdrp, negop, 116 vmbus_prep_negotiate_resp(icmsghdrp, negop,
102 shut_txf_buf, UTIL_FW_MAJOR_MINOR, 117 shut_txf_buf, util_fw_version,
103 SHUTDOWN_MAJOR_MINOR); 118 sd_srv_version);
104 } else { 119 } else {
105 shutdown_msg = 120 shutdown_msg =
106 (struct shutdown_msg_data *)&shut_txf_buf[ 121 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
216 struct icmsg_hdr *icmsghdrp; 231 struct icmsg_hdr *icmsghdrp;
217 struct ictimesync_data *timedatap; 232 struct ictimesync_data *timedatap;
218 u8 *time_txf_buf = util_timesynch.recv_buffer; 233 u8 *time_txf_buf = util_timesynch.recv_buffer;
234 struct icmsg_negotiate *negop = NULL;
219 235
220 vmbus_recvpacket(channel, time_txf_buf, 236 vmbus_recvpacket(channel, time_txf_buf,
221 PAGE_SIZE, &recvlen, &requestid); 237 PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
225 sizeof(struct vmbuspipe_hdr)]; 241 sizeof(struct vmbuspipe_hdr)];
226 242
227 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 243 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
228 vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, 244 vmbus_prep_negotiate_resp(icmsghdrp, negop,
229 UTIL_FW_MAJOR_MINOR, 245 time_txf_buf,
230 TIMESYNCH_MAJOR_MINOR); 246 util_fw_version,
247 ts_srv_version);
231 } else { 248 } else {
232 timedatap = (struct ictimesync_data *)&time_txf_buf[ 249 timedatap = (struct ictimesync_data *)&time_txf_buf[
233 sizeof(struct vmbuspipe_hdr) + 250 sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
257 struct icmsg_hdr *icmsghdrp; 274 struct icmsg_hdr *icmsghdrp;
258 struct heartbeat_msg_data *heartbeat_msg; 275 struct heartbeat_msg_data *heartbeat_msg;
259 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 276 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
277 struct icmsg_negotiate *negop = NULL;
260 278
261 vmbus_recvpacket(channel, hbeat_txf_buf, 279 vmbus_recvpacket(channel, hbeat_txf_buf,
262 PAGE_SIZE, &recvlen, &requestid); 280 PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
266 sizeof(struct vmbuspipe_hdr)]; 284 sizeof(struct vmbuspipe_hdr)];
267 285
268 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 286 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
269 vmbus_prep_negotiate_resp(icmsghdrp, NULL, 287 vmbus_prep_negotiate_resp(icmsghdrp, negop,
270 hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, 288 hbeat_txf_buf, util_fw_version,
271 HEARTBEAT_MAJOR_MINOR); 289 hb_srv_version);
272 } else { 290 } else {
273 heartbeat_msg = 291 heartbeat_msg =
274 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 292 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
321 goto error; 339 goto error;
322 340
323 hv_set_drvdata(dev, srv); 341 hv_set_drvdata(dev, srv);
342 /*
343 * Based on the host; initialize the framework and
344 * service version numbers we will negotiate.
345 */
346 switch (vmbus_proto_version) {
347 case (VERSION_WS2008):
348 util_fw_version = UTIL_WS2K8_FW_VERSION;
349 sd_srv_version = SD_WS2008_VERSION;
350 ts_srv_version = TS_WS2008_VERSION;
351 hb_srv_version = HB_WS2008_VERSION;
352 break;
353
354 default:
355 util_fw_version = UTIL_FW_VERSION;
356 sd_srv_version = SD_VERSION;
357 ts_srv_version = TS_VERSION;
358 hb_srv_version = HB_VERSION;
359 }
360
324 return 0; 361 return 0;
325 362
326error: 363error:
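[Note] All of the *_VERSION macros in these Hyper-V hunks use the same encoding: major number in the top 16 bits, minor in the bottom 16. A worked example using the values defined above:

	#define SD_MAJOR	3
	#define SD_MINOR	0
	#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)	/* 0x00030000 */

	#define SD_WS2008_MAJOR	1
	#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR) /* 0x00010000 */
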
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 62c2e32e25ef..98814d12a604 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
525{ 525{
526 struct applesmc_registers *s = &smcreg; 526 struct applesmc_registers *s = &smcreg;
527 bool left_light_sensor, right_light_sensor; 527 bool left_light_sensor, right_light_sensor;
528 unsigned int count;
528 u8 tmp[1]; 529 u8 tmp[1];
529 int ret; 530 int ret;
530 531
531 if (s->init_complete) 532 if (s->init_complete)
532 return 0; 533 return 0;
533 534
534 ret = read_register_count(&s->key_count); 535 ret = read_register_count(&count);
535 if (ret) 536 if (ret)
536 return ret; 537 return ret;
537 538
539 if (s->cache && s->key_count != count) {
540 pr_warn("key count changed from %d to %d\n",
541 s->key_count, count);
542 kfree(s->cache);
543 s->cache = NULL;
544 }
545 s->key_count = count;
546
538 if (!s->cache) 547 if (!s->cache)
539 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); 548 s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
540 if (!s->cache) 549 if (!s->cache)
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbecf08399f8..5888feef1ac5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -98,6 +98,8 @@
98 98
99#define DW_IC_ERR_TX_ABRT 0x1 99#define DW_IC_ERR_TX_ABRT 0x1
100 100
101#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
102
101/* 103/*
102 * status codes 104 * status codes
103 */ 105 */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
388static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 390static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
389{ 391{
390 struct i2c_msg *msgs = dev->msgs; 392 struct i2c_msg *msgs = dev->msgs;
391 u32 ic_con; 393 u32 ic_con, ic_tar = 0;
392 394
393 /* Disable the adapter */ 395 /* Disable the adapter */
394 __i2c_dw_enable(dev, false); 396 __i2c_dw_enable(dev, false);
395 397
396 /* set the slave (target) address */
397 dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
398
399 /* if the slave address is ten bit address, enable 10BITADDR */ 398 /* if the slave address is ten bit address, enable 10BITADDR */
400 ic_con = dw_readl(dev, DW_IC_CON); 399 ic_con = dw_readl(dev, DW_IC_CON);
401 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 400 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
402 ic_con |= DW_IC_CON_10BITADDR_MASTER; 401 ic_con |= DW_IC_CON_10BITADDR_MASTER;
403 else 402 /*
403 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
404 * mode has to be enabled via bit 12 of IC_TAR register.
405 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
406 * detected from registers.
407 */
408 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
409 } else {
404 ic_con &= ~DW_IC_CON_10BITADDR_MASTER; 410 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
411 }
412
405 dw_writel(dev, ic_con, DW_IC_CON); 413 dw_writel(dev, ic_con, DW_IC_CON);
406 414
415 /*
416 * Set the slave (target) address and enable 10-bit addressing mode
417 * if applicable.
418 */
419 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
420
407 /* Enable the adapter */ 421 /* Enable the adapter */
408 __i2c_dw_enable(dev, true); 422 __i2c_dw_enable(dev, true);
409 423
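[Note] Reassembled from the i2c-designware hunk above: the target address is no longer written before the 10-bit decision. Instead the 10-bit flag for IC_TAR is prepared first and OR-ed into a single DW_IC_TAR write, so controllers built with I2C_DYNAMIC_TAR_UPDATE (where bit 12 of IC_TAR, not only IC_CON, selects 10-bit addressing) are covered as well:

	u32 ic_con, ic_tar = 0;

	ic_con = dw_readl(dev, DW_IC_CON);
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con |= DW_IC_CON_10BITADDR_MASTER;
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;	/* bit 12 */
	} else {
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	}
	dw_writel(dev, ic_con, DW_IC_CON);

	/* One write programs both the address and, when needed, bit 12. */
	dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
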
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8ed79a086f85..1672effbcebb 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
393 393
394 desc = &priv->hw[priv->head]; 394 desc = &priv->hw[priv->head];
395 395
396 /* Initialize the DMA buffer */
397 memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
398
396 /* Initialize the descriptor */ 399 /* Initialize the descriptor */
397 memset(desc, 0, sizeof(struct ismt_desc)); 400 memset(desc, 0, sizeof(struct ismt_desc));
398 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); 401 desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 7f3a47443494..d3e9cc3153a9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | 234 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; 235 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
236 236
237 writel_relaxed(data_reg_lo, 237 writel(data_reg_lo,
238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); 238 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
239 writel_relaxed(data_reg_hi, 239 writel(data_reg_hi,
240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); 240 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
241 241
242 } else { 242 } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); 697MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
698 698
699#ifdef CONFIG_OF 699#ifdef CONFIG_OF
700#ifdef CONFIG_HAVE_CLK
700static int 701static int
701mv64xxx_calc_freq(const int tclk, const int n, const int m) 702mv64xxx_calc_freq(const int tclk, const int n, const int m)
702{ 703{
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
726 return false; 727 return false;
727 return true; 728 return true;
728} 729}
730#endif /* CONFIG_HAVE_CLK */
729 731
730static int 732static int
731mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, 733mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
732 struct device *dev) 734 struct device *dev)
733{ 735{
734 const struct of_device_id *device;
735 struct device_node *np = dev->of_node;
736 u32 bus_freq, tclk;
737 int rc = 0;
738
739 /* CLK is mandatory when using DT to describe the i2c bus. We 736 /* CLK is mandatory when using DT to describe the i2c bus. We
740 * need to know tclk in order to calculate bus clock 737 * need to know tclk in order to calculate bus clock
741 * factors. 738 * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
744 /* Have OF but no CLK */ 741 /* Have OF but no CLK */
745 return -ENODEV; 742 return -ENODEV;
746#else 743#else
744 const struct of_device_id *device;
745 struct device_node *np = dev->of_node;
746 u32 bus_freq, tclk;
747 int rc = 0;
748
747 if (IS_ERR(drv_data->clk)) { 749 if (IS_ERR(drv_data->clk)) {
748 rc = -ENODEV; 750 rc = -ENODEV;
749 goto out; 751 goto out;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3535f3c0f7b4..3747b9bf67d6 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
1178 1178
1179 i2c_del_adapter(&i2c->adap); 1179 i2c_del_adapter(&i2c->adap);
1180 1180
1181 clk_disable_unprepare(i2c->clk);
1182
1183 if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) 1181 if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
1184 s3c24xx_i2c_dt_gpio_free(i2c); 1182 s3c24xx_i2c_dt_gpio_free(i2c);
1185 1183
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 12e32e6b4103..81e3dc260993 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -620,7 +620,7 @@ static int bma180_remove(struct i2c_client *client)
620#ifdef CONFIG_PM_SLEEP 620#ifdef CONFIG_PM_SLEEP
621static int bma180_suspend(struct device *dev) 621static int bma180_suspend(struct device *dev)
622{ 622{
623 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 623 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
624 struct bma180_data *data = iio_priv(indio_dev); 624 struct bma180_data *data = iio_priv(indio_dev);
625 int ret; 625 int ret;
626 626
@@ -633,7 +633,7 @@ static int bma180_suspend(struct device *dev)
633 633
634static int bma180_resume(struct device *dev) 634static int bma180_resume(struct device *dev)
635{ 635{
636 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 636 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
637 struct bma180_data *data = iio_priv(indio_dev); 637 struct bma180_data *data = iio_priv(indio_dev);
638 int ret; 638 int ret;
639 639
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 84be63bdf038..0f16b553e063 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -556,7 +556,7 @@ static const struct iio_info at91_adc_info = {
556 556
557static int at91_adc_probe(struct platform_device *pdev) 557static int at91_adc_probe(struct platform_device *pdev)
558{ 558{
559 unsigned int prsc, mstrclk, ticks, adc_clk, shtim; 559 unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
560 int ret; 560 int ret;
561 struct iio_dev *idev; 561 struct iio_dev *idev;
562 struct at91_adc_state *st; 562 struct at91_adc_state *st;
@@ -649,6 +649,7 @@ static int at91_adc_probe(struct platform_device *pdev)
649 */ 649 */
650 mstrclk = clk_get_rate(st->clk); 650 mstrclk = clk_get_rate(st->clk);
651 adc_clk = clk_get_rate(st->adc_clk); 651 adc_clk = clk_get_rate(st->adc_clk);
652 adc_clk_khz = adc_clk / 1000;
652 prsc = (mstrclk / (2 * adc_clk)) - 1; 653 prsc = (mstrclk / (2 * adc_clk)) - 1;
653 654
654 if (!st->startup_time) { 655 if (!st->startup_time) {
@@ -662,15 +663,15 @@ static int at91_adc_probe(struct platform_device *pdev)
662 * defined in the electrical characteristics of the board, divided by 8. 663 * defined in the electrical characteristics of the board, divided by 8.
663 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock 664 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock
664 */ 665 */
665 ticks = round_up((st->startup_time * adc_clk / 666 ticks = round_up((st->startup_time * adc_clk_khz /
666 1000000) - 1, 8) / 8; 667 1000) - 1, 8) / 8;
667 /* 668 /*
668 * a minimal Sample and Hold Time is necessary for the ADC to guarantee 669 * a minimal Sample and Hold Time is necessary for the ADC to guarantee
669 * the best converted final value between two channels selection 670 * the best converted final value between two channels selection
670 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock 671 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
671 */ 672 */
672 shtim = round_up((st->sample_hold_time * adc_clk / 673 shtim = round_up((st->sample_hold_time * adc_clk_khz /
673 1000000) - 1, 1); 674 1000) - 1, 1);
674 675
675 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask; 676 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
676 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask; 677 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
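[Note] The point of adc_clk_khz in the at91_adc hunk above appears to be keeping the intermediate product within 32 bits: the startup and sample-and-hold times are multiplied by the ADC clock before dividing, and with the clock in Hz that product can exceed what an unsigned int holds. Illustrative arithmetic only, with made-up values not taken from any board file:

	unsigned int adc_clk     = 5000000;		/* 5 MHz        */
	unsigned int adc_clk_khz = adc_clk / 1000;	/* 5000         */
	unsigned int startup     = 1000;		/* time units   */

	/* old: startup * adc_clk     -> 5,000,000,000  (overflows a u32) */
	/* new: startup * adc_clk_khz -> 5,000,000      (fits easily)     */
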
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d0a79a4bce1c..ba6f6a91dfff 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
185 185
186 iio_device_unregister(indio_dev); 186 iio_device_unregister(indio_dev);
187 187
188 if (!IS_ERR(reg)) { 188 if (!IS_ERR(reg))
189 regulator_disable(reg); 189 regulator_disable(reg);
190 regulator_put(reg);
191 }
192 190
193 return 0; 191 return 0;
194} 192}
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 9d19ba74f22b..415f3c6efd72 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -41,6 +41,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
41 goto error_ret; 41 goto error_ret;
42 } 42 }
43 43
44 iio_buffer_init(&cb_buff->buffer);
45
44 cb_buff->private = private; 46 cb_buff->private = private;
45 cb_buff->cb = cb; 47 cb_buff->cb = cb;
46 cb_buff->buffer.access = &iio_cb_access; 48 cb_buff->buffer.access = &iio_cb_access;
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 1f4a48e6a82c..1397b6e0e414 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -37,21 +37,21 @@ struct mcp4725_data {
37 37
38static int mcp4725_suspend(struct device *dev) 38static int mcp4725_suspend(struct device *dev)
39{ 39{
40 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 40 struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
41 struct mcp4725_data *data = iio_priv(indio_dev); 41 to_i2c_client(dev)));
42 u8 outbuf[2]; 42 u8 outbuf[2];
43 43
44 outbuf[0] = (data->powerdown_mode + 1) << 4; 44 outbuf[0] = (data->powerdown_mode + 1) << 4;
45 outbuf[1] = 0; 45 outbuf[1] = 0;
46 data->powerdown = true; 46 data->powerdown = true;
47 47
48 return i2c_master_send(to_i2c_client(dev), outbuf, 2); 48 return i2c_master_send(data->client, outbuf, 2);
49} 49}
50 50
51static int mcp4725_resume(struct device *dev) 51static int mcp4725_resume(struct device *dev)
52{ 52{
53 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 53 struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
54 struct mcp4725_data *data = iio_priv(indio_dev); 54 to_i2c_client(dev)));
55 u8 outbuf[2]; 55 u8 outbuf[2];
56 56
57 /* restore previous DAC value */ 57 /* restore previous DAC value */
@@ -59,7 +59,7 @@ static int mcp4725_resume(struct device *dev)
59 outbuf[1] = data->dac_value & 0xff; 59 outbuf[1] = data->dac_value & 0xff;
60 data->powerdown = false; 60 data->powerdown = false;
61 61
62 return i2c_master_send(to_i2c_client(dev), outbuf, 2); 62 return i2c_master_send(data->client, outbuf, 2);
63} 63}
64 64
65#ifdef CONFIG_PM_SLEEP 65#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 05c1b74502a3..9b32253b824b 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -49,11 +49,15 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
49#define iio_buffer_poll_addr (&iio_buffer_poll) 49#define iio_buffer_poll_addr (&iio_buffer_poll)
50#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer) 50#define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
51 51
52void iio_disable_all_buffers(struct iio_dev *indio_dev);
53
52#else 54#else
53 55
54#define iio_buffer_poll_addr NULL 56#define iio_buffer_poll_addr NULL
55#define iio_buffer_read_first_n_outer_addr NULL 57#define iio_buffer_read_first_n_outer_addr NULL
56 58
59static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
60
57#endif 61#endif
58 62
59int iio_device_register_eventset(struct iio_dev *indio_dev); 63int iio_device_register_eventset(struct iio_dev *indio_dev);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e73033f3839a..2710f7245c3b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -460,6 +460,25 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
460 return bytes; 460 return bytes;
461} 461}
462 462
463void iio_disable_all_buffers(struct iio_dev *indio_dev)
464{
465 struct iio_buffer *buffer, *_buffer;
466
467 if (list_empty(&indio_dev->buffer_list))
468 return;
469
470 if (indio_dev->setup_ops->predisable)
471 indio_dev->setup_ops->predisable(indio_dev);
472
473 list_for_each_entry_safe(buffer, _buffer,
474 &indio_dev->buffer_list, buffer_list)
475 list_del_init(&buffer->buffer_list);
476
477 indio_dev->currentmode = INDIO_DIRECT_MODE;
478 if (indio_dev->setup_ops->postdisable)
479 indio_dev->setup_ops->postdisable(indio_dev);
480}
481
463int iio_update_buffers(struct iio_dev *indio_dev, 482int iio_update_buffers(struct iio_dev *indio_dev,
464 struct iio_buffer *insert_buffer, 483 struct iio_buffer *insert_buffer,
465 struct iio_buffer *remove_buffer) 484 struct iio_buffer *remove_buffer)
@@ -528,8 +547,15 @@ int iio_update_buffers(struct iio_dev *indio_dev,
528 * Note can only occur when adding a buffer. 547 * Note can only occur when adding a buffer.
529 */ 548 */
530 list_del(&insert_buffer->buffer_list); 549 list_del(&insert_buffer->buffer_list);
531 indio_dev->active_scan_mask = old_mask; 550 if (old_mask) {
532 success = -EINVAL; 551 indio_dev->active_scan_mask = old_mask;
552 success = -EINVAL;
553 }
554 else {
555 kfree(compound_mask);
556 ret = -EINVAL;
557 goto error_ret;
558 }
533 } 559 }
534 } else { 560 } else {
535 indio_dev->active_scan_mask = compound_mask; 561 indio_dev->active_scan_mask = compound_mask;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 97f0297b120f..f95c6979efd8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -848,13 +848,10 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
848static void iio_dev_release(struct device *device) 848static void iio_dev_release(struct device *device)
849{ 849{
850 struct iio_dev *indio_dev = dev_to_iio_dev(device); 850 struct iio_dev *indio_dev = dev_to_iio_dev(device);
851 if (indio_dev->chrdev.dev)
852 cdev_del(&indio_dev->chrdev);
853 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 851 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
854 iio_device_unregister_trigger_consumer(indio_dev); 852 iio_device_unregister_trigger_consumer(indio_dev);
855 iio_device_unregister_eventset(indio_dev); 853 iio_device_unregister_eventset(indio_dev);
856 iio_device_unregister_sysfs(indio_dev); 854 iio_device_unregister_sysfs(indio_dev);
857 iio_device_unregister_debugfs(indio_dev);
858 855
859 ida_simple_remove(&iio_ida, indio_dev->id); 856 ida_simple_remove(&iio_ida, indio_dev->id);
860 kfree(indio_dev); 857 kfree(indio_dev);
@@ -970,6 +967,8 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
970 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags)) 967 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
971 return -EBUSY; 968 return -EBUSY;
972 969
970 iio_device_get(indio_dev);
971
973 filp->private_data = indio_dev; 972 filp->private_data = indio_dev;
974 973
975 return 0; 974 return 0;
@@ -983,6 +982,8 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
983 struct iio_dev *indio_dev = container_of(inode->i_cdev, 982 struct iio_dev *indio_dev = container_of(inode->i_cdev,
984 struct iio_dev, chrdev); 983 struct iio_dev, chrdev);
985 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); 984 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
985 iio_device_put(indio_dev);
986
986 return 0; 987 return 0;
987} 988}
988 989
@@ -1052,18 +1053,20 @@ int iio_device_register(struct iio_dev *indio_dev)
1052 indio_dev->setup_ops == NULL) 1053 indio_dev->setup_ops == NULL)
1053 indio_dev->setup_ops = &noop_ring_setup_ops; 1054 indio_dev->setup_ops = &noop_ring_setup_ops;
1054 1055
1055 ret = device_add(&indio_dev->dev);
1056 if (ret < 0)
1057 goto error_unreg_eventset;
1058 cdev_init(&indio_dev->chrdev, &iio_buffer_fileops); 1056 cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
1059 indio_dev->chrdev.owner = indio_dev->info->driver_module; 1057 indio_dev->chrdev.owner = indio_dev->info->driver_module;
1058 indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
1060 ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1); 1059 ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
1061 if (ret < 0) 1060 if (ret < 0)
1062 goto error_del_device; 1061 goto error_unreg_eventset;
1063 return 0;
1064 1062
1065error_del_device: 1063 ret = device_add(&indio_dev->dev);
1066 device_del(&indio_dev->dev); 1064 if (ret < 0)
1065 goto error_cdev_del;
1066
1067 return 0;
1068error_cdev_del:
1069 cdev_del(&indio_dev->chrdev);
1067error_unreg_eventset: 1070error_unreg_eventset:
1068 iio_device_unregister_eventset(indio_dev); 1071 iio_device_unregister_eventset(indio_dev);
1069error_free_sysfs: 1072error_free_sysfs:
@@ -1078,9 +1081,17 @@ EXPORT_SYMBOL(iio_device_register);
1078void iio_device_unregister(struct iio_dev *indio_dev) 1081void iio_device_unregister(struct iio_dev *indio_dev)
1079{ 1082{
1080 mutex_lock(&indio_dev->info_exist_lock); 1083 mutex_lock(&indio_dev->info_exist_lock);
1084
1085 device_del(&indio_dev->dev);
1086
1087 if (indio_dev->chrdev.dev)
1088 cdev_del(&indio_dev->chrdev);
1089 iio_device_unregister_debugfs(indio_dev);
1090
1091 iio_disable_all_buffers(indio_dev);
1092
1081 indio_dev->info = NULL; 1093 indio_dev->info = NULL;
1082 mutex_unlock(&indio_dev->info_exist_lock); 1094 mutex_unlock(&indio_dev->info_exist_lock);
1083 device_del(&indio_dev->dev);
1084} 1095}
1085EXPORT_SYMBOL(iio_device_unregister); 1096EXPORT_SYMBOL(iio_device_unregister);
1086subsys_initcall(iio_init); 1097subsys_initcall(iio_init);
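[Note] Taken together, the industrialio-core hunks above reorder registration so the character device is fully set up before device_add() announces the device, and unregistration tears things down in the reverse order under info_exist_lock, disabling any active buffers before the driver's info goes away. Condensed from the hunks (error handling omitted, not new code):

	/* register */
	cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
	indio_dev->chrdev.owner = indio_dev->info->driver_module;
	indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
	ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
	/* ... then ... */
	ret = device_add(&indio_dev->dev);

	/* unregister */
	mutex_lock(&indio_dev->info_exist_lock);
	device_del(&indio_dev->dev);
	if (indio_dev->chrdev.dev)
		cdev_del(&indio_dev->chrdev);
	iio_device_unregister_debugfs(indio_dev);
	iio_disable_all_buffers(indio_dev);
	indio_dev->info = NULL;
	mutex_unlock(&indio_dev->info_exist_lock);
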
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 10aa9ef86cec..6be65ef5faa9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -72,7 +72,8 @@ EXPORT_SYMBOL(iio_push_event);
72static unsigned int iio_event_poll(struct file *filep, 72static unsigned int iio_event_poll(struct file *filep,
73 struct poll_table_struct *wait) 73 struct poll_table_struct *wait)
74{ 74{
75 struct iio_event_interface *ev_int = filep->private_data; 75 struct iio_dev *indio_dev = filep->private_data;
76 struct iio_event_interface *ev_int = indio_dev->event_interface;
76 unsigned int events = 0; 77 unsigned int events = 0;
77 78
78 poll_wait(filep, &ev_int->wait, wait); 79 poll_wait(filep, &ev_int->wait, wait);
@@ -90,7 +91,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
90 size_t count, 91 size_t count,
91 loff_t *f_ps) 92 loff_t *f_ps)
92{ 93{
93 struct iio_event_interface *ev_int = filep->private_data; 94 struct iio_dev *indio_dev = filep->private_data;
95 struct iio_event_interface *ev_int = indio_dev->event_interface;
94 unsigned int copied; 96 unsigned int copied;
95 int ret; 97 int ret;
96 98
@@ -121,7 +123,8 @@ error_unlock:
121 123
122static int iio_event_chrdev_release(struct inode *inode, struct file *filep) 124static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
123{ 125{
124 struct iio_event_interface *ev_int = filep->private_data; 126 struct iio_dev *indio_dev = filep->private_data;
127 struct iio_event_interface *ev_int = indio_dev->event_interface;
125 128
126 spin_lock_irq(&ev_int->wait.lock); 129 spin_lock_irq(&ev_int->wait.lock);
127 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 130 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -133,6 +136,8 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
133 kfifo_reset_out(&ev_int->det_events); 136 kfifo_reset_out(&ev_int->det_events);
134 spin_unlock_irq(&ev_int->wait.lock); 137 spin_unlock_irq(&ev_int->wait.lock);
135 138
139 iio_device_put(indio_dev);
140
136 return 0; 141 return 0;
137} 142}
138 143
@@ -158,12 +163,15 @@ int iio_event_getfd(struct iio_dev *indio_dev)
158 return -EBUSY; 163 return -EBUSY;
159 } 164 }
160 spin_unlock_irq(&ev_int->wait.lock); 165 spin_unlock_irq(&ev_int->wait.lock);
161 fd = anon_inode_getfd("iio:event", 166 iio_device_get(indio_dev);
162 &iio_event_chrdev_fileops, ev_int, O_RDONLY); 167
168 fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
169 indio_dev, O_RDONLY);
163 if (fd < 0) { 170 if (fd < 0) {
164 spin_lock_irq(&ev_int->wait.lock); 171 spin_lock_irq(&ev_int->wait.lock);
165 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 172 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
166 spin_unlock_irq(&ev_int->wait.lock); 173 spin_unlock_irq(&ev_int->wait.lock);
174 iio_device_put(indio_dev);
167 } 175 }
168 return fd; 176 return fd;
169} 177}
@@ -276,7 +284,7 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
276 goto error_ret; 284 goto error_ret;
277 } 285 }
278 if (chan->modified) 286 if (chan->modified)
279 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel, 287 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
280 i/IIO_EV_DIR_MAX, 288 i/IIO_EV_DIR_MAX,
281 i%IIO_EV_DIR_MAX); 289 i%IIO_EV_DIR_MAX);
282 else if (chan->differential) 290 else if (chan->differential)
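[Note] The industrialio-event hunks above change what the event file descriptor pins: private_data is now the iio_dev itself (the event interface is reached through it), and a device reference is taken before anon_inode_getfd() and dropped on release or on failure, so the device cannot go away while an event fd is still open. Condensed from the hunks:

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY);
	if (fd < 0) {
		/* clear the busy flag, then drop the reference we took */
		iio_device_put(indio_dev);
	}
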
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e8d2849cc81d..cab3bc7494a2 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -29,9 +29,9 @@
29#define ST_MAGN_NUMBER_DATA_CHANNELS 3 29#define ST_MAGN_NUMBER_DATA_CHANNELS 3
30 30
31/* DEFAULT VALUE FOR SENSORS */ 31/* DEFAULT VALUE FOR SENSORS */
32#define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 32#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
33#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 33#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
34#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 34#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
35 35
36/* FULLSCALE */ 36/* FULLSCALE */
37#define ST_MAGN_FS_AVL_1300MG 1300 37#define ST_MAGN_FS_AVL_1300MG 1300
@@ -117,16 +117,16 @@
117static const struct iio_chan_spec st_magn_16bit_channels[] = { 117static const struct iio_chan_spec st_magn_16bit_channels[] = {
118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, 120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
121 ST_MAGN_DEFAULT_OUT_X_L_ADDR), 121 ST_MAGN_DEFAULT_OUT_X_H_ADDR),
122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, 124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
125 ST_MAGN_DEFAULT_OUT_Y_L_ADDR), 125 ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, 128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
129 ST_MAGN_DEFAULT_OUT_Z_L_ADDR), 129 ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
130 IIO_CHAN_SOFT_TIMESTAMP(3) 130 IIO_CHAN_SOFT_TIMESTAMP(3)
131}; 131};
132 132
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 64ccde3f1f7a..6d63883da1ab 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -255,12 +255,14 @@ static int tmp006_remove(struct i2c_client *client)
255#ifdef CONFIG_PM_SLEEP 255#ifdef CONFIG_PM_SLEEP
256static int tmp006_suspend(struct device *dev) 256static int tmp006_suspend(struct device *dev)
257{ 257{
258 return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev))); 258 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
259 return tmp006_powerdown(iio_priv(indio_dev));
259} 260}
260 261
261static int tmp006_resume(struct device *dev) 262static int tmp006_resume(struct device *dev)
262{ 263{
263 struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev)); 264 struct tmp006_data *data = iio_priv(i2c_get_clientdata(
265 to_i2c_client(dev)));
264 return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG, 266 return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
265 data->config | TMP006_CONFIG_MOD_MASK); 267 data->config | TMP006_CONFIG_MOD_MASK);
266} 268}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 653ac6bfc57a..6c923c7039a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1588 int resp_data_len; 1588 int resp_data_len;
1589 int resp_len; 1589 int resp_len;
1590 1590
1591 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; 1591 resp_data_len = 4;
1592 resp_len = sizeof(*srp_rsp) + resp_data_len; 1592 resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 1593
1594 srp_rsp = ioctx->ioctx.buf; 1594 srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1600 + atomic_xchg(&ch->req_lim_delta, 0)); 1600 + atomic_xchg(&ch->req_lim_delta, 0));
1601 srp_rsp->tag = tag; 1601 srp_rsp->tag = tag;
1602 1602
1603 if (rsp_code != SRP_TSK_MGMT_SUCCESS) { 1603 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1604 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1604 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1605 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); 1605 srp_rsp->data[3] = rsp_code;
1606 srp_rsp->data[3] = rsp_code;
1607 }
1608 1606
1609 return resp_len; 1607 return resp_len;
1610} 1608}
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
2358 transport_deregister_session(se_sess); 2356 transport_deregister_session(se_sess);
2359 ch->sess = NULL; 2357 ch->sess = NULL;
2360 2358
2359 ib_destroy_cm_id(ch->cm_id);
2360
2361 srpt_destroy_ch_ib(ch); 2361 srpt_destroy_ch_ib(ch);
2362 2362
2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
2368 list_del(&ch->list); 2368 list_del(&ch->list);
2369 spin_unlock_irq(&sdev->spinlock); 2369 spin_unlock_irq(&sdev->spinlock);
2370 2370
2371 ib_destroy_cm_id(ch->cm_id);
2372
2373 if (ch->release_done) 2371 if (ch->release_done)
2374 complete(ch->release_done); 2372 complete(ch->release_done);
2375 2373
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..181c9ba929cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
377 u32 cbar; 377 u32 cbar;
378 pgd_t *pgd; 378 pgd_t *pgd;
379}; 379};
380#define INVALID_IRPTNDX 0xff
380 381
381#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 382#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
382#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 383#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
840 if (IS_ERR_VALUE(ret)) { 841 if (IS_ERR_VALUE(ret)) {
841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 842 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
842 root_cfg->irptndx, irq); 843 root_cfg->irptndx, irq);
843 root_cfg->irptndx = -1; 844 root_cfg->irptndx = INVALID_IRPTNDX;
844 goto out_free_context; 845 goto out_free_context;
845 } 846 }
846 847
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
869 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); 870 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
870 arm_smmu_tlb_inv_context(root_cfg); 871 arm_smmu_tlb_inv_context(root_cfg);
871 872
872 if (root_cfg->irptndx != -1) { 873 if (root_cfg->irptndx != INVALID_IRPTNDX) {
873 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 874 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
874 free_irq(irq, domain); 875 free_irq(irq, domain);
875 } 876 }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1857 goto out_put_parent; 1858 goto out_put_parent;
1858 } 1859 }
1859 1860
1860 arm_smmu_device_reset(smmu);
1861
1862 for (i = 0; i < smmu->num_global_irqs; ++i) { 1861 for (i = 0; i < smmu->num_global_irqs; ++i) {
1863 err = request_irq(smmu->irqs[i], 1862 err = request_irq(smmu->irqs[i],
1864 arm_smmu_global_fault, 1863 arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1876 spin_lock(&arm_smmu_devices_lock); 1875 spin_lock(&arm_smmu_devices_lock);
1877 list_add(&smmu->list, &arm_smmu_devices); 1876 list_add(&smmu->list, &arm_smmu_devices);
1878 spin_unlock(&arm_smmu_devices_lock); 1877 spin_unlock(&arm_smmu_devices_lock);
1878
1879 arm_smmu_device_reset(smmu);
1879 return 0; 1880 return 0;
1880 1881
1881out_free_irqs: 1882out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
1966 return ret; 1967 return ret;
1967 1968
1968 /* Oh, for a proper bus abstraction */ 1969 /* Oh, for a proper bus abstraction */
1969 if (!iommu_present(&platform_bus_type)); 1970 if (!iommu_present(&platform_bus_type))
1970 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1971 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1971 1972
1972 if (!iommu_present(&amba_bustype)); 1973 if (!iommu_present(&amba_bustype))
1973 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1974 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1974 1975
1975 return 0; 1976 return 0;
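[Note] On INVALID_IRPTNDX above: irptndx appears to be an unsigned 8-bit field in arm_smmu_cfg (hence the 0xff sentinel), so storing -1 in it and later comparing the field against -1 never matches once the value is promoted back to int; comparing against a named 0xff sentinel is correct and self-documenting. Illustrative snippet, not from the patch:

	#define INVALID_IRPTNDX 0xff

	u8 irptndx = -1;	/* stored as 0xff */

	/* Promoted to int for the comparison: 255 != -1 is always true,
	 * so the old check could never detect the "no IRQ" case. */
	bool old_check = (irptndx != -1);		/* always true */
	bool new_check = (irptndx != INVALID_IRPTNDX);	/* false here  */
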
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 7f910c76ca0a..3c92780bda09 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
2295static void 2295static void
2296hfcpci_softirq(void *arg) 2296hfcpci_softirq(void *arg)
2297{ 2297{
2298 (void) driver_for_each_device(&hfc_driver.driver, NULL, arg, 2298 WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
2299 _hfcpci_softirq); 2299 _hfcpci_softirq) != 0);
2300 2300
2301 /* if next event would be in the past ... */ 2301 /* if next event would be in the past ... */
2302 if ((s32)(hfc_jiffies + tics - jiffies) <= 0) 2302 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 1063babe1d3a..36817e0a0b94 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -314,7 +314,7 @@ Amd7930_empty_Dfifo(struct IsdnCardState *cs, int flag)
314 314
315 t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx); 315 t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx);
316 QuickHex(t, cs->rcvbuf, cs->rcvidx); 316 QuickHex(t, cs->rcvbuf, cs->rcvidx);
317 debugl1(cs, cs->dlog); 317 debugl1(cs, "%s", cs->dlog);
318 } 318 }
319 /* moves received data in sk-buffer */ 319 /* moves received data in sk-buffer */
320 memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx); 320 memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx);
@@ -406,7 +406,7 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
406 406
407 t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count); 407 t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count);
408 QuickHex(t, deb_ptr, count); 408 QuickHex(t, deb_ptr, count);
409 debugl1(cs, cs->dlog); 409 debugl1(cs, "%s", cs->dlog);
410 } 410 }
411 /* AMD interrupts on */ 411 /* AMD interrupts on */
412 AmdIrqOn(cs); 412 AmdIrqOn(cs);
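[Note] The debugl1() and HiSax_putstatus() changes in this and the following HiSax hunks all apply the same rule: a buffer assembled at runtime (cs->dlog, bcs->blog, tmp) must be passed as a "%s" argument, never as the format string itself, since any '%' ending up in the hex dump would be interpreted as a conversion specifier. The safe pattern, as used above:

	debugl1(cs, "%s", cs->dlog);	/* buffer is data, not format      */
	/* debugl1(cs, cs->dlog);	   unsafe: dlog may contain '%'   */
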
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index ee9b9a03cffa..d1427bd6452d 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -285,7 +285,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
285 t += sprintf(t, "hdlc_empty_fifo %c cnt %d", 285 t += sprintf(t, "hdlc_empty_fifo %c cnt %d",
286 bcs->channel ? 'B' : 'A', count); 286 bcs->channel ? 'B' : 'A', count);
287 QuickHex(t, p, count); 287 QuickHex(t, p, count);
288 debugl1(cs, bcs->blog); 288 debugl1(cs, "%s", bcs->blog);
289 } 289 }
290} 290}
291 291
@@ -345,7 +345,7 @@ hdlc_fill_fifo(struct BCState *bcs)
345 t += sprintf(t, "hdlc_fill_fifo %c cnt %d", 345 t += sprintf(t, "hdlc_fill_fifo %c cnt %d",
346 bcs->channel ? 'B' : 'A', count); 346 bcs->channel ? 'B' : 'A', count);
347 QuickHex(t, p, count); 347 QuickHex(t, p, count);
348 debugl1(cs, bcs->blog); 348 debugl1(cs, "%s", bcs->blog);
349 } 349 }
350} 350}
351 351
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index bf04d2a3cf4a..b33f53b3ca93 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1896 ptr--; 1896 ptr--;
1897 *ptr++ = '\n'; 1897 *ptr++ = '\n';
1898 *ptr = 0; 1898 *ptr = 0;
1899 HiSax_putstatus(cs, NULL, cs->dlog); 1899 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1900 } else 1900 } else
1901 HiSax_putstatus(cs, "LogEcho: ", 1901 HiSax_putstatus(cs, "LogEcho: ",
1902 "warning Frame too big (%d)", 1902 "warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 8d0cf6e4dc00..4fc90de68d18 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -427,7 +427,7 @@ Memhscx_empty_fifo(struct BCState *bcs, int count)
427 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 427 t += sprintf(t, "hscx_empty_fifo %c cnt %d",
428 bcs->hw.hscx.hscx ? 'B' : 'A', count); 428 bcs->hw.hscx.hscx ? 'B' : 'A', count);
429 QuickHex(t, ptr, count); 429 QuickHex(t, ptr, count);
430 debugl1(cs, bcs->blog); 430 debugl1(cs, "%s", bcs->blog);
431 } 431 }
432} 432}
433 433
@@ -469,7 +469,7 @@ Memhscx_fill_fifo(struct BCState *bcs)
469 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 469 t += sprintf(t, "hscx_fill_fifo %c cnt %d",
470 bcs->hw.hscx.hscx ? 'B' : 'A', count); 470 bcs->hw.hscx.hscx ? 'B' : 'A', count);
471 QuickHex(t, ptr, count); 471 QuickHex(t, ptr, count);
472 debugl1(cs, bcs->blog); 472 debugl1(cs, "%s", bcs->blog);
473 } 473 }
474} 474}
475 475
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 1df6f9a56ca2..2be1c8a3bb5f 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -535,7 +535,7 @@ check_arcofi(struct IsdnCardState *cs)
535 t = tmp; 535 t = tmp;
536 t += sprintf(tmp, "Arcofi data"); 536 t += sprintf(tmp, "Arcofi data");
537 QuickHex(t, p, cs->dc.isac.mon_rxp); 537 QuickHex(t, p, cs->dc.isac.mon_rxp);
538 debugl1(cs, tmp); 538 debugl1(cs, "%s", tmp);
539 if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) { 539 if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) {
540 switch (cs->dc.isac.mon_rx[1]) { 540 switch (cs->dc.isac.mon_rx[1]) {
541 case 0x80: 541 case 0x80:
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index d4c98d330bfe..3f84dd8f1757 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
344 344
345 t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt); 345 t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt);
346 QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt); 346 QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt);
347 debugl1(cs, tmp); 347 debugl1(cs, "%s", tmp);
348 } 348 }
349 cs->hw.elsa.rcvcnt = 0; 349 cs->hw.elsa.rcvcnt = 0;
350} 350}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 3ccd724ff8c2..497bd026c237 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
901 ptr--; 901 ptr--;
902 *ptr++ = '\n'; 902 *ptr++ = '\n';
903 *ptr = 0; 903 *ptr = 0;
904 HiSax_putstatus(cs, NULL, cs->dlog); 904 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
905 } else 905 } else
906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); 906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
907 } 907 }
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index dc4574f735ef..fa1fefd711cd 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
674 ptr--; 674 ptr--;
675 *ptr++ = '\n'; 675 *ptr++ = '\n';
676 *ptr = 0; 676 *ptr = 0;
677 HiSax_putstatus(cs, NULL, cs->dlog); 677 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
678 } else 678 } else
679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); 679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
680 } 680 }
diff --git a/drivers/isdn/hisax/hscx_irq.c b/drivers/isdn/hisax/hscx_irq.c
index f398d4838937..a8d6188402c6 100644
--- a/drivers/isdn/hisax/hscx_irq.c
+++ b/drivers/isdn/hisax/hscx_irq.c
@@ -75,7 +75,7 @@ hscx_empty_fifo(struct BCState *bcs, int count)
75 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 75 t += sprintf(t, "hscx_empty_fifo %c cnt %d",
76 bcs->hw.hscx.hscx ? 'B' : 'A', count); 76 bcs->hw.hscx.hscx ? 'B' : 'A', count);
77 QuickHex(t, ptr, count); 77 QuickHex(t, ptr, count);
78 debugl1(cs, bcs->blog); 78 debugl1(cs, "%s", bcs->blog);
79 } 79 }
80} 80}
81 81
@@ -115,7 +115,7 @@ hscx_fill_fifo(struct BCState *bcs)
115 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 115 t += sprintf(t, "hscx_fill_fifo %c cnt %d",
116 bcs->hw.hscx.hscx ? 'B' : 'A', count); 116 bcs->hw.hscx.hscx ? 'B' : 'A', count);
117 QuickHex(t, ptr, count); 117 QuickHex(t, ptr, count);
118 debugl1(cs, bcs->blog); 118 debugl1(cs, "%s", bcs->blog);
119 } 119 }
120} 120}
121 121
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index db5321f6379b..51dae9167238 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -134,7 +134,7 @@ icc_empty_fifo(struct IsdnCardState *cs, int count)
134 134
135 t += sprintf(t, "icc_empty_fifo cnt %d", count); 135 t += sprintf(t, "icc_empty_fifo cnt %d", count);
136 QuickHex(t, ptr, count); 136 QuickHex(t, ptr, count);
137 debugl1(cs, cs->dlog); 137 debugl1(cs, "%s", cs->dlog);
138 } 138 }
139} 139}
140 140
@@ -176,7 +176,7 @@ icc_fill_fifo(struct IsdnCardState *cs)
176 176
177 t += sprintf(t, "icc_fill_fifo cnt %d", count); 177 t += sprintf(t, "icc_fill_fifo cnt %d", count);
178 QuickHex(t, ptr, count); 178 QuickHex(t, ptr, count);
179 debugl1(cs, cs->dlog); 179 debugl1(cs, "%s", cs->dlog);
180 } 180 }
181} 181}
182 182
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 74feb5c83067..5faa5de24305 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -260,7 +260,7 @@ dch_empty_fifo(struct IsdnCardState *cs, int count)
260 260
261 t += sprintf(t, "dch_empty_fifo() cnt %d", count); 261 t += sprintf(t, "dch_empty_fifo() cnt %d", count);
262 QuickHex(t, ptr, count); 262 QuickHex(t, ptr, count);
263 debugl1(cs, cs->dlog); 263 debugl1(cs, "%s", cs->dlog);
264 } 264 }
265} 265}
266 266
@@ -307,7 +307,7 @@ dch_fill_fifo(struct IsdnCardState *cs)
307 307
308 t += sprintf(t, "dch_fill_fifo() cnt %d", count); 308 t += sprintf(t, "dch_fill_fifo() cnt %d", count);
309 QuickHex(t, ptr, count); 309 QuickHex(t, ptr, count);
310 debugl1(cs, cs->dlog); 310 debugl1(cs, "%s", cs->dlog);
311 } 311 }
312} 312}
313 313
@@ -539,7 +539,7 @@ bch_empty_fifo(struct BCState *bcs, int count)
539 539
540 t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count); 540 t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count);
541 QuickHex(t, ptr, count); 541 QuickHex(t, ptr, count);
542 debugl1(cs, bcs->blog); 542 debugl1(cs, "%s", bcs->blog);
543 } 543 }
544} 544}
545 545
@@ -582,7 +582,7 @@ bch_fill_fifo(struct BCState *bcs)
582 582
583 t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count); 583 t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count);
584 QuickHex(t, ptr, count); 584 QuickHex(t, ptr, count);
585 debugl1(cs, bcs->blog); 585 debugl1(cs, "%s", bcs->blog);
586 } 586 }
587} 587}
588 588
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index a365ccc1c99c..7fdf78f46433 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -137,7 +137,7 @@ isac_empty_fifo(struct IsdnCardState *cs, int count)
137 137
138 t += sprintf(t, "isac_empty_fifo cnt %d", count); 138 t += sprintf(t, "isac_empty_fifo cnt %d", count);
139 QuickHex(t, ptr, count); 139 QuickHex(t, ptr, count);
140 debugl1(cs, cs->dlog); 140 debugl1(cs, "%s", cs->dlog);
141 } 141 }
142} 142}
143 143
@@ -179,7 +179,7 @@ isac_fill_fifo(struct IsdnCardState *cs)
179 179
180 t += sprintf(t, "isac_fill_fifo cnt %d", count); 180 t += sprintf(t, "isac_fill_fifo cnt %d", count);
181 QuickHex(t, ptr, count); 181 QuickHex(t, ptr, count);
182 debugl1(cs, cs->dlog); 182 debugl1(cs, "%s", cs->dlog);
183 } 183 }
184} 184}
185 185
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 7fdf34704fe5..f4956c73aa11 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -74,7 +74,7 @@ sendmsg(struct IsdnCardState *cs, u_char his, u_char creg, u_char len,
74 t = tmp; 74 t = tmp;
75 t += sprintf(t, "sendmbox cnt %d", len); 75 t += sprintf(t, "sendmbox cnt %d", len);
76 QuickHex(t, &msg[len-i], (i > 64) ? 64 : i); 76 QuickHex(t, &msg[len-i], (i > 64) ? 64 : i);
77 debugl1(cs, tmp); 77 debugl1(cs, "%s", tmp);
78 i -= 64; 78 i -= 64;
79 } 79 }
80 } 80 }
@@ -105,7 +105,7 @@ rcv_mbox(struct IsdnCardState *cs, struct isar_reg *ireg, u_char *msg)
105 t = tmp; 105 t = tmp;
106 t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb); 106 t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb);
107 QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i); 107 QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i);
108 debugl1(cs, tmp); 108 debugl1(cs, "%s", tmp);
109 i -= 64; 109 i -= 64;
110 } 110 }
111 } 111 }
@@ -1248,7 +1248,7 @@ isar_int_main(struct IsdnCardState *cs)
1248 tp += sprintf(debbuf, "msg iis(%x) msb(%x)", 1248 tp += sprintf(debbuf, "msg iis(%x) msb(%x)",
1249 ireg->iis, ireg->cmsb); 1249 ireg->iis, ireg->cmsb);
1250 QuickHex(tp, (u_char *)ireg->par, ireg->clsb); 1250 QuickHex(tp, (u_char *)ireg->par, ireg->clsb);
1251 debugl1(cs, debbuf); 1251 debugl1(cs, "%s", debbuf);
1252 } 1252 }
1253 break; 1253 break;
1254 case ISAR_IIS_INVMSG: 1254 case ISAR_IIS_INVMSG:
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index f946c58d8ab1..e2ae7871a209 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -81,10 +81,7 @@ modejade(struct BCState *bcs, int mode, int bc)
81 int jade = bcs->hw.hscx.hscx; 81 int jade = bcs->hw.hscx.hscx;
82 82
83 if (cs->debug & L1_DEB_HSCX) { 83 if (cs->debug & L1_DEB_HSCX) {
84 char tmp[40]; 84 debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc);
85 sprintf(tmp, "jade %c mode %d ichan %d",
86 'A' + jade, mode, bc);
87 debugl1(cs, tmp);
88 } 85 }
89 bcs->mode = mode; 86 bcs->mode = mode;
90 bcs->channel = bc; 87 bcs->channel = bc;
@@ -257,23 +254,18 @@ void
257clear_pending_jade_ints(struct IsdnCardState *cs) 254clear_pending_jade_ints(struct IsdnCardState *cs)
258{ 255{
259 int val; 256 int val;
260 char tmp[64];
261 257
262 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00); 258 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00);
263 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00); 259 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00);
264 260
265 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR); 261 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR);
266 sprintf(tmp, "jade B ISTA %x", val); 262 debugl1(cs, "jade B ISTA %x", val);
267 debugl1(cs, tmp);
268 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR); 263 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR);
269 sprintf(tmp, "jade A ISTA %x", val); 264 debugl1(cs, "jade A ISTA %x", val);
270 debugl1(cs, tmp);
271 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR); 265 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR);
272 sprintf(tmp, "jade B STAR %x", val); 266 debugl1(cs, "jade B STAR %x", val);
273 debugl1(cs, tmp);
274 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR); 267 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR);
275 sprintf(tmp, "jade A STAR %x", val); 268 debugl1(cs, "jade A STAR %x", val);
276 debugl1(cs, tmp);
277 /* Unmask ints */ 269 /* Unmask ints */
278 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8); 270 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8);
279 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8); 271 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
diff --git a/drivers/isdn/hisax/jade_irq.c b/drivers/isdn/hisax/jade_irq.c
index f521fc83dc76..b930da9b5aa6 100644
--- a/drivers/isdn/hisax/jade_irq.c
+++ b/drivers/isdn/hisax/jade_irq.c
@@ -65,7 +65,7 @@ jade_empty_fifo(struct BCState *bcs, int count)
65 t += sprintf(t, "jade_empty_fifo %c cnt %d", 65 t += sprintf(t, "jade_empty_fifo %c cnt %d",
66 bcs->hw.hscx.hscx ? 'B' : 'A', count); 66 bcs->hw.hscx.hscx ? 'B' : 'A', count);
67 QuickHex(t, ptr, count); 67 QuickHex(t, ptr, count);
68 debugl1(cs, bcs->blog); 68 debugl1(cs, "%s", bcs->blog);
69 } 69 }
70} 70}
71 71
@@ -105,7 +105,7 @@ jade_fill_fifo(struct BCState *bcs)
105 t += sprintf(t, "jade_fill_fifo %c cnt %d", 105 t += sprintf(t, "jade_fill_fifo %c cnt %d",
106 bcs->hw.hscx.hscx ? 'B' : 'A', count); 106 bcs->hw.hscx.hscx ? 'B' : 'A', count);
107 QuickHex(t, ptr, count); 107 QuickHex(t, ptr, count);
108 debugl1(cs, bcs->blog); 108 debugl1(cs, "%s", bcs->blog);
109 } 109 }
110} 110}
111 111
diff --git a/drivers/isdn/hisax/l3_1tr6.c b/drivers/isdn/hisax/l3_1tr6.c
index 4c1bca5caa1d..875402e76d0a 100644
--- a/drivers/isdn/hisax/l3_1tr6.c
+++ b/drivers/isdn/hisax/l3_1tr6.c
@@ -63,7 +63,7 @@ l3_1tr6_error(struct l3_process *pc, u_char *msg, struct sk_buff *skb)
63{ 63{
64 dev_kfree_skb(skb); 64 dev_kfree_skb(skb);
65 if (pc->st->l3.debug & L3_DEB_WARN) 65 if (pc->st->l3.debug & L3_DEB_WARN)
66 l3_debug(pc->st, msg); 66 l3_debug(pc->st, "%s", msg);
67 l3_1tr6_release_req(pc, 0, NULL); 67 l3_1tr6_release_req(pc, 0, NULL);
68} 68}
69 69
@@ -161,7 +161,6 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
161{ 161{
162 u_char *p; 162 u_char *p;
163 int bcfound = 0; 163 int bcfound = 0;
164 char tmp[80];
165 struct sk_buff *skb = arg; 164 struct sk_buff *skb = arg;
166 165
167 /* Channel Identification */ 166 /* Channel Identification */
@@ -214,10 +213,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
214 /* Signal all services, linklevel takes care of Service-Indicator */ 213 /* Signal all services, linklevel takes care of Service-Indicator */
215 if (bcfound) { 214 if (bcfound) {
216 if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) { 215 if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) {
217 sprintf(tmp, "non-digital call: %s -> %s", 216 l3_debug(pc->st, "non-digital call: %s -> %s",
218 pc->para.setup.phone, 217 pc->para.setup.phone,
219 pc->para.setup.eazmsn); 218 pc->para.setup.eazmsn);
220 l3_debug(pc->st, tmp);
221 } 219 }
222 newl3state(pc, 6); 220 newl3state(pc, 6);
223 pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc); 221 pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc);
@@ -301,7 +299,7 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
301{ 299{
302 u_char *p; 300 u_char *p;
303 int i, tmpcharge = 0; 301 int i, tmpcharge = 0;
304 char a_charge[8], tmp[32]; 302 char a_charge[8];
305 struct sk_buff *skb = arg; 303 struct sk_buff *skb = arg;
306 304
307 p = skb->data; 305 p = skb->data;
@@ -316,8 +314,8 @@ l3_1tr6_info(struct l3_process *pc, u_char pr, void *arg)
316 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 314 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
317 } 315 }
318 if (pc->st->l3.debug & L3_DEB_CHARGE) { 316 if (pc->st->l3.debug & L3_DEB_CHARGE) {
319 sprintf(tmp, "charging info %d", pc->para.chargeinfo); 317 l3_debug(pc->st, "charging info %d",
320 l3_debug(pc->st, tmp); 318 pc->para.chargeinfo);
321 } 319 }
322 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 320 } else if (pc->st->l3.debug & L3_DEB_CHARGE)
323 l3_debug(pc->st, "charging info not found"); 321 l3_debug(pc->st, "charging info not found");
@@ -399,7 +397,7 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
399 struct sk_buff *skb = arg; 397 struct sk_buff *skb = arg;
400 u_char *p; 398 u_char *p;
401 int i, tmpcharge = 0; 399 int i, tmpcharge = 0;
402 char a_charge[8], tmp[32]; 400 char a_charge[8];
403 401
404 StopAllL3Timer(pc); 402 StopAllL3Timer(pc);
405 p = skb->data; 403 p = skb->data;
@@ -414,8 +412,8 @@ l3_1tr6_disc(struct l3_process *pc, u_char pr, void *arg)
414 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 412 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc);
415 } 413 }
416 if (pc->st->l3.debug & L3_DEB_CHARGE) { 414 if (pc->st->l3.debug & L3_DEB_CHARGE) {
417 sprintf(tmp, "charging info %d", pc->para.chargeinfo); 415 l3_debug(pc->st, "charging info %d",
418 l3_debug(pc->st, tmp); 416 pc->para.chargeinfo);
419 } 417 }
420 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 418 } else if (pc->st->l3.debug & L3_DEB_CHARGE)
421 l3_debug(pc->st, "charging info not found"); 419 l3_debug(pc->st, "charging info not found");
@@ -746,7 +744,6 @@ up1tr6(struct PStack *st, int pr, void *arg)
746 int i, mt, cr; 744 int i, mt, cr;
747 struct l3_process *proc; 745 struct l3_process *proc;
748 struct sk_buff *skb = arg; 746 struct sk_buff *skb = arg;
749 char tmp[80];
750 747
751 switch (pr) { 748 switch (pr) {
752 case (DL_DATA | INDICATION): 749 case (DL_DATA | INDICATION):
@@ -762,26 +759,23 @@ up1tr6(struct PStack *st, int pr, void *arg)
762 } 759 }
763 if (skb->len < 4) { 760 if (skb->len < 4) {
764 if (st->l3.debug & L3_DEB_PROTERR) { 761 if (st->l3.debug & L3_DEB_PROTERR) {
765 sprintf(tmp, "up1tr6 len only %d", skb->len); 762 l3_debug(st, "up1tr6 len only %d", skb->len);
766 l3_debug(st, tmp);
767 } 763 }
768 dev_kfree_skb(skb); 764 dev_kfree_skb(skb);
769 return; 765 return;
770 } 766 }
771 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) { 767 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) {
772 if (st->l3.debug & L3_DEB_PROTERR) { 768 if (st->l3.debug & L3_DEB_PROTERR) {
773 sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d", 769 l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d",
774 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 770 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
775 skb->data[0], skb->len); 771 skb->data[0], skb->len);
776 l3_debug(st, tmp);
777 } 772 }
778 dev_kfree_skb(skb); 773 dev_kfree_skb(skb);
779 return; 774 return;
780 } 775 }
781 if (skb->data[1] != 1) { 776 if (skb->data[1] != 1) {
782 if (st->l3.debug & L3_DEB_PROTERR) { 777 if (st->l3.debug & L3_DEB_PROTERR) {
783 sprintf(tmp, "up1tr6 CR len not 1"); 778 l3_debug(st, "up1tr6 CR len not 1");
784 l3_debug(st, tmp);
785 } 779 }
786 dev_kfree_skb(skb); 780 dev_kfree_skb(skb);
787 return; 781 return;
@@ -791,9 +785,8 @@ up1tr6(struct PStack *st, int pr, void *arg)
791 if (skb->data[0] == PROTO_DIS_N0) { 785 if (skb->data[0] == PROTO_DIS_N0) {
792 dev_kfree_skb(skb); 786 dev_kfree_skb(skb);
793 if (st->l3.debug & L3_DEB_STATE) { 787 if (st->l3.debug & L3_DEB_STATE) {
794 sprintf(tmp, "up1tr6%s N0 mt %x unhandled", 788 l3_debug(st, "up1tr6%s N0 mt %x unhandled",
795 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt); 789 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", mt);
796 l3_debug(st, tmp);
797 } 790 }
798 } else if (skb->data[0] == PROTO_DIS_N1) { 791 } else if (skb->data[0] == PROTO_DIS_N1) {
799 if (!(proc = getl3proc(st, cr))) { 792 if (!(proc = getl3proc(st, cr))) {
@@ -801,8 +794,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
801 if (cr < 128) { 794 if (cr < 128) {
802 if (!(proc = new_l3_process(st, cr))) { 795 if (!(proc = new_l3_process(st, cr))) {
803 if (st->l3.debug & L3_DEB_PROTERR) { 796 if (st->l3.debug & L3_DEB_PROTERR) {
804 sprintf(tmp, "up1tr6 no roc mem"); 797 l3_debug(st, "up1tr6 no roc mem");
805 l3_debug(st, tmp);
806 } 798 }
807 dev_kfree_skb(skb); 799 dev_kfree_skb(skb);
808 return; 800 return;
@@ -821,8 +813,7 @@ up1tr6(struct PStack *st, int pr, void *arg)
821 } else { 813 } else {
822 if (!(proc = new_l3_process(st, cr))) { 814 if (!(proc = new_l3_process(st, cr))) {
823 if (st->l3.debug & L3_DEB_PROTERR) { 815 if (st->l3.debug & L3_DEB_PROTERR) {
824 sprintf(tmp, "up1tr6 no roc mem"); 816 l3_debug(st, "up1tr6 no roc mem");
825 l3_debug(st, tmp);
826 } 817 }
827 dev_kfree_skb(skb); 818 dev_kfree_skb(skb);
828 return; 819 return;
@@ -837,18 +828,16 @@ up1tr6(struct PStack *st, int pr, void *arg)
837 if (i == ARRAY_SIZE(datastln1)) { 828 if (i == ARRAY_SIZE(datastln1)) {
838 dev_kfree_skb(skb); 829 dev_kfree_skb(skb);
839 if (st->l3.debug & L3_DEB_STATE) { 830 if (st->l3.debug & L3_DEB_STATE) {
840 sprintf(tmp, "up1tr6%sstate %d mt %x unhandled", 831 l3_debug(st, "up1tr6%sstate %d mt %x unhandled",
841 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 832 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
842 proc->state, mt); 833 proc->state, mt);
843 l3_debug(st, tmp);
844 } 834 }
845 return; 835 return;
846 } else { 836 } else {
847 if (st->l3.debug & L3_DEB_STATE) { 837 if (st->l3.debug & L3_DEB_STATE) {
848 sprintf(tmp, "up1tr6%sstate %d mt %x", 838 l3_debug(st, "up1tr6%sstate %d mt %x",
849 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 839 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ",
850 proc->state, mt); 840 proc->state, mt);
851 l3_debug(st, tmp);
852 } 841 }
853 datastln1[i].rout(proc, pr, skb); 842 datastln1[i].rout(proc, pr, skb);
854 } 843 }
@@ -861,7 +850,6 @@ down1tr6(struct PStack *st, int pr, void *arg)
861 int i, cr; 850 int i, cr;
862 struct l3_process *proc; 851 struct l3_process *proc;
863 struct Channel *chan; 852 struct Channel *chan;
864 char tmp[80];
865 853
866 if ((DL_ESTABLISH | REQUEST) == pr) { 854 if ((DL_ESTABLISH | REQUEST) == pr) {
867 l3_msg(st, pr, NULL); 855 l3_msg(st, pr, NULL);
@@ -888,15 +876,13 @@ down1tr6(struct PStack *st, int pr, void *arg)
888 break; 876 break;
889 if (i == ARRAY_SIZE(downstl)) { 877 if (i == ARRAY_SIZE(downstl)) {
890 if (st->l3.debug & L3_DEB_STATE) { 878 if (st->l3.debug & L3_DEB_STATE) {
891 sprintf(tmp, "down1tr6 state %d prim %d unhandled", 879 l3_debug(st, "down1tr6 state %d prim %d unhandled",
892 proc->state, pr); 880 proc->state, pr);
893 l3_debug(st, tmp);
894 } 881 }
895 } else { 882 } else {
896 if (st->l3.debug & L3_DEB_STATE) { 883 if (st->l3.debug & L3_DEB_STATE) {
897 sprintf(tmp, "down1tr6 state %d prim %d", 884 l3_debug(st, "down1tr6 state %d prim %d",
898 proc->state, pr); 885 proc->state, pr);
899 l3_debug(st, tmp);
900 } 886 }
901 downstl[i].rout(proc, pr, arg); 887 downstl[i].rout(proc, pr, arg);
902 } 888 }
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index b646eed379df..233e432e06f6 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -176,7 +176,7 @@ static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s
176 else 176 else
177 j = i; 177 j = i;
178 QuickHex(t, p, j); 178 QuickHex(t, p, j);
179 debugl1(cs, tmp); 179 debugl1(cs, "%s", tmp);
180 p += j; 180 p += j;
181 i -= j; 181 i -= j;
182 t = tmp; 182 t = tmp;
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index 041bf52d9d0a..af1b020a81f1 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
1179 dp--; 1179 dp--;
1180 *dp++ = '\n'; 1180 *dp++ = '\n';
1181 *dp = 0; 1181 *dp = 0;
1182 HiSax_putstatus(cs, NULL, cs->dlog); 1182 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1183 } else 1183 } else
1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); 1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
1185} 1185}
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1246 } 1246 }
1247 if (finish) { 1247 if (finish) {
1248 *dp = 0; 1248 *dp = 0;
1249 HiSax_putstatus(cs, NULL, cs->dlog); 1249 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1250 return; 1250 return;
1251 } 1251 }
1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ 1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]); 1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
1510 } 1510 }
1511 *dp = 0; 1511 *dp = 0;
1512 HiSax_putstatus(cs, NULL, cs->dlog); 1512 HiSax_putstatus(cs, NULL, "%s", cs->dlog);
1513} 1513}
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index d8cac6935818..a85895585d90 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -154,7 +154,7 @@ W6692_empty_fifo(struct IsdnCardState *cs, int count)
154 154
155 t += sprintf(t, "W6692_empty_fifo cnt %d", count); 155 t += sprintf(t, "W6692_empty_fifo cnt %d", count);
156 QuickHex(t, ptr, count); 156 QuickHex(t, ptr, count);
157 debugl1(cs, cs->dlog); 157 debugl1(cs, "%s", cs->dlog);
158 } 158 }
159} 159}
160 160
@@ -196,7 +196,7 @@ W6692_fill_fifo(struct IsdnCardState *cs)
196 196
197 t += sprintf(t, "W6692_fill_fifo cnt %d", count); 197 t += sprintf(t, "W6692_fill_fifo cnt %d", count);
198 QuickHex(t, ptr, count); 198 QuickHex(t, ptr, count);
199 debugl1(cs, cs->dlog); 199 debugl1(cs, "%s", cs->dlog);
200 } 200 }
201} 201}
202 202
@@ -226,7 +226,7 @@ W6692B_empty_fifo(struct BCState *bcs, int count)
226 t += sprintf(t, "W6692B_empty_fifo %c cnt %d", 226 t += sprintf(t, "W6692B_empty_fifo %c cnt %d",
227 bcs->channel + '1', count); 227 bcs->channel + '1', count);
228 QuickHex(t, ptr, count); 228 QuickHex(t, ptr, count);
229 debugl1(cs, bcs->blog); 229 debugl1(cs, "%s", bcs->blog);
230 } 230 }
231} 231}
232 232
@@ -264,7 +264,7 @@ W6692B_fill_fifo(struct BCState *bcs)
264 t += sprintf(t, "W6692B_fill_fifo %c cnt %d", 264 t += sprintf(t, "W6692B_fill_fifo %c cnt %d",
265 bcs->channel + '1', count); 265 bcs->channel + '1', count);
266 QuickHex(t, ptr, count); 266 QuickHex(t, ptr, count);
267 debugl1(cs, bcs->blog); 267 debugl1(cs, "%s", bcs->blog);
268 } 268 }
269} 269}
270 270
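
Every hisax hunk above makes the same change: a staging buffer that was just filled from received frame data (and so can contain '%' characters) was being handed to debugl1(), l3_debug() or HiSax_putstatus() as the format argument. Routing it through a literal "%s" prints the buffer verbatim instead of interpreting it, closing a classic format-string hole and quieting -Wformat-security. A minimal sketch of the pattern, with debugl1() standing in for any printf-style logger and cs/buf/pkt as hypothetical names:

	/* sketch only: buf may contain '%' taken from the wire */
	char buf[64];
	snprintf(buf, sizeof(buf), "rx: %s", pkt);

	debugl1(cs, buf);        /* unsafe: buf is parsed as a format string */
	debugl1(cs, "%s", buf);  /* safe: buf is printed verbatim */
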
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
index eba380d7b17f..42d2b893ea67 100644
--- a/drivers/mailbox/mailbox-omap2.c
+++ b/drivers/mailbox/mailbox-omap2.c
@@ -325,7 +325,6 @@ static int omap2_mbox_remove(struct platform_device *pdev)
325 kfree(privblk); 325 kfree(privblk);
326 kfree(mboxblk); 326 kfree(mboxblk);
327 kfree(list); 327 kfree(list);
328 platform_set_drvdata(pdev, NULL);
329 328
330 return 0; 329 return 0;
331} 330}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b39f6f0b45f2..0f12382aa35d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -498,7 +498,7 @@ struct cached_dev {
498 */ 498 */
499 atomic_t has_dirty; 499 atomic_t has_dirty;
500 500
501 struct ratelimit writeback_rate; 501 struct bch_ratelimit writeback_rate;
502 struct delayed_work writeback_rate_update; 502 struct delayed_work writeback_rate_update;
503 503
504 /* 504 /*
@@ -507,10 +507,9 @@ struct cached_dev {
507 */ 507 */
508 sector_t last_read; 508 sector_t last_read;
509 509
510 /* Number of writeback bios in flight */ 510 /* Limit number of writeback bios in flight */
511 atomic_t in_flight; 511 struct semaphore in_flight;
512 struct closure_with_timer writeback; 512 struct closure_with_timer writeback;
513 struct closure_waitlist writeback_wait;
514 513
515 struct keybuf writeback_keys; 514 struct keybuf writeback_keys;
516 515
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 8010eed06a51..22d1ae72c282 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
926 926
927/* Mergesort */ 927/* Mergesort */
928 928
929static void sort_key_next(struct btree_iter *iter,
930 struct btree_iter_set *i)
931{
932 i->k = bkey_next(i->k);
933
934 if (i->k == i->end)
935 *i = iter->data[--iter->used];
936}
937
929static void btree_sort_fixup(struct btree_iter *iter) 938static void btree_sort_fixup(struct btree_iter *iter)
930{ 939{
931 while (iter->used > 1) { 940 while (iter->used > 1) {
932 struct btree_iter_set *top = iter->data, *i = top + 1; 941 struct btree_iter_set *top = iter->data, *i = top + 1;
933 struct bkey *k;
934 942
935 if (iter->used > 2 && 943 if (iter->used > 2 &&
936 btree_iter_cmp(i[0], i[1])) 944 btree_iter_cmp(i[0], i[1]))
937 i++; 945 i++;
938 946
939 for (k = i->k; 947 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
940 k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
941 k = bkey_next(k))
942 if (top->k > i->k)
943 __bch_cut_front(top->k, k);
944 else if (KEY_SIZE(k))
945 bch_cut_back(&START_KEY(k), top->k);
946
947 if (top->k < i->k || k == i->k)
948 break; 948 break;
949 949
950 heap_sift(iter, i - top, btree_iter_cmp); 950 if (!KEY_SIZE(i->k)) {
951 sort_key_next(iter, i);
952 heap_sift(iter, i - top, btree_iter_cmp);
953 continue;
954 }
955
956 if (top->k > i->k) {
957 if (bkey_cmp(top->k, i->k) >= 0)
958 sort_key_next(iter, i);
959 else
960 bch_cut_front(top->k, i->k);
961
962 heap_sift(iter, i - top, btree_iter_cmp);
963 } else {
964 /* can't happen because of comparison func */
965 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
966 bch_cut_back(&START_KEY(i->k), top->k);
967 }
951 } 968 }
952} 969}
953 970
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f9764e61978b..f42fc7ed9cd6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
255 255
256 return; 256 return;
257err: 257err:
258 bch_cache_set_error(b->c, "io error reading bucket %lu", 258 bch_cache_set_error(b->c, "io error reading bucket %zu",
259 PTR_BUCKET_NR(b->c, &b->key, 0)); 259 PTR_BUCKET_NR(b->c, &b->key, 0));
260} 260}
261 261
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
612 return SHRINK_STOP; 612 return SHRINK_STOP;
613 613
614 /* Return -1 if we can't do anything right now */ 614 /* Return -1 if we can't do anything right now */
615 if (sc->gfp_mask & __GFP_WAIT) 615 if (sc->gfp_mask & __GFP_IO)
616 mutex_lock(&c->bucket_lock); 616 mutex_lock(&c->bucket_lock);
617 else if (!mutex_trylock(&c->bucket_lock)) 617 else if (!mutex_trylock(&c->bucket_lock))
618 return -1; 618 return -1;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ba95ab84b2be..8435f81e5d85 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); 153 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154 pr_debug("%u journal buckets", ca->sb.njournal_buckets); 154 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155 155
156 /* Read journal buckets ordered by golden ratio hash to quickly 156 /*
157 * Read journal buckets ordered by golden ratio hash to quickly
157 * find a sequence of buckets with valid journal entries 158 * find a sequence of buckets with valid journal entries
158 */ 159 */
159 for (i = 0; i < ca->sb.njournal_buckets; i++) { 160 for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
166 goto bsearch; 167 goto bsearch;
167 } 168 }
168 169
169 /* If that fails, check all the buckets we haven't checked 170 /*
171 * If that fails, check all the buckets we haven't checked
170 * already 172 * already
171 */ 173 */
172 pr_debug("falling back to linear search"); 174 pr_debug("falling back to linear search");
173 175
174 for (l = 0; l < ca->sb.njournal_buckets; l++) { 176 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
175 if (test_bit(l, bitmap)) 177 l < ca->sb.njournal_buckets;
176 continue; 178 l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
177
178 if (read_bucket(l)) 179 if (read_bucket(l))
179 goto bsearch; 180 goto bsearch;
180 } 181
182 if (list_empty(list))
183 continue;
181bsearch: 184bsearch:
182 /* Binary search */ 185 /* Binary search */
183 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); 186 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
197 r = m; 200 r = m;
198 } 201 }
199 202
200 /* Read buckets in reverse order until we stop finding more 203 /*
204 * Read buckets in reverse order until we stop finding more
201 * journal entries 205 * journal entries
202 */ 206 */
203 pr_debug("finishing up"); 207 pr_debug("finishing up: m %u njournal_buckets %u",
208 m, ca->sb.njournal_buckets);
204 l = m; 209 l = m;
205 210
206 while (1) { 211 while (1) {
@@ -228,9 +233,10 @@ bsearch:
228 } 233 }
229 } 234 }
230 235
231 c->journal.seq = list_entry(list->prev, 236 if (!list_empty(list))
232 struct journal_replay, 237 c->journal.seq = list_entry(list->prev,
233 list)->j.seq; 238 struct journal_replay,
239 list)->j.seq;
234 240
235 return 0; 241 return 0;
236#undef read_bucket 242#undef read_bucket
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
428 return; 434 return;
429 } 435 }
430 436
431 switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { 437 switch (atomic_read(&ja->discard_in_flight)) {
432 case DISCARD_IN_FLIGHT: 438 case DISCARD_IN_FLIGHT:
433 return; 439 return;
434 440
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
689 if (cl) 695 if (cl)
690 BUG_ON(!closure_wait(&w->wait, cl)); 696 BUG_ON(!closure_wait(&w->wait, cl));
691 697
698 closure_flush(&c->journal.io);
692 __journal_try_write(c, true); 699 __journal_try_write(c, true);
693 } 700 }
694} 701}
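
The bch_journal_read() hunk above swaps an open-coded "skip buckets whose bit is already set" loop for the bitmap iterators, so the fallback pass walks only unread buckets, and it guards the final list_entry(list->prev, ...) with a list_empty() check so an empty replay list no longer dereferences the list head. A sketch of the iteration pattern, with bitmap, nbuckets and read_bucket() as stand-ins for the fields used above:

	/* sketch: visit only the buckets whose bit is still clear */
	unsigned long l;

	for (l = find_first_zero_bit(bitmap, nbuckets);
	     l < nbuckets;
	     l = find_next_zero_bit(bitmap, nbuckets, l + 1))
		if (read_bucket(l))
			break;
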
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 786a1a4f74d8..71eb233b9ace 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 999
1000 if (s->op.flush_journal) { 1000 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1001 /* Also need to send a flush to the backing device */
1002 s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, 1002 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
1003 dc->disk.bio_split); 1003 dc->disk.bio_split);
1004 1004
1005 bio->bi_size = 0; 1005 flush->bi_rw = WRITE_FLUSH;
1006 bio->bi_vcnt = 0; 1006 flush->bi_bdev = bio->bi_bdev;
1007 closure_bio_submit(bio, cl, s->d); 1007 flush->bi_end_io = request_endio;
1008 flush->bi_private = cl;
1009
1010 closure_bio_submit(flush, cl, s->d);
1008 } else { 1011 } else {
1009 s->op.cache_bio = bio; 1012 s->op.cache_bio = bio;
1010 } 1013 }
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4fe6ab2fbe2e..924dcfdae111 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -223,8 +223,13 @@ STORE(__cached_dev)
223 } 223 }
224 224
225 if (attr == &sysfs_label) { 225 if (attr == &sysfs_label) {
226 /* note: endlines are preserved */ 226 if (size > SB_LABEL_SIZE)
227 memcpy(dc->sb.label, buf, SB_LABEL_SIZE); 227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
228 bch_write_bdev_super(dc, NULL); 233 bch_write_bdev_super(dc, NULL);
229 if (dc->disk.c) { 234 if (dc->disk.c) {
230 memcpy(dc->disk.c->uuids[dc->disk.id].label, 235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
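
The sysfs label store above gains the bounds checking the old memcpy() lacked: oversized writes are rejected with -EINVAL, short writes are NUL-terminated, and a single trailing newline (as appended by echo) is stripped before the label is written back to the superblock. The same pattern for a generic fixed-size label, shown as a hedged sketch with label_store() and cap as hypothetical names:

	/* sketch of a bounded store for a fixed-size label buffer */
	static ssize_t label_store(char *label, size_t cap,
				   const char *buf, size_t size)
	{
		if (size > cap)
			return -EINVAL;
		memcpy(label, buf, size);
		if (size < cap)
			label[size] = '\0';
		if (size && label[size - 1] == '\n')
			label[size - 1] = '\0';
		return size;
	}
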
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 98eb81159a22..420dad545c7d 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
190 stats->last = now ?: 1; 190 stats->last = now ?: 1;
191} 191}
192 192
193unsigned bch_next_delay(struct ratelimit *d, uint64_t done) 193/**
194 * bch_next_delay() - increment @d by the amount of work done, and return how
195 * long to delay until the next time to do some work.
196 *
197 * @d - the struct bch_ratelimit to update
198 * @done - the amount of work done, in arbitrary units
199 *
200 * Returns the amount of time to delay by, in jiffies
201 */
202uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
194{ 203{
195 uint64_t now = local_clock(); 204 uint64_t now = local_clock();
196 205
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1ae2a73ad85f..ea345c6896f4 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
450 (ewma) >> factor; \ 450 (ewma) >> factor; \
451}) 451})
452 452
453struct ratelimit { 453struct bch_ratelimit {
454 /* Next time we want to do some work, in nanoseconds */
454 uint64_t next; 455 uint64_t next;
456
457 /*
458 * Rate at which we want to do work, in units per nanosecond
459 * The units here correspond to the units passed to bch_next_delay()
460 */
455 unsigned rate; 461 unsigned rate;
456}; 462};
457 463
458static inline void ratelimit_reset(struct ratelimit *d) 464static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
459{ 465{
460 d->next = local_clock(); 466 d->next = local_clock();
461} 467}
462 468
463unsigned bch_next_delay(struct ratelimit *d, uint64_t done); 469uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
464 470
465#define __DIV_SAFE(n, d, zero) \ 471#define __DIV_SAFE(n, d, zero) \
466({ \ 472({ \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 22cbff551628..ba3ee48320f2 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
94 94
95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 95static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
96{ 96{
97 uint64_t ret;
98
97 if (atomic_read(&dc->disk.detaching) || 99 if (atomic_read(&dc->disk.detaching) ||
98 !dc->writeback_percent) 100 !dc->writeback_percent)
99 return 0; 101 return 0;
100 102
101 return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 103 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
104
105 return min_t(uint64_t, ret, HZ);
102} 106}
103 107
104/* Background writeback */ 108/* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
208 212
209 up_write(&dc->writeback_lock); 213 up_write(&dc->writeback_lock);
210 214
211 ratelimit_reset(&dc->writeback_rate); 215 bch_ratelimit_reset(&dc->writeback_rate);
212 216
213 /* Punt to workqueue only so we don't recurse and blow the stack */ 217 /* Punt to workqueue only so we don't recurse and blow the stack */
214 continue_at(cl, read_dirty, dirty_wq); 218 continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
318 } 322 }
319 323
320 bch_keybuf_del(&dc->writeback_keys, w); 324 bch_keybuf_del(&dc->writeback_keys, w);
321 atomic_dec_bug(&dc->in_flight); 325 up(&dc->in_flight);
322
323 closure_wake_up(&dc->writeback_wait);
324 326
325 closure_return_with_destructor(cl, dirty_io_destructor); 327 closure_return_with_destructor(cl, dirty_io_destructor);
326} 328}
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
349 351
350 closure_bio_submit(&io->bio, cl, &io->dc->disk); 352 closure_bio_submit(&io->bio, cl, &io->dc->disk);
351 353
352 continue_at(cl, write_dirty_finish, dirty_wq); 354 continue_at(cl, write_dirty_finish, system_wq);
353} 355}
354 356
355static void read_dirty_endio(struct bio *bio, int error) 357static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
369 371
370 closure_bio_submit(&io->bio, cl, &io->dc->disk); 372 closure_bio_submit(&io->bio, cl, &io->dc->disk);
371 373
372 continue_at(cl, write_dirty, dirty_wq); 374 continue_at(cl, write_dirty, system_wq);
373} 375}
374 376
375static void read_dirty(struct closure *cl) 377static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
394 396
395 if (delay > 0 && 397 if (delay > 0 &&
396 (KEY_START(&w->key) != dc->last_read || 398 (KEY_START(&w->key) != dc->last_read ||
397 jiffies_to_msecs(delay) > 50)) { 399 jiffies_to_msecs(delay) > 50))
398 w->private = NULL; 400 delay = schedule_timeout_uninterruptible(delay);
399
400 closure_delay(&dc->writeback, delay);
401 continue_at(cl, read_dirty, dirty_wq);
402 }
403 401
404 dc->last_read = KEY_OFFSET(&w->key); 402 dc->last_read = KEY_OFFSET(&w->key);
405 403
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
424 422
425 trace_bcache_writeback(&w->key); 423 trace_bcache_writeback(&w->key);
426 424
427 closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); 425 down(&dc->in_flight);
426 closure_call(&io->cl, read_dirty_submit, NULL, cl);
428 427
429 delay = writeback_delay(dc, KEY_SIZE(&w->key)); 428 delay = writeback_delay(dc, KEY_SIZE(&w->key));
430
431 atomic_inc(&dc->in_flight);
432
433 if (!closure_wait_event(&dc->writeback_wait, cl,
434 atomic_read(&dc->in_flight) < 64))
435 continue_at(cl, read_dirty, dirty_wq);
436 } 429 }
437 430
438 if (0) { 431 if (0) {
@@ -442,7 +435,11 @@ err:
442 bch_keybuf_del(&dc->writeback_keys, w); 435 bch_keybuf_del(&dc->writeback_keys, w);
443 } 436 }
444 437
445 refill_dirty(cl); 438 /*
439 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
440 * freed) before refilling again
441 */
442 continue_at(cl, refill_dirty, dirty_wq);
446} 443}
447 444
448/* Init */ 445/* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
484 481
485void bch_cached_dev_writeback_init(struct cached_dev *dc) 482void bch_cached_dev_writeback_init(struct cached_dev *dc)
486{ 483{
484 sema_init(&dc->in_flight, 64);
487 closure_init_unlocked(&dc->writeback); 485 closure_init_unlocked(&dc->writeback);
488 init_rwsem(&dc->writeback_lock); 486 init_rwsem(&dc->writeback_lock);
489 487
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
513 511
514int __init bch_writeback_init(void) 512int __init bch_writeback_init(void)
515{ 513{
516 dirty_wq = create_singlethread_workqueue("bcache_writeback"); 514 dirty_wq = create_workqueue("bcache_writeback");
517 if (!dirty_wq) 515 if (!dirty_wq)
518 return -ENOMEM; 516 return -ENOMEM;
519 517
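
The writeback changes above replace the open-coded in-flight counter plus closure waitlist with a counting semaphore: bch_cached_dev_writeback_init() seeds it with 64, read_dirty() takes a slot with down() before issuing each writeback IO, and write_dirty_finish() returns it with up(), so the submitter simply sleeps once 64 IOs are outstanding. A bare sketch of that throttling pattern (the function names here are hypothetical):

	#include <linux/semaphore.h>

	static struct semaphore in_flight;

	static void throttle_init(void)
	{
		sema_init(&in_flight, 64);	/* at most 64 IOs outstanding */
	}

	static void submit_one(void)
	{
		down(&in_flight);		/* sleeps when the limit is hit */
		/* issue the IO; its completion handler must call up() */
	}

	static void complete_one(void)
	{
		up(&in_flight);			/* release the slot */
	}
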
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea49834377c8..2a20986a2fec 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,8 +19,6 @@
19#define DM_MSG_PREFIX "io" 19#define DM_MSG_PREFIX "io"
20 20
21#define DM_IO_MAX_REGIONS BITS_PER_LONG 21#define DM_IO_MAX_REGIONS BITS_PER_LONG
22#define MIN_IOS 16
23#define MIN_BIOS 16
24 22
25struct dm_io_client { 23struct dm_io_client {
26 mempool_t *pool; 24 mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
50struct dm_io_client *dm_io_client_create(void) 48struct dm_io_client *dm_io_client_create(void)
51{ 49{
52 struct dm_io_client *client; 50 struct dm_io_client *client;
51 unsigned min_ios = dm_get_reserved_bio_based_ios();
53 52
54 client = kmalloc(sizeof(*client), GFP_KERNEL); 53 client = kmalloc(sizeof(*client), GFP_KERNEL);
55 if (!client) 54 if (!client)
56 return ERR_PTR(-ENOMEM); 55 return ERR_PTR(-ENOMEM);
57 56
58 client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); 57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
59 if (!client->pool) 58 if (!client->pool)
60 goto bad; 59 goto bad;
61 60
62 client->bios = bioset_create(MIN_BIOS, 0); 61 client->bios = bioset_create(min_ios, 0);
63 if (!client->bios) 62 if (!client->bios)
64 goto bad; 63 goto bad;
65 64
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b759a127f9c3..de570a558764 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm.h"
10#include "dm-path-selector.h" 11#include "dm-path-selector.h"
11#include "dm-uevent.h" 12#include "dm-uevent.h"
12 13
@@ -116,8 +117,6 @@ struct dm_mpath_io {
116 117
117typedef int (*action_fn) (struct pgpath *pgpath); 118typedef int (*action_fn) (struct pgpath *pgpath);
118 119
119#define MIN_IOS 256 /* Mempool size */
120
121static struct kmem_cache *_mpio_cache; 120static struct kmem_cache *_mpio_cache;
122 121
123static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
190static struct multipath *alloc_multipath(struct dm_target *ti) 189static struct multipath *alloc_multipath(struct dm_target *ti)
191{ 190{
192 struct multipath *m; 191 struct multipath *m;
192 unsigned min_ios = dm_get_reserved_rq_based_ios();
193 193
194 m = kzalloc(sizeof(*m), GFP_KERNEL); 194 m = kzalloc(sizeof(*m), GFP_KERNEL);
195 if (m) { 195 if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
202 INIT_WORK(&m->trigger_event, trigger_event); 202 INIT_WORK(&m->trigger_event, trigger_event);
203 init_waitqueue_head(&m->pg_init_wait); 203 init_waitqueue_head(&m->pg_init_wait);
204 mutex_init(&m->work_mutex); 204 mutex_init(&m->work_mutex);
205 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); 205 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
206 if (!m->mpio_pool) { 206 if (!m->mpio_pool) {
207 kfree(m); 207 kfree(m);
208 return NULL; 208 return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
1268 case -EREMOTEIO: 1268 case -EREMOTEIO:
1269 case -EILSEQ: 1269 case -EILSEQ:
1270 case -ENODATA: 1270 case -ENODATA:
1271 case -ENOSPC:
1271 return 1; 1272 return 1;
1272 } 1273 }
1273 1274
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
1298 if (!error && !clone->errors) 1299 if (!error && !clone->errors)
1299 return 0; /* I/O complete */ 1300 return 0; /* I/O complete */
1300 1301
1301 if (noretry_error(error)) 1302 if (noretry_error(error)) {
1303 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1304 !clone->q->limits.max_write_same_sectors) {
1305 struct queue_limits *limits;
1306
1307 /* device doesn't really support WRITE SAME, disable it */
1308 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1309 limits->max_write_same_sectors = 0;
1310 }
1302 return error; 1311 return error;
1312 }
1303 1313
1304 if (mpio->pgpath) 1314 if (mpio->pgpath)
1305 fail_path(mpio->pgpath); 1315 fail_path(mpio->pgpath);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 3ac415675b6c..4caa8e6d59d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_work(&req.work); 259 flush_workqueue(ps->metadata_wq);
260 260
261 return req.result; 261 return req.result;
262} 262}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c434e5aab2df..aec57d76db5d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
725 */ 725 */
726static int init_hash_tables(struct dm_snapshot *s) 726static int init_hash_tables(struct dm_snapshot *s)
727{ 727{
728 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; 728 sector_t hash_size, cow_dev_size, max_buckets;
729 729
730 /* 730 /*
731 * Calculate based on the size of the original volume or 731 * Calculate based on the size of the original volume or
732 * the COW volume... 732 * the COW volume...
733 */ 733 */
734 cow_dev_size = get_dev_size(s->cow->bdev); 734 cow_dev_size = get_dev_size(s->cow->bdev);
735 origin_dev_size = get_dev_size(s->origin->bdev);
736 max_buckets = calc_max_buckets(); 735 max_buckets = calc_max_buckets();
737 736
738 hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; 737 hash_size = cow_dev_size >> s->store->chunk_shift;
739 hash_size = min(hash_size, max_buckets); 738 hash_size = min(hash_size, max_buckets);
740 739
741 if (hash_size < 64) 740 if (hash_size < 64)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8ae31e8d3d64..3d404c1371ed 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
451 struct dm_stat_percpu *p; 451 struct dm_stat_percpu *p;
452 452
453 /* 453 /*
454 * For strict correctness we should use local_irq_disable/enable 454 * For strict correctness we should use local_irq_save/restore
455 * instead of preempt_disable/enable. 455 * instead of preempt_disable/enable.
456 * 456 *
457 * This is racy if the driver finishes bios from non-interrupt 457 * preempt_disable/enable is racy if the driver finishes bios
458 * context as well as from interrupt context or from more different 458 * from non-interrupt context as well as from interrupt context
459 * interrupts. 459 * or from more different interrupts.
460 * 460 *
461 * However, the race only results in not counting some events, 461 * On 64-bit architectures the race only results in not counting some
462 * so it is acceptable. 462 * events, so it is acceptable. On 32-bit architectures the race could
463 * cause the counter going off by 2^32, so we need to do proper locking
464 * there.
463 * 465 *
464 * part_stat_lock()/part_stat_unlock() have this race too. 466 * part_stat_lock()/part_stat_unlock() have this race too.
465 */ 467 */
468#if BITS_PER_LONG == 32
469 unsigned long flags;
470 local_irq_save(flags);
471#else
466 preempt_disable(); 472 preempt_disable();
473#endif
467 p = &s->stat_percpu[smp_processor_id()][entry]; 474 p = &s->stat_percpu[smp_processor_id()][entry];
468 475
469 if (!end) { 476 if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
478 p->ticks[idx] += duration; 485 p->ticks[idx] += duration;
479 } 486 }
480 487
488#if BITS_PER_LONG == 32
489 local_irq_restore(flags);
490#else
481 preempt_enable(); 491 preempt_enable();
492#endif
482} 493}
483 494
484static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, 495static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ed063427d676..2c0cf511ec23 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2095 * them down to the data device. The thin device's discard 2095 * them down to the data device. The thin device's discard
2096 * processing will cause mappings to be removed from the btree. 2096 * processing will cause mappings to be removed from the btree.
2097 */ 2097 */
2098 ti->discard_zeroes_data_unsupported = true;
2098 if (pf.discard_enabled && pf.discard_passdown) { 2099 if (pf.discard_enabled && pf.discard_passdown) {
2099 ti->num_discard_bios = 1; 2100 ti->num_discard_bios = 1;
2100 2101
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2104 * thin devices' discard limits consistent). 2105 * thin devices' discard limits consistent).
2105 */ 2106 */
2106 ti->discards_supported = true; 2107 ti->discards_supported = true;
2107 ti->discard_zeroes_data_unsupported = true;
2108 } 2108 }
2109 ti->private = pt; 2109 ti->private = pt;
2110 2110
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2689 * They get transferred to the live pool in bind_control_target() 2689 * They get transferred to the live pool in bind_control_target()
2690 * called from pool_preresume(). 2690 * called from pool_preresume().
2691 */ 2691 */
2692 if (!pt->adjusted_pf.discard_enabled) 2692 if (!pt->adjusted_pf.discard_enabled) {
2693 /*
2694 * Must explicitly disallow stacking discard limits otherwise the
2695 * block layer will stack them if pool's data device has support.
2696 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2697 * user to see that, so make sure to set all discard limits to 0.
2698 */
2699 limits->discard_granularity = 0;
2693 return; 2700 return;
2701 }
2694 2702
2695 disable_passdown_if_not_supported(pt); 2703 disable_passdown_if_not_supported(pt);
2696 2704
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2826 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2834 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2827 2835
2828 /* In case the pool supports discards, pass them on. */ 2836 /* In case the pool supports discards, pass them on. */
2837 ti->discard_zeroes_data_unsupported = true;
2829 if (tc->pool->pf.discard_enabled) { 2838 if (tc->pool->pf.discard_enabled) {
2830 ti->discards_supported = true; 2839 ti->discards_supported = true;
2831 ti->num_discard_bios = 1; 2840 ti->num_discard_bios = 1;
2832 ti->discard_zeroes_data_unsupported = true;
2833 /* Discard bios must be split on a block boundary */ 2841 /* Discard bios must be split on a block boundary */
2834 ti->split_discard_bios = true; 2842 ti->split_discard_bios = true;
2835 } 2843 }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a5e9ed2fcc3..b3e26c7d1417 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -211,10 +211,55 @@ struct dm_md_mempools {
211 struct bio_set *bs; 211 struct bio_set *bs;
212}; 212};
213 213
214#define MIN_IOS 256 214#define RESERVED_BIO_BASED_IOS 16
215#define RESERVED_REQUEST_BASED_IOS 256
216#define RESERVED_MAX_IOS 1024
215static struct kmem_cache *_io_cache; 217static struct kmem_cache *_io_cache;
216static struct kmem_cache *_rq_tio_cache; 218static struct kmem_cache *_rq_tio_cache;
217 219
220/*
221 * Bio-based DM's mempools' reserved IOs set by the user.
222 */
223static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
224
225/*
226 * Request-based DM's mempools' reserved IOs set by the user.
227 */
228static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
229
230static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
231 unsigned def, unsigned max)
232{
233 unsigned ios = ACCESS_ONCE(*reserved_ios);
234 unsigned modified_ios = 0;
235
236 if (!ios)
237 modified_ios = def;
238 else if (ios > max)
239 modified_ios = max;
240
241 if (modified_ios) {
242 (void)cmpxchg(reserved_ios, ios, modified_ios);
243 ios = modified_ios;
244 }
245
246 return ios;
247}
248
249unsigned dm_get_reserved_bio_based_ios(void)
250{
251 return __dm_get_reserved_ios(&reserved_bio_based_ios,
252 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
253}
254EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
255
256unsigned dm_get_reserved_rq_based_ios(void)
257{
258 return __dm_get_reserved_ios(&reserved_rq_based_ios,
259 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
260}
261EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
262
218static int __init local_init(void) 263static int __init local_init(void)
219{ 264{
220 int r = -ENOMEM; 265 int r = -ENOMEM;
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278} 2323}
2279 2324
2280/* 2325/*
2326 * The queue_limits are only valid as long as you have a reference
2327 * count on 'md'.
2328 */
2329struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2330{
2331 BUG_ON(!atomic_read(&md->holders));
2332 return &md->queue->limits;
2333}
2334EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2335
2336/*
2281 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 2337 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2282 */ 2338 */
2283static int dm_init_request_based_queue(struct mapped_device *md) 2339static int dm_init_request_based_queue(struct mapped_device *md)
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
2862 2918
2863 if (type == DM_TYPE_BIO_BASED) { 2919 if (type == DM_TYPE_BIO_BASED) {
2864 cachep = _io_cache; 2920 cachep = _io_cache;
2865 pool_size = 16; 2921 pool_size = dm_get_reserved_bio_based_ios();
2866 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2922 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2867 } else if (type == DM_TYPE_REQUEST_BASED) { 2923 } else if (type == DM_TYPE_REQUEST_BASED) {
2868 cachep = _rq_tio_cache; 2924 cachep = _rq_tio_cache;
2869 pool_size = MIN_IOS; 2925 pool_size = dm_get_reserved_rq_based_ios();
2870 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2926 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2871 /* per_bio_data_size is not used. See __bind_mempools(). */ 2927 /* per_bio_data_size is not used. See __bind_mempools(). */
2872 WARN_ON(per_bio_data_size != 0); 2928 WARN_ON(per_bio_data_size != 0);
2873 } else 2929 } else
2874 goto out; 2930 goto out;
2875 2931
2876 pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); 2932 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2877 if (!pools->io_pool) 2933 if (!pools->io_pool)
2878 goto out; 2934 goto out;
2879 2935
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
2924 2980
2925module_param(major, uint, 0); 2981module_param(major, uint, 0);
2926MODULE_PARM_DESC(major, "The major number of the device mapper"); 2982MODULE_PARM_DESC(major, "The major number of the device mapper");
2983
2984module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2985MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2986
2987module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2988MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2989
2927MODULE_DESCRIPTION(DM_NAME " driver"); 2990MODULE_DESCRIPTION(DM_NAME " driver");
2928MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 2991MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2929MODULE_LICENSE("GPL"); 2992MODULE_LICENSE("GPL");
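
The dm.c change above turns the old hard-coded MIN_IOS into two writable module parameters and sanitizes them on every use: __dm_get_reserved_ios() reads the value once, substitutes the default for 0, caps it at RESERVED_MAX_IOS, and writes the corrected value back with a lockless cmpxchg(), so dm_io_client_create() and alloc_multipath() never size a mempool from a bogus setting. At runtime the knobs should appear under /sys/module/dm_mod/parameters/ (assuming the usual dm_mod module name). A hedged sketch of the read-and-clamp idiom for any user-writable count:

	/* sketch: clamp a writable module parameter at the point of use */
	static unsigned clamp_param(unsigned *param, unsigned def, unsigned max)
	{
		unsigned v = ACCESS_ONCE(*param);
		unsigned fixed = 0;

		if (!v)
			fixed = def;
		else if (v > max)
			fixed = max;

		if (fixed) {
			(void)cmpxchg(param, v, fixed);	/* best-effort write-back */
			v = fixed;
		}
		return v;
	}
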
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5e604cc7b4aa..1d1ad7b7e527 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
184/* 184/*
185 * Helpers that are used by DM core 185 * Helpers that are used by DM core
186 */ 186 */
187unsigned dm_get_reserved_bio_based_ios(void);
188unsigned dm_get_reserved_rq_based_ios(void);
189
187static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) 190static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
188{ 191{
189 return !maxlen || strlen(result) + 1 >= maxlen; 192 return !maxlen || strlen(result) + 1 >= maxlen;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d0fdc134068a..f6ff711aa5bb 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
57 dev->iamthif_ioctl = false; 57 dev->iamthif_ioctl = false;
58 dev->iamthif_state = MEI_IAMTHIF_IDLE; 58 dev->iamthif_state = MEI_IAMTHIF_IDLE;
59 dev->iamthif_timer = 0; 59 dev->iamthif_timer = 0;
60 dev->iamthif_stall_timer = 0;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 6d0282c08a06..cd2033cd7120 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
297 297
298 if (cl->reading_state != MEI_READ_COMPLETE && 298 if (cl->reading_state != MEI_READ_COMPLETE &&
299 !waitqueue_active(&cl->rx_wait)) { 299 !waitqueue_active(&cl->rx_wait)) {
300
300 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
301 302
302 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
303 (MEI_READ_COMPLETE == cl->reading_state))) { 304 cl->reading_state == MEI_READ_COMPLETE ||
305 mei_cl_is_transitioning(cl))) {
306
304 if (signal_pending(current)) 307 if (signal_pending(current))
305 return -EINTR; 308 return -EINTR;
306 return -ERESTARTSYS; 309 return -ERESTARTSYS;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9eb031e92070..892cc4207fa2 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
90 cl->dev->dev_state == MEI_DEV_ENABLED && 90 cl->dev->dev_state == MEI_DEV_ENABLED &&
91 cl->state == MEI_FILE_CONNECTED); 91 cl->state == MEI_FILE_CONNECTED);
92} 92}
93static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
94{
95 return (MEI_FILE_INITIALIZING == cl->state ||
96 MEI_FILE_DISCONNECTED == cl->state ||
97 MEI_FILE_DISCONNECTING == cl->state);
98}
93 99
94bool mei_cl_is_other_connecting(struct mei_cl *cl); 100bool mei_cl_is_other_connecting(struct mei_cl *cl);
95int mei_cl_disconnect(struct mei_cl *cl); 101int mei_cl_disconnect(struct mei_cl *cl);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6127ab64bb39..0a0448326e9d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
37 37
38 dev->me_clients_num = 0;
39 dev->me_client_presentation_num = 0;
40 dev->me_client_index = 0;
41
38 /* count how many ME clients we have */ 42 /* count how many ME clients we have */
39 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) 43 for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
40 dev->me_clients_num++; 44 dev->me_clients_num++;
41 45
42 if (dev->me_clients_num <= 0) 46 if (dev->me_clients_num == 0)
43 return; 47 return;
44 48
45 kfree(dev->me_clients); 49 kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
221 struct hbm_props_request *prop_req; 225 struct hbm_props_request *prop_req;
222 const size_t len = sizeof(struct hbm_props_request); 226 const size_t len = sizeof(struct hbm_props_request);
223 unsigned long next_client_index; 227 unsigned long next_client_index;
224 u8 client_num; 228 unsigned long client_num;
225 229
226 230
227 client_num = dev->me_client_presentation_num; 231 client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
677 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 681 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
678 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 682 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
679 dev->init_clients_timer = 0; 683 dev->init_clients_timer = 0;
680 dev->me_client_presentation_num = 0;
681 dev->me_client_index = 0;
682 mei_hbm_me_cl_allocate(dev); 684 mei_hbm_me_cl_allocate(dev);
683 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 685 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
684 686
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 92c73118b13c..6197018e2f16 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); 175 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
176 } 176 }
177 177
178 /* we're already in reset, cancel the init timer */
179 dev->init_clients_timer = 0;
180
178 dev->me_clients_num = 0; 181 dev->me_clients_num = 0;
179 dev->rd_msg_hdr = 0; 182 dev->rd_msg_hdr = 0;
180 dev->wd_pending = false; 183 dev->wd_pending = false;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 173ff095be0d..cabeddd66c1f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
249 mutex_unlock(&dev->device_lock); 249 mutex_unlock(&dev->device_lock);
250 250
251 if (wait_event_interruptible(cl->rx_wait, 251 if (wait_event_interruptible(cl->rx_wait,
252 (MEI_READ_COMPLETE == cl->reading_state || 252 MEI_READ_COMPLETE == cl->reading_state ||
253 MEI_FILE_INITIALIZING == cl->state || 253 mei_cl_is_transitioning(cl))) {
254 MEI_FILE_DISCONNECTED == cl->state || 254
255 MEI_FILE_DISCONNECTING == cl->state))) {
256 if (signal_pending(current)) 255 if (signal_pending(current))
257 return -EINTR; 256 return -EINTR;
258 return -ERESTARTSYS; 257 return -ERESTARTSYS;
259 } 258 }
260 259
261 mutex_lock(&dev->device_lock); 260 mutex_lock(&dev->device_lock);
262 if (MEI_FILE_INITIALIZING == cl->state || 261 if (mei_cl_is_transitioning(cl)) {
263 MEI_FILE_DISCONNECTED == cl->state ||
264 MEI_FILE_DISCONNECTING == cl->state) {
265 rets = -EBUSY; 262 rets = -EBUSY;
266 goto out; 263 goto out;
267 } 264 }
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 7b918b2fb894..456b322013e2 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,9 +396,9 @@ struct mei_device {
396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */ 396 struct mei_me_client *me_clients; /* Note: memory has to be allocated */
397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 397 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 398 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
399 u8 me_clients_num; 399 unsigned long me_clients_num;
400 u8 me_client_presentation_num; 400 unsigned long me_client_presentation_num;
401 u8 me_client_index; 401 unsigned long me_client_index;
402 402
403 struct mei_cl wd_cl; 403 struct mei_cl wd_cl;
404 enum mei_wd_states wd_state; 404 enum mei_wd_states wd_state;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 87ed3fb5149a..f344659dceac 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
113}; 113};
114 114
115static const struct of_device_id sh_mobile_sdhi_of_match[] = { 115static const struct of_device_id sh_mobile_sdhi_of_match[] = {
116 { .compatible = "renesas,shmobile-sdhi" }, 116 { .compatible = "renesas,sdhi-shmobile" },
117 { .compatible = "renesas,sh7372-sdhi" }, 117 { .compatible = "renesas,sdhi-sh7372" },
118 { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 118 { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
119 { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 119 { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
120 { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 120 { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
121 { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 121 { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
122 { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 122 { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
123 { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 123 { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
124 {}, 124 {},
125}; 125};
126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5db900d917f9..dd03dfdfb0d6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1236,7 +1236,6 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1236 return 0; 1236 return 0;
1237} 1237}
1238 1238
1239#ifdef CONFIG_OF
1240static struct of_device_id pxa3xx_nand_dt_ids[] = { 1239static struct of_device_id pxa3xx_nand_dt_ids[] = {
1241 { 1240 {
1242 .compatible = "marvell,pxa3xx-nand", 1241 .compatible = "marvell,pxa3xx-nand",
@@ -1284,12 +1283,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1284 1283
1285 return 0; 1284 return 0;
1286} 1285}
1287#else
1288static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1289{
1290 return 0;
1291}
1292#endif
1293 1286
1294static int pxa3xx_nand_probe(struct platform_device *pdev) 1287static int pxa3xx_nand_probe(struct platform_device *pdev)
1295{ 1288{
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91f179d5135c..f428ef574372 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1472,7 +1472,7 @@ void bond_alb_monitor(struct work_struct *work)
1472 bond_info->lp_counter++; 1472 bond_info->lp_counter++;
1473 1473
1474 /* send learning packets */ 1474 /* send learning packets */
1475 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) { 1475 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1476 /* change of curr_active_slave involves swapping of mac addresses. 1476 /* change of curr_active_slave involves swapping of mac addresses.
1477 * in order to avoid this swapping from happening while 1477 * in order to avoid this swapping from happening while
1478 * sending the learning packets, the curr_slave_lock must be held for 1478 * sending the learning packets, the curr_slave_lock must be held for
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 28d8e4c7dc06..c5eff5dafdfe 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -36,14 +36,15 @@ struct slave;
36 * Used for division - never set 36 * Used for division - never set
37 * to zero !!! 37 * to zero !!!
38 */ 38 */
39#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of 39#define BOND_ALB_DEFAULT_LP_INTERVAL 1
40 * learning packets to the switch 40#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of
41 */ 41 * learning packets to the switch
42 */
42 43
43#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ 44#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
44 * ALB_TIMER_TICKS_PER_SEC) 45 * ALB_TIMER_TICKS_PER_SEC)
45 46
46#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \ 47#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \
47 * ALB_TIMER_TICKS_PER_SEC) 48 * ALB_TIMER_TICKS_PER_SEC)
48 49
49#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. 50#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
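For orientation, a standalone sketch of the tick arithmetic the new per-bond macro expresses; ALB_TIMER_TICKS_PER_SEC is assumed to be 10 here, and lp_interval stands in for bond->params.lp_interval:

#include <stdio.h>

#define ALB_TIMER_TICKS_PER_SEC 10   /* assumed value, for illustration only */
#define BOND_ALB_LP_TICKS(lp_interval) ((lp_interval) * ALB_TIMER_TICKS_PER_SEC)

int main(void)
{
        int lp_interval;

        /* learning packets go out whenever lp_counter reaches this many ticks */
        for (lp_interval = 1; lp_interval <= 3; lp_interval++)
                printf("lp_interval=%ds -> %d monitor ticks between bursts\n",
                       lp_interval, BOND_ALB_LP_TICKS(lp_interval));
        return 0;
}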
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 72df399c4ab3..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1724 struct bonding *bond = netdev_priv(bond_dev); 1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent; 1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr; 1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1727 netdev_features_t old_features = bond_dev->features; 1728 netdev_features_t old_features = bond_dev->features;
1728 1729
1729 /* slave is not a slave or master is not master of this slave */ 1730 /* slave is not a slave or master is not master of this slave */
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev,
1855 * bond_change_active_slave(..., NULL) 1856 * bond_change_active_slave(..., NULL)
1856 */ 1857 */
1857 if (!USES_PRIMARY(bond->params.mode)) { 1858 if (!USES_PRIMARY(bond->params.mode)) {
1858 /* unset promiscuity level from slave */ 1859 /* unset promiscuity level from slave
1859 if (bond_dev->flags & IFF_PROMISC) 1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
1863 * when this slave was attached, so we cache at the start of the
1864 * function and use it here. Same goes for ALLMULTI below
1865 */
1866 if (old_flags & IFF_PROMISC)
1860 dev_set_promiscuity(slave_dev, -1); 1867 dev_set_promiscuity(slave_dev, -1);
1861 1868
1862 /* unset allmulti level from slave */ 1869 /* unset allmulti level from slave */
1863 if (bond_dev->flags & IFF_ALLMULTI) 1870 if (old_flags & IFF_ALLMULTI)
1864 dev_set_allmulti(slave_dev, -1); 1871 dev_set_allmulti(slave_dev, -1);
1865 1872
1866 bond_hw_addr_flush(bond_dev, slave_dev); 1873 bond_hw_addr_flush(bond_dev, slave_dev);
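The comment above carries the whole fix: the promiscuity/allmulti reference taken at enslave time must be dropped according to the bond's flags as they were on entry to __bond_release_one(), not after intervening notifiers may have cleared them. A minimal standalone illustration of that snapshot-then-test pattern (flag values and helper names here are illustrative, not the bonding code):

#include <stdio.h>

#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

static unsigned int live_flags = IFF_PROMISC | IFF_ALLMULTI;

static void notifier_side_effect(void)
{
        live_flags &= ~IFF_PROMISC;            /* a callback changes the live flags */
}

int main(void)
{
        unsigned int old_flags = live_flags;   /* snapshot taken on entry */

        notifier_side_effect();

        /* decide from the snapshot, not from the (now different) live flags */
        if (old_flags & IFF_PROMISC)
                printf("drop the promiscuity reference taken at enslave time\n");
        if (old_flags & IFF_ALLMULTI)
                printf("drop the allmulti reference taken at enslave time\n");
        return 0;
}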
@@ -4416,6 +4423,7 @@ static int bond_check_params(struct bond_params *params)
4416 params->all_slaves_active = all_slaves_active; 4423 params->all_slaves_active = all_slaves_active;
4417 params->resend_igmp = resend_igmp; 4424 params->resend_igmp = resend_igmp;
4418 params->min_links = min_links; 4425 params->min_links = min_links;
4426 params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
4419 4427
4420 if (primary) { 4428 if (primary) {
4421 strncpy(params->primary, primary, IFNAMSIZ); 4429 strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index eeab40b01b7a..c29b836749b6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1699,6 +1699,44 @@ out:
1699static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, 1699static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
1700 bonding_show_resend_igmp, bonding_store_resend_igmp); 1700 bonding_show_resend_igmp, bonding_store_resend_igmp);
1701 1701
1702
1703static ssize_t bonding_show_lp_interval(struct device *d,
1704 struct device_attribute *attr,
1705 char *buf)
1706{
1707 struct bonding *bond = to_bond(d);
1708 return sprintf(buf, "%d\n", bond->params.lp_interval);
1709}
1710
1711static ssize_t bonding_store_lp_interval(struct device *d,
1712 struct device_attribute *attr,
1713 const char *buf, size_t count)
1714{
1715 struct bonding *bond = to_bond(d);
1716 int new_value, ret = count;
1717
1718 if (sscanf(buf, "%d", &new_value) != 1) {
1719 pr_err("%s: no lp interval value specified.\n",
1720 bond->dev->name);
1721 ret = -EINVAL;
1722 goto out;
1723 }
1724
1725 if (new_value <= 0) {
1726 pr_err ("%s: lp_interval must be between 1 and %d\n",
1727 bond->dev->name, INT_MAX);
1728 ret = -EINVAL;
1729 goto out;
1730 }
1731
1732 bond->params.lp_interval = new_value;
1733out:
1734 return ret;
1735}
1736
1737static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
1738 bonding_show_lp_interval, bonding_store_lp_interval);
1739
1702static struct attribute *per_bond_attrs[] = { 1740static struct attribute *per_bond_attrs[] = {
1703 &dev_attr_slaves.attr, 1741 &dev_attr_slaves.attr,
1704 &dev_attr_mode.attr, 1742 &dev_attr_mode.attr,
@@ -1729,6 +1767,7 @@ static struct attribute *per_bond_attrs[] = {
1729 &dev_attr_all_slaves_active.attr, 1767 &dev_attr_all_slaves_active.attr,
1730 &dev_attr_resend_igmp.attr, 1768 &dev_attr_resend_igmp.attr,
1731 &dev_attr_min_links.attr, 1769 &dev_attr_min_links.attr,
1770 &dev_attr_lp_interval.attr,
1732 NULL, 1771 NULL,
1733}; 1772};
1734 1773
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 7ad8bd5cc947..03cf3fd14490 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -176,6 +176,7 @@ struct bond_params {
176 int tx_queues; 176 int tx_queues;
177 int all_slaves_active; 177 int all_slaves_active;
178 int resend_igmp; 178 int resend_igmp;
179 int lp_interval;
179}; 180};
180 181
181struct bond_parm_tbl { 182struct bond_parm_tbl {
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..3f21142138b7 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
702{ 702{
703 struct flexcan_priv *priv = netdev_priv(dev); 703 struct flexcan_priv *priv = netdev_priv(dev);
704 struct flexcan_regs __iomem *regs = priv->base; 704 struct flexcan_regs __iomem *regs = priv->base;
705 unsigned int i;
706 int err; 705 int err;
707 u32 reg_mcr, reg_ctrl; 706 u32 reg_mcr, reg_ctrl;
708 707
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev)
772 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 771 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
773 flexcan_write(reg_ctrl, &regs->ctrl); 772 flexcan_write(reg_ctrl, &regs->ctrl);
774 773
775 for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
776 flexcan_write(0, &regs->cantxfg[i].can_ctrl);
777 flexcan_write(0, &regs->cantxfg[i].can_id);
778 flexcan_write(0, &regs->cantxfg[i].data[0]);
779 flexcan_write(0, &regs->cantxfg[i].data[1]);
780
781 /* put MB into rx queue */
782 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
783 &regs->cantxfg[i].can_ctrl);
784 }
785
786 /* acceptance mask/acceptance code (accept everything) */ 774 /* acceptance mask/acceptance code (accept everything) */
787 flexcan_write(0x0, &regs->rxgmask); 775 flexcan_write(0x0, &regs->rxgmask);
788 flexcan_write(0x0, &regs->rx14mask); 776 flexcan_write(0x0, &regs->rx14mask);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76/* maximum rx buffer len: extended CAN frame with timestamp */ 76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) 77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78 78
79#define SLC_CMD_LEN 1
80#define SLC_SFF_ID_LEN 3
81#define SLC_EFF_ID_LEN 8
82
79struct slcan { 83struct slcan {
80 int magic; 84 int magic;
81 85
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
142{ 146{
143 struct sk_buff *skb; 147 struct sk_buff *skb;
144 struct can_frame cf; 148 struct can_frame cf;
145 int i, dlc_pos, tmp; 149 int i, tmp;
146 unsigned long ultmp; 150 u32 tmpid;
147 char cmd = sl->rbuff[0]; 151 char *cmd = sl->rbuff;
148 152
149 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) 153 cf.can_id = 0;
154
155 switch (*cmd) {
156 case 'r':
157 cf.can_id = CAN_RTR_FLAG;
158 /* fallthrough */
159 case 't':
160 /* store dlc ASCII value and terminate SFF CAN ID string */
161 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
162 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
163 /* point to payload data behind the dlc */
164 cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
165 break;
166 case 'R':
167 cf.can_id = CAN_RTR_FLAG;
168 /* fallthrough */
169 case 'T':
170 cf.can_id |= CAN_EFF_FLAG;
171 /* store dlc ASCII value and terminate EFF CAN ID string */
172 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
173 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
174 /* point to payload data behind the dlc */
175 cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
176 break;
177 default:
150 return; 178 return;
179 }
151 180
152 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ 181 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
153 dlc_pos = 4; /* dlc position tiiid */
154 else
155 dlc_pos = 9; /* dlc position Tiiiiiiiid */
156
157 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
158 return; 182 return;
159 183
160 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ 184 cf.can_id |= tmpid;
161 185
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 186 /* get can_dlc from sanitized ASCII value */
163 187 if (cf.can_dlc >= '0' && cf.can_dlc < '9')
164 if (kstrtoul(sl->rbuff+1, 16, &ultmp)) 188 cf.can_dlc -= '0';
189 else
165 return; 190 return;
166 191
167 cf.can_id = ultmp;
168
169 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
170 cf.can_id |= CAN_EFF_FLAG;
171
172 if ((cmd | 0x20) == 'r') /* RTR frame */
173 cf.can_id |= CAN_RTR_FLAG;
174
175 *(u64 *) (&cf.data) = 0; /* clear payload */ 192 *(u64 *) (&cf.data) = 0; /* clear payload */
176 193
177 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { 194 /* RTR frames may have a dlc > 0 but they never have any data bytes */
178 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 195 if (!(cf.can_id & CAN_RTR_FLAG)) {
179 if (tmp < 0) 196 for (i = 0; i < cf.can_dlc; i++) {
180 return; 197 tmp = hex_to_bin(*cmd++);
181 cf.data[i] = (tmp << 4); 198 if (tmp < 0)
182 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 199 return;
183 if (tmp < 0) 200 cf.data[i] = (tmp << 4);
184 return; 201 tmp = hex_to_bin(*cmd++);
185 cf.data[i] |= tmp; 202 if (tmp < 0)
203 return;
204 cf.data[i] |= tmp;
205 }
186 } 206 }
187 207
188 skb = dev_alloc_skb(sizeof(struct can_frame) + 208 skb = dev_alloc_skb(sizeof(struct can_frame) +
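For readers unfamiliar with the serial-line CAN ASCII framing that slc_bump() now decodes positionally ('t'/'r' plus a 3-digit SFF identifier, or 'T'/'R' plus an 8-digit EFF identifier, one ASCII dlc digit, then hex data pairs), here is a hedged userspace sketch of the same decode steps; it mirrors the structure above but is not the driver code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define SLC_CMD_LEN    1
#define SLC_SFF_ID_LEN 3
#define SLC_EFF_ID_LEN 8

/* decode "t1232..." / "T0000123483..." style frames; returns 0 on success */
static int slc_parse(const char *frame, uint32_t *id, int *dlc, uint8_t data[8])
{
        char buf[64];
        int id_len, i;
        char *p;

        strncpy(buf, frame, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        switch (buf[0]) {
        case 't': case 'r': id_len = SLC_SFF_ID_LEN; break;
        case 'T': case 'R': id_len = SLC_EFF_ID_LEN; break;
        default: return -1;
        }

        /* dlc is a single ASCII digit right behind the identifier */
        p = buf + SLC_CMD_LEN + id_len;
        if (*p < '0' || *p > '8')
                return -1;
        *dlc = *p - '0';

        *p = '\0';                              /* terminate the ID string */
        *id = (uint32_t)strtoul(buf + SLC_CMD_LEN, NULL, 16);

        /* RTR frames ('r'/'R') carry a dlc but never any data bytes */
        memset(data, 0, 8);
        if (buf[0] == 'r' || buf[0] == 'R')
                return 0;

        for (i = 0; i < *dlc; i++)
                if (sscanf(frame + SLC_CMD_LEN + id_len + 1 + 2 * i,
                           "%2hhx", &data[i]) != 1)
                        return -1;
        return 0;
}

int main(void)
{
        uint8_t data[8];
        uint32_t id;
        int dlc, i;

        if (slc_parse("t12320011", &id, &dlc, data) == 0) {
                printf("id=0x%03x dlc=%d data=", id, dlc);
                for (i = 0; i < dlc; i++)
                        printf("%02X", data[i]);
                printf("\n");
        }
        return 0;
}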
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
209/* parse tty input stream */ 229/* parse tty input stream */
210static void slcan_unesc(struct slcan *sl, unsigned char s) 230static void slcan_unesc(struct slcan *sl, unsigned char s)
211{ 231{
212
213 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ 232 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
214 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 233 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
215 (sl->rcount > 4)) { 234 (sl->rcount > 4)) {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
236/* Encapsulate one can_frame and stuff into a TTY queue. */ 255/* Encapsulate one can_frame and stuff into a TTY queue. */
237static void slc_encaps(struct slcan *sl, struct can_frame *cf) 256static void slc_encaps(struct slcan *sl, struct can_frame *cf)
238{ 257{
239 int actual, idx, i; 258 int actual, i;
240 char cmd; 259 unsigned char *pos;
260 unsigned char *endpos;
261 canid_t id = cf->can_id;
262
263 pos = sl->xbuff;
241 264
242 if (cf->can_id & CAN_RTR_FLAG) 265 if (cf->can_id & CAN_RTR_FLAG)
243 cmd = 'R'; /* becomes 'r' in standard frame format */ 266 *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
244 else 267 else
 245 cmd = 'T'; /* becomes 't' in standard frame format */ 268 *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
246 269
247 if (cf->can_id & CAN_EFF_FLAG) 270 /* determine number of chars for the CAN-identifier */
248 sprintf(sl->xbuff, "%c%08X%d", cmd, 271 if (cf->can_id & CAN_EFF_FLAG) {
249 cf->can_id & CAN_EFF_MASK, cf->can_dlc); 272 id &= CAN_EFF_MASK;
250 else 273 endpos = pos + SLC_EFF_ID_LEN;
251 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, 274 } else {
252 cf->can_id & CAN_SFF_MASK, cf->can_dlc); 275 *pos |= 0x20; /* convert R/T to lower case for SFF */
276 id &= CAN_SFF_MASK;
277 endpos = pos + SLC_SFF_ID_LEN;
278 }
253 279
254 idx = strlen(sl->xbuff); 280 /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
281 pos++;
282 while (endpos >= pos) {
283 *endpos-- = hex_asc_upper[id & 0xf];
284 id >>= 4;
285 }
286
287 pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
255 288
256 for (i = 0; i < cf->can_dlc; i++) 289 *pos++ = cf->can_dlc + '0';
257 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); 290
291 /* RTR frames may have a dlc > 0 but they never have any data bytes */
292 if (!(cf->can_id & CAN_RTR_FLAG)) {
293 for (i = 0; i < cf->can_dlc; i++)
294 pos = hex_byte_pack_upper(pos, cf->data[i]);
295 }
258 296
259 strcat(sl->xbuff, "\r"); /* add terminating character */ 297 *pos++ = '\r';
260 298
261 /* Order of next two lines is *very* important. 299 /* Order of next two lines is *very* important.
262 * When we are sending a little amount of data, 300 * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
267 * 14 Oct 1994 Dmitry Gorodchanin. 305 * 14 Oct 1994 Dmitry Gorodchanin.
268 */ 306 */
269 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); 307 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
270 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); 308 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
271 sl->xleft = strlen(sl->xbuff) - actual; 309 sl->xleft = (pos - sl->xbuff) - actual;
272 sl->xhead = sl->xbuff + actual; 310 sl->xhead = sl->xbuff + actual;
273 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
274} 312}
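The transmit direction emits the mirror image of that framing: an upper-case-hex identifier padded to 3 (SFF) or 8 (EFF) digits, one dlc digit, data bytes only for non-RTR frames, and a terminating '\r'. A standalone sketch follows; it uses sprintf for clarity where the patch hand-rolls the hex digits to keep the hot path cheap, and the CAN_* constants are the usual linux/can.h values:

#include <stdio.h>
#include <stdint.h>

#define CAN_EFF_FLAG 0x80000000U   /* extended frame format */
#define CAN_RTR_FLAG 0x40000000U   /* remote transmission request */
#define CAN_SFF_MASK 0x000007FFU
#define CAN_EFF_MASK 0x1FFFFFFFU

static int slc_encode(char *out, uint32_t can_id, int dlc, const uint8_t *data)
{
        int eff = !!(can_id & CAN_EFF_FLAG);
        int rtr = !!(can_id & CAN_RTR_FLAG);
        int n, i;

        /* 'T'/'R' for EFF frames, lower case 't'/'r' for SFF frames */
        n = sprintf(out, eff ? "%c%08X%d" : "%c%03X%d",
                    (rtr ? 'R' : 'T') | (eff ? 0 : 0x20),
                    can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK), dlc);

        if (!rtr)                        /* RTR frames never carry data bytes */
                for (i = 0; i < dlc; i++)
                        n += sprintf(out + n, "%02X", data[i]);

        out[n++] = '\r';
        out[n] = '\0';
        return n;
}

int main(void)
{
        uint8_t data[2] = { 0x00, 0x11 };
        char buf[32];

        slc_encode(buf, 0x123, 2, data);
        printf("%s\n", buf);             /* "t12320011" followed by CR */
        return 0;
}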
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
286 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 324 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
287 return; 325 return;
288 326
327 spin_lock(&sl->lock);
289 if (sl->xleft <= 0) { 328 if (sl->xleft <= 0) {
290 /* Now serial buffer is almost free & we can start 329 /* Now serial buffer is almost free & we can start
291 * transmission of another packet */ 330 * transmission of another packet */
292 sl->dev->stats.tx_packets++; 331 sl->dev->stats.tx_packets++;
293 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 332 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
333 spin_unlock(&sl->lock);
294 netif_wake_queue(sl->dev); 334 netif_wake_queue(sl->dev);
295 return; 335 return;
296 } 336 }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
298 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 338 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
299 sl->xleft -= actual; 339 sl->xleft -= actual;
300 sl->xhead += actual; 340 sl->xhead += actual;
341 spin_unlock(&sl->lock);
301} 342}
302 343
303/* Send a can_frame to a TTY queue. */ 344/* Send a can_frame to a TTY queue. */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
463 if (i < PCAN_USB_MAX_TX_URBS) { 463 if (i < PCAN_USB_MAX_TX_URBS) {
464 if (i == 0) { 464 if (i == 0) {
465 netdev_err(netdev, "couldn't setup any tx URB\n"); 465 netdev_err(netdev, "couldn't setup any tx URB\n");
466 return err; 466 goto err_tx;
467 } 467 }
468 468
469 netdev_warn(netdev, "tx performance may be slow\n"); 469 netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
472 if (dev->adapter->dev_start) { 472 if (dev->adapter->dev_start) {
473 err = dev->adapter->dev_start(dev); 473 err = dev->adapter->dev_start(dev);
474 if (err) 474 if (err)
475 goto failed; 475 goto err_adapter;
476 } 476 }
477 477
478 dev->state |= PCAN_USB_STATE_STARTED; 478 dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
481 if (dev->adapter->dev_set_bus) { 481 if (dev->adapter->dev_set_bus) {
482 err = dev->adapter->dev_set_bus(dev, 1); 482 err = dev->adapter->dev_set_bus(dev, 1);
483 if (err) 483 if (err)
484 goto failed; 484 goto err_adapter;
485 } 485 }
486 486
487 dev->can.state = CAN_STATE_ERROR_ACTIVE; 487 dev->can.state = CAN_STATE_ERROR_ACTIVE;
488 488
489 return 0; 489 return 0;
490 490
491failed: 491err_adapter:
492 if (err == -ENODEV) 492 if (err == -ENODEV)
493 netif_device_detach(dev->netdev); 493 netif_device_detach(dev->netdev);
494 494
495 netdev_warn(netdev, "couldn't submit control: %d\n", err); 495 netdev_warn(netdev, "couldn't submit control: %d\n", err);
496 496
497 for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
498 usb_free_urb(dev->tx_contexts[i].urb);
499 dev->tx_contexts[i].urb = NULL;
500 }
501err_tx:
502 usb_kill_anchored_urbs(&dev->rx_submitted);
503
497 return err; 504 return err;
498} 505}
499 506
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e66684a438f5..75fb1d20d6fd 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
530 if (lp->wol && !lp->irq_wake_requested) { 530 if (lp->wol && !lp->irq_wake_requested) {
531 /* register wake irq handler */ 531 /* register wake irq handler */
532 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, 532 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
533 IRQF_DISABLED, "EMAC_WAKE", dev); 533 0, "EMAC_WAKE", dev);
534 if (rc) 534 if (rc)
535 return rc; 535 return rc;
536 lp->irq_wake_requested = true; 536 lp->irq_wake_requested = true;
@@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
1686 /* now, enable interrupts */ 1686 /* now, enable interrupts */
1687 /* register irq handler */ 1687 /* register irq handler */
1688 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, 1688 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1689 IRQF_DISABLED, "EMAC_RX", ndev); 1689 0, "EMAC_RX", ndev);
1690 if (rc) { 1690 if (rc) {
1691 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); 1691 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1692 rc = -EBUSY; 1692 rc = -EBUSY;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index d6b20296b8e4..3d8c6b2cdea4 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
358 358
359 REGA(CSR0) = CSR0_STOP; 359 REGA(CSR0) = CSR0_STOP;
360 360
361 if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { 361 if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
362#ifdef CONFIG_SUN3 362#ifdef CONFIG_SUN3
363 iounmap((void __iomem *)ioaddr); 363 iounmap((void __iomem *)ioaddr);
364#endif 364#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398ebbba6..fc95b235e210 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1188 struct alx_priv *alx; 1188 struct alx_priv *alx;
1189 struct alx_hw *hw; 1189 struct alx_hw *hw;
1190 bool phy_configured; 1190 bool phy_configured;
1191 int bars, pm_cap, err; 1191 int bars, err;
1192 1192
1193 err = pci_enable_device_mem(pdev); 1193 err = pci_enable_device_mem(pdev);
1194 if (err) 1194 if (err)
@@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1225 pci_enable_pcie_error_reporting(pdev); 1225 pci_enable_pcie_error_reporting(pdev);
1226 pci_set_master(pdev); 1226 pci_set_master(pdev);
1227 1227
1228 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 1228 if (!pdev->pm_cap) {
1229 if (pm_cap == 0) {
1230 dev_err(&pdev->dev, 1229 dev_err(&pdev->dev,
1231 "Can't find power management capability, aborting\n"); 1230 "Can't find power management capability, aborting\n");
1232 err = -EIO; 1231 err = -EIO;
1233 goto out_pci_release; 1232 goto out_pci_release;
1234 } 1233 }
1235 1234
1236 err = pci_set_power_state(pdev, PCI_D0);
1237 if (err)
1238 goto out_pci_release;
1239
1240 netdev = alloc_etherdev(sizeof(*alx)); 1235 netdev = alloc_etherdev(sizeof(*alx));
1241 if (!netdev) { 1236 if (!netdev) {
1242 err = -ENOMEM; 1237 err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af45b859..249468f95365 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
157 if (++ring->end >= BGMAC_TX_RING_SLOTS) 157 if (++ring->end >= BGMAC_TX_RING_SLOTS)
158 ring->end = 0; 158 ring->end = 0;
159 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, 159 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
160 ring->index_base +
160 ring->end * sizeof(struct bgmac_dma_desc)); 161 ring->end * sizeof(struct bgmac_dma_desc));
161 162
162 /* Always keep one slot free to allow detecting bugged calls. */ 163 /* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
181 /* The last slot that hardware didn't consume yet */ 182 /* The last slot that hardware didn't consume yet */
182 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); 183 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
183 empty_slot &= BGMAC_DMA_TX_STATDPTR; 184 empty_slot &= BGMAC_DMA_TX_STATDPTR;
185 empty_slot -= ring->index_base;
186 empty_slot &= BGMAC_DMA_TX_STATDPTR;
184 empty_slot /= sizeof(struct bgmac_dma_desc); 187 empty_slot /= sizeof(struct bgmac_dma_desc);
185 188
186 while (ring->start != empty_slot) { 189 while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
274 277
275 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); 278 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
276 end_slot &= BGMAC_DMA_RX_STATDPTR; 279 end_slot &= BGMAC_DMA_RX_STATDPTR;
280 end_slot -= ring->index_base;
281 end_slot &= BGMAC_DMA_RX_STATDPTR;
277 end_slot /= sizeof(struct bgmac_dma_desc); 282 end_slot /= sizeof(struct bgmac_dma_desc);
278 283
279 ring->end = end_slot; 284 ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
418 ring = &bgmac->tx_ring[i]; 423 ring = &bgmac->tx_ring[i];
419 ring->num_slots = BGMAC_TX_RING_SLOTS; 424 ring->num_slots = BGMAC_TX_RING_SLOTS;
420 ring->mmio_base = ring_base[i]; 425 ring->mmio_base = ring_base[i];
421 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
422 bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
423 ring->mmio_base);
424 426
425 /* Alloc ring of descriptors */ 427 /* Alloc ring of descriptors */
426 size = ring->num_slots * sizeof(struct bgmac_dma_desc); 428 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
435 if (ring->dma_base & 0xC0000000) 437 if (ring->dma_base & 0xC0000000)
436 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 438 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
437 439
440 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
441 BGMAC_DMA_RING_TX);
442 if (ring->unaligned)
443 ring->index_base = lower_32_bits(ring->dma_base);
444 else
445 ring->index_base = 0;
446
438 /* No need to alloc TX slots yet */ 447 /* No need to alloc TX slots yet */
439 } 448 }
440 449
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
444 ring = &bgmac->rx_ring[i]; 453 ring = &bgmac->rx_ring[i];
445 ring->num_slots = BGMAC_RX_RING_SLOTS; 454 ring->num_slots = BGMAC_RX_RING_SLOTS;
446 ring->mmio_base = ring_base[i]; 455 ring->mmio_base = ring_base[i];
447 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
448 bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
449 ring->mmio_base);
450 456
451 /* Alloc ring of descriptors */ 457 /* Alloc ring of descriptors */
452 size = ring->num_slots * sizeof(struct bgmac_dma_desc); 458 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
462 if (ring->dma_base & 0xC0000000) 468 if (ring->dma_base & 0xC0000000)
463 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 469 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
464 470
471 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
472 BGMAC_DMA_RING_RX);
473 if (ring->unaligned)
474 ring->index_base = lower_32_bits(ring->dma_base);
475 else
476 ring->index_base = 0;
477
465 /* Alloc RX slots */ 478 /* Alloc RX slots */
466 for (j = 0; j < ring->num_slots; j++) { 479 for (j = 0; j < ring->num_slots; j++) {
467 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); 480 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
489 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { 502 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
490 ring = &bgmac->tx_ring[i]; 503 ring = &bgmac->tx_ring[i];
491 504
492 /* We don't implement unaligned addressing, so enable first */ 505 if (!ring->unaligned)
493 bgmac_dma_tx_enable(bgmac, ring); 506 bgmac_dma_tx_enable(bgmac, ring);
494 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, 507 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
495 lower_32_bits(ring->dma_base)); 508 lower_32_bits(ring->dma_base));
496 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, 509 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
497 upper_32_bits(ring->dma_base)); 510 upper_32_bits(ring->dma_base));
511 if (ring->unaligned)
512 bgmac_dma_tx_enable(bgmac, ring);
498 513
499 ring->start = 0; 514 ring->start = 0;
500 ring->end = 0; /* Points the slot that should *not* be read */ 515 ring->end = 0; /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
505 520
506 ring = &bgmac->rx_ring[i]; 521 ring = &bgmac->rx_ring[i];
507 522
508 /* We don't implement unaligned addressing, so enable first */ 523 if (!ring->unaligned)
509 bgmac_dma_rx_enable(bgmac, ring); 524 bgmac_dma_rx_enable(bgmac, ring);
510 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, 525 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
511 lower_32_bits(ring->dma_base)); 526 lower_32_bits(ring->dma_base));
512 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, 527 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
513 upper_32_bits(ring->dma_base)); 528 upper_32_bits(ring->dma_base));
529 if (ring->unaligned)
530 bgmac_dma_rx_enable(bgmac, ring);
514 531
515 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; 532 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
516 j++, dma_desc++) { 533 j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
531 } 548 }
532 549
533 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, 550 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
551 ring->index_base +
534 ring->num_slots * sizeof(struct bgmac_dma_desc)); 552 ring->num_slots * sizeof(struct bgmac_dma_desc));
535 553
536 ring->start = 0; 554 ring->start = 0;
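The index_base bookkeeping added above amounts to a pair of conversions: on an unaligned ring the hardware index registers count from the low bits of dma_base rather than from zero, so the driver adds index_base when writing an index and subtracts it (masking before and after) when reading one back. A standalone sketch of that arithmetic; the 0xfff descriptor-pointer mask and the 16-byte descriptor size are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define DMA_STATDPTR 0x00000fff   /* descriptor-pointer field mask (assumed) */
#define DESC_SIZE    16           /* assumed sizeof(struct bgmac_dma_desc) */

/* slot number -> value written to the hardware index register */
static uint32_t slot_to_reg(uint32_t index_base, unsigned int slot)
{
        return index_base + slot * DESC_SIZE;
}

/* value read back from the status register -> slot number */
static unsigned int reg_to_slot(uint32_t index_base, uint32_t reg)
{
        reg &= DMA_STATDPTR;
        reg -= index_base;
        reg &= DMA_STATDPTR;      /* wrap modulo the pointer field width */
        return reg / DESC_SIZE;
}

int main(void)
{
        uint32_t index_base = 0x2340;   /* low bits of an unaligned dma_base */
        unsigned int slot = 5;
        uint32_t reg = slot_to_reg(index_base, slot);

        printf("slot %u -> reg 0x%x -> slot %u\n",
               slot, reg, reg_to_slot(index_base, reg));
        return 0;
}

The second mask is what keeps the subtraction safe when index_base's low bits are larger than the current descriptor offset.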
@@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
908 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; 926 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
909 u8 et_swtype = 0; 927 u8 et_swtype = 0;
910 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | 928 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
911 BGMAC_CHIPCTL_1_IF_TYPE_RMII; 929 BGMAC_CHIPCTL_1_IF_TYPE_MII;
912 char buf[2]; 930 char buf[4];
913 931
914 if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) { 932 if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
915 if (kstrtou8(buf, 0, &et_swtype)) 933 if (kstrtou8(buf, 0, &et_swtype))
916 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", 934 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
917 buf); 935 buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5fcc070..66c8afbdc8c7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
333 333
334#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030 334#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
335#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000 335#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
336#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010 336#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
337#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020 337#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
338#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0 338#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
339#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000 339#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
384 u16 mmio_base; 384 u16 mmio_base;
385 struct bgmac_dma_desc *cpu_base; 385 struct bgmac_dma_desc *cpu_base;
386 dma_addr_t dma_base; 386 dma_addr_t dma_base;
387 u32 index_base; /* Used for unaligned rings only, otherwise 0 */
388 bool unaligned;
387 389
388 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; 390 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
389}; 391};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0c338026ce01..97b3d32a98bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -246,8 +246,37 @@ enum {
246 BNX2X_MAX_CNIC_ETH_CL_ID_IDX, 246 BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
247}; 247};
248 248
249#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\ 249/* use a value high enough to be above all the PFs, which has least significant
250 * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
251 * calculate doorbell address according to old doorbell configuration scheme
252 * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number
253 * We must avoid coming up with cid 8 for iscsi since according to this method
254 * the designated UIO cid will come out 0 and it has a special handling for that
 255 * case which doesn't suit us. Therefore we will ceiling to the closest cid which
 256 * has least significant nibble 8, and if it is 8 we will move forward to 0x18.
257 */
258
259#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
250 (bp)->max_cos) 260 (bp)->max_cos)
261/* amount of cids traversed by UIO's DPM addition to doorbell */
262#define UIO_DPM 8
263/* roundup to DPM offset */
264#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
265 UIO_DPM))
266/* offset to nearest value which has lsb nibble matching DPM */
267#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
268 (UIO_DPM * 2))
269/* add offset to rounded-up cid to get a value which could be used with UIO */
270#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
271/* but wait - avoid UIO special case for cid 0 */
272#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
273 (UIO_DPM_ALIGN(bp) == UIO_DPM))
 274/* Properly DPM-aligned CID adjusted for the cid 0 special case */
275#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
276 (UIO_DPM_CID0_OFFSET(bp)))
277/* how many cids were wasted - need this value for cid allocation */
278#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
279 BNX2X_1st_NON_L2_ETH_CID(bp))
251 /* iSCSI L2 */ 280 /* iSCSI L2 */
252#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) 281#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
253 /* FCoE L2 */ 282 /* FCoE L2 */
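To make the rounding in the macro chain above concrete, here is a standalone rendering of the same arithmetic; the first-non-L2 CID values are illustrative, not taken from a running adapter:

#include <stdio.h>

#define UIO_DPM 8

static unsigned int roundup_to(unsigned int x, unsigned int to)
{
        return ((x + to - 1) / to) * to;
}

/* mirrors BNX2X_CNIC_START_ETH_CID(): round the first free CID up to a value
 * whose low nibble is 8, and skip CID 8 itself (UIO special-cases it as 0). */
static unsigned int cnic_start_eth_cid(unsigned int first_non_l2_cid)
{
        unsigned int r = roundup_to(first_non_l2_cid, UIO_DPM);
        unsigned int offset = (r + UIO_DPM) % (UIO_DPM * 2);
        unsigned int aligned = r + offset;

        if (aligned == UIO_DPM)          /* avoid the special-cased CID 8 */
                aligned += UIO_DPM * 2;
        return aligned;
}

int main(void)
{
        unsigned int first;

        for (first = 4; first <= 36; first += 8)
                printf("first non-L2 CID %2u -> CNIC start CID 0x%02x (pad %u)\n",
                       first, cnic_start_eth_cid(first),
                       cnic_start_eth_cid(first) - first);
        return 0;
}

Whatever the first free CID is, the result always lands on a value whose low nibble is 8 and never on CID 8 itself, which is what the old UIO doorbell scheme described above requires; UIO_CID_PAD() then accounts for the CIDs skipped by the rounding.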
@@ -1542,7 +1571,6 @@ struct bnx2x {
1542 */ 1571 */
1543 bool fcoe_init; 1572 bool fcoe_init;
1544 1573
1545 int pm_cap;
1546 int mrrs; 1574 int mrrs;
1547 1575
1548 struct delayed_work sp_task; 1576 struct delayed_work sp_task;
@@ -1681,10 +1709,11 @@ struct bnx2x {
1681 * Maximum CID count that might be required by the bnx2x: 1709 * Maximum CID count that might be required by the bnx2x:
1682 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI 1710 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1683 */ 1711 */
1712
1684#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1713#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1685 + 2 * CNIC_SUPPORT(bp)) 1714 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1686#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1715#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1687 + 2 * CNIC_SUPPORT(bp)) 1716 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1688#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1717#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1689 ILT_PAGE_CIDS)) 1718 ILT_PAGE_CIDS))
1690 1719
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 90045c920d09..e66beff2704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2481,8 +2481,7 @@ load_error_cnic2:
2481load_error_cnic1: 2481load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp); 2482 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */ 2483 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0); 2484 if (bnx2x_set_real_num_queues(bp, 0))
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2485 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0: 2486load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n"); 2487 BNX2X_ERR("CNIC-related load failed\n");
@@ -3008,16 +3007,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3008 u16 pmcsr; 3007 u16 pmcsr;
3009 3008
3010 /* If there is no power capability, silently succeed */ 3009 /* If there is no power capability, silently succeed */
3011 if (!bp->pm_cap) { 3010 if (!bp->pdev->pm_cap) {
3012 BNX2X_DEV_INFO("No power capability. Breaking.\n"); 3011 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3013 return 0; 3012 return 0;
3014 } 3013 }
3015 3014
3016 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 3015 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3017 3016
3018 switch (state) { 3017 switch (state) {
3019 case PCI_D0: 3018 case PCI_D0:
3020 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3019 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3020 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3022 PCI_PM_CTRL_PME_STATUS)); 3021 PCI_PM_CTRL_PME_STATUS));
3023 3022
@@ -3041,7 +3040,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3041 if (bp->wol) 3040 if (bp->wol)
3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 3041 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3043 3042
3044 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3043 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3045 pmcsr); 3044 pmcsr);
3046 3045
3047 /* No more memory access after this point until 3046 /* No more memory access after this point until
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2612e3c715d4..324de5f05332 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
1387 u16 pm = 0; 1387 u16 pm = 0;
1388 struct net_device *dev = pci_get_drvdata(bp->pdev); 1388 struct net_device *dev = pci_get_drvdata(bp->pdev);
1389 1389
1390 if (bp->pm_cap) 1390 if (bp->pdev->pm_cap)
1391 rc = pci_read_config_word(bp->pdev, 1391 rc = pci_read_config_word(bp->pdev,
1392 bp->pm_cap + PCI_PM_CTRL, &pm); 1392 bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
1393 1393
1394 if ((rc && !netif_running(dev)) || 1394 if ((rc && !netif_running(dev)) ||
1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) 1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
175#define EDC_MODE_LINEAR 0x0022 175#define EDC_MODE_LINEAR 0x0022
176#define EDC_MODE_LIMITING 0x0044 176#define EDC_MODE_LIMITING 0x0044
177#define EDC_MODE_PASSIVE_DAC 0x0055 177#define EDC_MODE_PASSIVE_DAC 0x0055
178#define EDC_MODE_ACTIVE_DAC 0x0066
178 179
179/* ETS defines*/ 180/* ETS defines*/
180#define DCBX_INVALID_COS (0xFF) 181#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 bnx2x_update_link_attr(params, vars->link_attr_sync);
3685} 3686}
3686 3687
3688static void bnx2x_disable_kr2(struct link_params *params,
3689 struct link_vars *vars,
3690 struct bnx2x_phy *phy)
3691{
3692 struct bnx2x *bp = params->bp;
3693 int i;
3694 static struct bnx2x_reg_set reg_set[] = {
3695 /* Step 1 - Program the TX/RX alignment markers */
3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
3705 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
3710 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
3711 };
3712 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
3713
3714 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3715 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3716 reg_set[i].val);
3717 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3718 bnx2x_update_link_attr(params, vars->link_attr_sync);
3719
3720 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3721}
3722
3687static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3688 struct link_params *params) 3724 struct link_params *params)
3689{ 3725{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3715 struct link_params *params, 3751 struct link_params *params,
3716 struct link_vars *vars) { 3752 struct link_vars *vars) {
3717 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 u16 lane, i, cl72_ctrl, an_adv = 0;
3718 u16 ucode_ver;
3719 struct bnx2x *bp = params->bp; 3754 struct bnx2x *bp = params->bp;
3720 static struct bnx2x_reg_set reg_set[] = { 3755 static struct bnx2x_reg_set reg_set[] = {
3721 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3806 3841
3807 /* Advertise pause */ 3842 /* Advertise pause */
3808 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 bnx2x_ext_phy_set_pause(params, phy, vars);
3809 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3810 */
3811 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3812 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3813 if (ucode_ver < 0xd108) {
3814 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3815 ucode_ver);
3816 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3817 }
3818 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3819 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3820 3847
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3838 bnx2x_set_aer_mmd(params, phy); 3865 bnx2x_set_aer_mmd(params, phy);
3839 3866
3840 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3868 } else {
3869 bnx2x_disable_kr2(params, vars, phy);
3841 } 3870 }
3842 3871
3843 /* Enable Autoneg: only on the main lane */ 3872 /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4347 struct bnx2x *bp = params->bp; 4376 struct bnx2x *bp = params->bp;
4348 u32 serdes_net_if; 4377 u32 serdes_net_if;
4349 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
4350 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4351 4379
4352 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
4353 4381
4354 if (!vars->turn_to_run_wc_rt) 4382 if (!vars->turn_to_run_wc_rt)
4355 return; 4383 return;
4356 4384
4357 /* Return if there is no link partner */
4358 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4359 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4360 return;
4361 }
4362
4363 if (vars->rx_tx_asic_rst) { 4385 if (vars->rx_tx_asic_rst) {
4386 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4364 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 serdes_net_if = (REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region, dev_info. 4388 offsetof(struct shmem_region, dev_info.
4366 port_hw_config[params->port].default_cfg)) & 4389 port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4375 /*10G KR*/ 4398 /*10G KR*/
4376 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
4377 4400
4378 DP(NETIF_MSG_LINK,
4379 "gp_status1 0x%x\n", gp_status1);
4380
4381 if (lnkup_kr || lnkup) { 4401 if (lnkup_kr || lnkup) {
4382 vars->rx_tx_asic_rst = 0; 4402 vars->rx_tx_asic_rst = 0;
4383 DP(NETIF_MSG_LINK,
4384 "link up, rx_tx_asic_rst 0x%x\n",
4385 vars->rx_tx_asic_rst);
4386 } else { 4403 } else {
4387 /* Reset the lane to see if link comes up.*/ 4404 /* Reset the lane to see if link comes up.*/
4388 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4507 * enabled transmitter to avoid current leakage in case 4524 * enabled transmitter to avoid current leakage in case
4508 * no module is connected 4525 * no module is connected
4509 */ 4526 */
4510 if (bnx2x_is_sfp_module_plugged(phy, params)) 4527 if ((params->loopback_mode == LOOPBACK_NONE) ||
4511 bnx2x_sfp_module_detection(phy, params); 4528 (params->loopback_mode == LOOPBACK_EXT)) {
4512 else 4529 if (bnx2x_is_sfp_module_plugged(phy, params))
4513 bnx2x_sfp_e3_set_transmitter(params, phy, 1); 4530 bnx2x_sfp_module_detection(phy, params);
4531 else
4532 bnx2x_sfp_e3_set_transmitter(params,
4533 phy, 1);
4534 }
4514 4535
4515 bnx2x_warpcore_config_sfi(phy, params); 4536 bnx2x_warpcore_config_sfi(phy, params);
4516 break; 4537 break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5757 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5758 duplex); 5779 duplex);
5759 5780
5781 /* In case of KR link down, start up the recovering procedure */
5782 if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
5783 (!(phy->flags & FLAGS_WC_DUAL_MODE)))
5784 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
5785
5760 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5786
5761 vars->duplex, vars->flow_ctrl, vars->link_status); 5787
5762 return rc; 5788
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
6507 params->phy[INT_PHY].config_init(phy, params, vars); 6533
6508 } 6534
6509 6535
6536 /* Re-read this value in case it was changed inside config_init due to
6537 * limitations of optic module
6538 */
6539 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6540
6510 /* Init external phy*/ 6541
6511 if (non_ext_phy) { 6542
6512 if (params->phy[INT_PHY].supported & 6543
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8080 if (copper_module_type & 8111
8081 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8112
8082 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8113
8083 check_limiting_mode = 1; 8114 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8115 *edc_mode = EDC_MODE_ACTIVE_DAC;
8116 else
8117 check_limiting_mode = 1;
8084 } else if (copper_module_type & 8118
8085 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8119
8086 DP(NETIF_MSG_LINK, 8120
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8555 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8589
8556 break; 8590
8557 case EDC_MODE_PASSIVE_DAC: 8591
8592 case EDC_MODE_ACTIVE_DAC:
8558 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8593
8559 break; 8594
8560 default: 8595
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9730 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9765
9731 an_1000_val); 9766
9732 9767
9733 /* set 100 speed advertisement */ 9768 /* Set 10/100 speed advertisement */
9734 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 9769 if (phy->req_line_speed == SPEED_AUTO_NEG) {
9735 (phy->speed_cap_mask & 9770 if (phy->speed_cap_mask &
9736 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 9771 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
9737 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { 9772 /* Enable autoneg and restart autoneg for legacy speeds
9738 an_10_100_val |= (1<<7); 9773 */
9739 /* Enable autoneg and restart autoneg for legacy speeds */ 9774 autoneg_val |= (1<<9 | 1<<12);
9740 autoneg_val |= (1<<9 | 1<<12);
9741
9742 if (phy->req_duplex == DUPLEX_FULL)
9743 an_10_100_val |= (1<<8); 9775 an_10_100_val |= (1<<8);
9744 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 9776 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
9745 } 9777 }
9746 /* set 10 speed advertisement */ 9778
9747 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9779 if (phy->speed_cap_mask &
9748 (phy->speed_cap_mask & 9780 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
9749 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 9781 /* Enable autoneg and restart autoneg for legacy speeds
9750 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && 9782 */
9751 (phy->supported & 9783 autoneg_val |= (1<<9 | 1<<12);
9752 (SUPPORTED_10baseT_Half | 9784 an_10_100_val |= (1<<7);
9753 SUPPORTED_10baseT_Full)))) { 9785 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
9754 an_10_100_val |= (1<<5); 9786 }
9755 autoneg_val |= (1<<9 | 1<<12); 9787
9756 if (phy->req_duplex == DUPLEX_FULL) 9788 if ((phy->speed_cap_mask &
9789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
9790 (phy->supported & SUPPORTED_10baseT_Full)) {
9757 an_10_100_val |= (1<<6); 9791 an_10_100_val |= (1<<6);
9758 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 9792 autoneg_val |= (1<<9 | 1<<12);
9793 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
9794 }
9795
9796 if ((phy->speed_cap_mask &
9797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
9798 (phy->supported & SUPPORTED_10baseT_Half)) {
9799 an_10_100_val |= (1<<5);
9800 autoneg_val |= (1<<9 | 1<<12);
9801 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
9802 }
9759 } 9803 }
9760 9804
9761 /* Only 10/100 are allowed to work in FORCE mode */ 9805 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13432 } 13476
13433 } 13477
13434} 13478
13435static void bnx2x_disable_kr2(struct link_params *params,
13436 struct link_vars *vars,
13437 struct bnx2x_phy *phy)
13438{
13439 struct bnx2x *bp = params->bp;
13440 int i;
13441 static struct bnx2x_reg_set reg_set[] = {
13442 /* Step 1 - Program the TX/RX alignment markers */
13443 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13444 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13445 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13446 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13447 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13448 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13449 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13450 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13451 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13452 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13453 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13454 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13455 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13456 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13457 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13458 };
13459 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13460
13461 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
13462 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13463 reg_set[i].val);
13464 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13465 bnx2x_update_link_attr(params, vars->link_attr_sync);
13466
13467 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13468 /* Restart AN on leading lane */
13469 bnx2x_warpcore_restart_AN_KR(phy, params);
13470}
13471
13472static void bnx2x_kr2_recovery(struct link_params *params, 13479
13473 struct link_vars *vars, 13480
13474 struct bnx2x_phy *phy) 13481
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13546 /* Disable KR2 on both lanes */ 13553
13547 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); 13554
13548 bnx2x_disable_kr2(params, vars, phy); 13555
13556 /* Restart AN on leading lane */
13557 bnx2x_warpcore_restart_AN_KR(phy, params);
13549 return; 13558
13550 } 13559
13551} 13560
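A note on the bnx2x_link.c hunks above: bnx2x_disable_kr2() programs the warpcore through a static table of {devad, reg, value} tuples replayed by one CL45 write helper, and this commit pulls the autoneg restart out of that helper so the caller in bnx2x_check_kr2_wa() restarts AN explicitly after disabling KR2. Below is a minimal userspace sketch of the same table-driven pattern; reg_write() and kr2_reg_table[] are illustrative stand-ins for bnx2x_cl45_write() and the driver's register list, not the real symbols or register values.

	#include <stdint.h>
	#include <stdio.h>
	#include <stddef.h>

	struct reg_set {
		uint8_t  devad;	/* MDIO device address */
		uint16_t reg;	/* register offset */
		uint16_t val;	/* value to program */
	};

	/* stand-in for bnx2x_cl45_write(); here it only logs the access */
	static void reg_write(uint8_t devad, uint16_t reg, uint16_t val)
	{
		printf("write devad=%u reg=0x%04x val=0x%04x\n", devad, reg, val);
	}

	/* illustrative table; the real driver lists ~15 warpcore registers */
	static const struct reg_set kr2_reg_table[] = {
		{ 3, 0x0001, 0x7690 },
		{ 3, 0x0002, 0xe647 },
		{ 3, 0x0003, 0xc4f0 },
	};

	static void disable_kr2_sketch(void)
	{
		size_t i;

		/* replay the whole table through the single write helper */
		for (i = 0; i < sizeof(kr2_reg_table) / sizeof(kr2_reg_table[0]); i++)
			reg_write(kr2_reg_table[i].devad, kr2_reg_table[i].reg,
				  kr2_reg_table[i].val);
		/* the AN restart now happens in the caller, not here */
	}

	int main(void)
	{
		disable_kr2_sketch();
		/* caller decides when to restart autoneg on the leading lane */
		return 0;
	}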
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f8dbbbd7a86..82b658d8c04c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4703 attn.sig[3] = REG_RD(bp, 4703 attn.sig[3] = REG_RD(bp,
4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705 port*4); 4705 port*4);
4706 /* Since MCP attentions can't be disabled inside the block, we need to
4707 * read AEU registers to see whether they're currently disabled
4708 */
4709 attn.sig[3] &= ((REG_RD(bp,
4710 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4711 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4712 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4713 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4706 4714
4707 if (!CHIP_IS_E1x(bp)) 4715 if (!CHIP_IS_E1x(bp))
4708 attn.sig[4] = REG_RD(bp, 4716 attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data)
5447 if (IS_PF(bp) && 5455 if (IS_PF(bp) &&
5448 !BP_NOMCP(bp)) { 5456 !BP_NOMCP(bp)) {
5449 int mb_idx = BP_FW_MB_IDX(bp); 5457 int mb_idx = BP_FW_MB_IDX(bp);
5450 u32 drv_pulse; 5458 u16 drv_pulse;
5451 u32 mcp_pulse; 5459 u16 mcp_pulse;
5452 5460
5453 ++bp->fw_drv_pulse_wr_seq; 5461 ++bp->fw_drv_pulse_wr_seq;
5454 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5462 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5455 /* TBD - add SYSTEM_TIME */
5456 drv_pulse = bp->fw_drv_pulse_wr_seq; 5463 drv_pulse = bp->fw_drv_pulse_wr_seq;
5457 bnx2x_drv_pulse(bp); 5464 bnx2x_drv_pulse(bp);
5458 5465
5459 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5466 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5460 MCP_PULSE_SEQ_MASK); 5467 MCP_PULSE_SEQ_MASK);
5461 /* The delta between driver pulse and mcp response 5468 /* The delta between driver pulse and mcp response
5462 * should be 1 (before mcp response) or 0 (after mcp response) 5469 * should not get too big. If the MFW is more than 5 pulses
5470 * behind, we should worry about it enough to generate an error
5471 * log.
5463 */ 5472 */
5464 if ((drv_pulse != mcp_pulse) && 5473 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5465 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5474 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5466 /* someone lost a heartbeat... */
5467 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5468 drv_pulse, mcp_pulse); 5475 drv_pulse, mcp_pulse);
5469 }
5470 } 5476 }
5471 5477
5472 if (bp->state == BNX2X_STATE_OPEN) 5478 if (bp->state == BNX2X_STATE_OPEN)
@@ -8652,6 +8658,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8652 else if (bp->wol) { 8658 else if (bp->wol) {
8653 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 8659 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8654 u8 *mac_addr = bp->dev->dev_addr; 8660 u8 *mac_addr = bp->dev->dev_addr;
8661 struct pci_dev *pdev = bp->pdev;
8655 u32 val; 8662 u32 val;
8656 u16 pmc; 8663 u16 pmc;
8657 8664
@@ -8668,9 +8675,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8668 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8675 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8669 8676
8670 /* Enable the PME and clear the status */ 8677 /* Enable the PME and clear the status */
8671 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); 8678 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
8672 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8679 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8673 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); 8680 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
8674 8681
8675 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8682 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8676 8683
@@ -10399,7 +10406,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10399 break; 10406 break;
10400 } 10407 }
10401 10408
10402 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 10409 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10403 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 10410 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10404 10411
10405 BNX2X_DEV_INFO("%sWoL capable\n", 10412 BNX2X_DEV_INFO("%sWoL capable\n",
@@ -12141,8 +12148,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12141 } 12148 }
12142 12149
12143 if (IS_PF(bp)) { 12150 if (IS_PF(bp)) {
12144 bp->pm_cap = pdev->pm_cap; 12151 if (!pdev->pm_cap) {
12145 if (bp->pm_cap == 0) {
12146 dev_err(&bp->pdev->dev, 12152 dev_err(&bp->pdev->dev,
12147 "Cannot find power management capability, aborting\n"); 12153 "Cannot find power management capability, aborting\n");
12148 rc = -EIO; 12154 rc = -EIO;
@@ -13632,6 +13638,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
13632 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 13638 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13633 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 13639 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13634 13640
13641 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
13642 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
13643 cp->iscsi_l2_cid);
13644
13635 if (NO_ISCSI_OOO(bp)) 13645 if (NO_ISCSI_OOO(bp))
13636 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 13646 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13637} 13647}
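The reworked heartbeat test in bnx2x_timer() above replaces an exact-match comparison with a masked difference, so the check stays correct when the pulse counter wraps: only a management-firmware lag of more than five pulses is reported as an error. A small standalone sketch of that wrap-safe comparison follows; SEQ_MASK and MAX_LAG are illustrative stand-ins for MCP_PULSE_SEQ_MASK and the driver's threshold of 5.

	#include <stdint.h>
	#include <stdio.h>

	#define SEQ_MASK	0x7fff	/* stand-in for MCP_PULSE_SEQ_MASK */
	#define MAX_LAG		5

	/* Returns non-zero when the firmware counter lags too far behind. */
	static int fw_seems_hung(uint16_t drv_pulse, uint16_t mcp_pulse)
	{
		/* unsigned subtraction plus masking handles counter wrap-around */
		return (uint16_t)((drv_pulse - mcp_pulse) & SEQ_MASK) > MAX_LAG;
	}

	int main(void)
	{
		/* wrap case: driver at 2, firmware still at SEQ_MASK - 1 -> lag 4 */
		printf("%d\n", fw_seems_hung(2, SEQ_MASK - 1));	/* prints 0 */
		/* firmware 8 pulses behind -> treated as hung */
		printf("%d\n", fw_seems_hung(10, 2));			/* prints 1 */
		return 0;
	}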
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..9ad012bdd915 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1820 if (fid & IGU_FID_ENCODE_IS_PF) 1820 if (fid & IGU_FID_ENCODE_IS_PF)
1821 current_pf = fid & IGU_FID_PF_NUM_MASK; 1821 current_pf = fid & IGU_FID_PF_NUM_MASK;
1822 else if (current_pf == BP_ABS_FUNC(bp)) 1822 else if (current_pf == BP_FUNC(bp))
1823 bnx2x_vf_set_igu_info(bp, sb_id, 1823 bnx2x_vf_set_igu_info(bp, sb_id,
1824 (fid & IGU_FID_VF_NUM_MASK)); 1824 (fid & IGU_FID_VF_NUM_MASK));
1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3180 /* set local queue arrays */ 3180 /* set local queue arrays */
3181 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3181 vf->vfqs = &bp->vfdb->vfqs[qcount];
3182 qcount += vf_sb_count(vf); 3182 qcount += vf_sb_count(vf);
3183 bnx2x_iov_static_resc(bp, vf);
3183 } 3184 }
3184 3185
3185 /* prepare msix vectors in VF configuration space */ 3186 /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3187 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3188 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3188 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3189 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3189 num_vf_queues); 3190 num_vf_queues);
3191 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3192 vf_idx, num_vf_queues);
3190 } 3193 }
3191 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3192 3195
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..da16953eb2ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1765 switch (mbx->first_tlv.tl.type) { 1765 switch (mbx->first_tlv.tl.type) {
1766 case CHANNEL_TLV_ACQUIRE: 1766 case CHANNEL_TLV_ACQUIRE:
1767 bnx2x_vf_mbx_acquire(bp, vf, mbx); 1767 bnx2x_vf_mbx_acquire(bp, vf, mbx);
1768 break; 1768 return;
1769 case CHANNEL_TLV_INIT: 1769 case CHANNEL_TLV_INIT:
1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx); 1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
1771 break; 1771 return;
1772 case CHANNEL_TLV_SETUP_Q: 1772 case CHANNEL_TLV_SETUP_Q:
1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx); 1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
1774 break; 1774 return;
1775 case CHANNEL_TLV_SET_Q_FILTERS: 1775 case CHANNEL_TLV_SET_Q_FILTERS:
1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); 1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
1777 break; 1777 return;
1778 case CHANNEL_TLV_TEARDOWN_Q: 1778 case CHANNEL_TLV_TEARDOWN_Q:
1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); 1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
1780 break; 1780 return;
1781 case CHANNEL_TLV_CLOSE: 1781 case CHANNEL_TLV_CLOSE:
1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx); 1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
1783 break; 1783 return;
1784 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1786 break; 1786 return;
1787 case CHANNEL_TLV_UPDATE_RSS: 1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break; 1789 return;
1790 } 1790 }
1791 1791
1792 } else { 1792 } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1802 for (i = 0; i < 20; i++) 1802 for (i = 0; i < 20; i++)
1803 DP_CONT(BNX2X_MSG_IOV, "%x ", 1803 DP_CONT(BNX2X_MSG_IOV, "%x ",
1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
1805 }
1805 1806
1806 /* test whether we can respond to the VF (do we have an address 1807 /* can we respond to VF (do we have an address for it?) */
1807 * for it?) 1808 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1808 */ 1809 /* mbx_resp uses the op_rc of the VF */
1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1810 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1810 /* mbx_resp uses the op_rc of the VF */
1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1812 1811
1813 /* notify the VF that we do not support this request */ 1812 /* notify the VF that we do not support this request */
1814 bnx2x_vf_mbx_resp(bp, vf); 1813 bnx2x_vf_mbx_resp(bp, vf);
1815 } else { 1814 } else {
1816 /* can't send a response since this VF is unknown to us 1815 /* can't send a response since this VF is unknown to us
1817 * just ack the FW to release the mailbox and unlock 1816 * just ack the FW to release the mailbox and unlock
1818 * the channel. 1817 * the channel.
1819 */ 1818 */
1820 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1819 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1821 mmiowb(); 1820 /* Firmware ack should be written before unlocking channel */
1822 bnx2x_unlock_vf_pf_channel(bp, vf, 1821 mmiowb();
1823 mbx->first_tlv.tl.type); 1822 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1824 }
1825 } 1823 }
1826} 1824}
1827 1825
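In the bnx2x_vf_mbx_request() hunks above, each recognized TLV handler now returns directly, so the code after the switch only runs for request types the PF does not recognize and can reply with a not-supported status. A tiny sketch of the resulting control flow, with hypothetical handler and enum names standing in for the driver's CHANNEL_TLV dispatch:

	#include <stdio.h>

	enum tlv_type { TLV_ACQUIRE, TLV_INIT, TLV_UNKNOWN };

	/* hypothetical handlers standing in for bnx2x_vf_mbx_acquire() etc. */
	static void handle_acquire(void) { puts("acquire handled"); }
	static void handle_init(void)    { puts("init handled"); }

	static void mbx_request_sketch(enum tlv_type type)
	{
		switch (type) {
		case TLV_ACQUIRE:
			handle_acquire();
			return;		/* handled: skip the fallback below */
		case TLV_INIT:
			handle_init();
			return;
		default:
			break;
		}

		/* only unrecognized requests reach the not-supported reply */
		puts("replying PFVF_STATUS_NOT_SUPPORTED");
	}

	int main(void)
	{
		mbx_request_sketch(TLV_ACQUIRE);
		mbx_request_sketch(TLV_UNKNOWN);
		return 0;
	}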
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 8142480d9770..99394bd49a13 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3135,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3135{ 3135{
3136 struct cnic_dev *dev = (struct cnic_dev *) data; 3136 struct cnic_dev *dev = (struct cnic_dev *) data;
3137 struct cnic_local *cp = dev->cnic_priv; 3137 struct cnic_local *cp = dev->cnic_priv;
3138 struct bnx2x *bp = netdev_priv(dev->netdev);
3138 u32 status_idx, new_status_idx; 3139 u32 status_idx, new_status_idx;
3139 3140
3140 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 3141 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -3146,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
3146 CNIC_WR16(dev, cp->kcq1.io_addr, 3147 CNIC_WR16(dev, cp->kcq1.io_addr,
3147 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3148 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3148 3149
3149 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) { 3150 if (!CNIC_SUPPORTS_FCOE(bp)) {
3150 cp->arm_int(dev, status_idx); 3151 cp->arm_int(dev, status_idx);
3151 break; 3152 break;
3152 } 3153 }
@@ -5217,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
5217 "iSCSI CLIENT_SETUP did not complete\n"); 5218 "iSCSI CLIENT_SETUP did not complete\n");
5218 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5219 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5219 cnic_ring_ctl(dev, cid, cli, 1); 5220 cnic_ring_ctl(dev, cid, cli, 1);
5220 *cid_ptr = cid; 5221 *cid_ptr = cid >> 4;
5222 *(cid_ptr + 1) = cid * bp->db_size;
5221 } 5223 }
5222} 5224}
5223 5225
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5701f3d1a169..12d961c4ebca 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3034,6 +3034,7 @@ static bool tg3_phy_led_bug(struct tg3 *tp)
3034{ 3034{
3035 switch (tg3_asic_rev(tp)) { 3035 switch (tg3_asic_rev(tp)) {
3036 case ASIC_REV_5719: 3036 case ASIC_REV_5719:
3037 case ASIC_REV_5720:
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3038 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3038 !tp->pci_fn) 3039 !tp->pci_fn)
3039 return true; 3040 return true;
@@ -16192,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16192 * So explicitly force the chip into D0 here. 16193 * So explicitly force the chip into D0 here.
16193 */ 16194 */
16194 pci_read_config_dword(tp->pdev, 16195 pci_read_config_dword(tp->pdev,
16195 tp->pm_cap + PCI_PM_CTRL, 16196 tp->pdev->pm_cap + PCI_PM_CTRL,
16196 &pm_reg); 16197 &pm_reg);
16197 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16198 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16198 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16199 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16199 pci_write_config_dword(tp->pdev, 16200 pci_write_config_dword(tp->pdev,
16200 tp->pm_cap + PCI_PM_CTRL, 16201 tp->pdev->pm_cap + PCI_PM_CTRL,
16201 pm_reg); 16202 pm_reg);
16202 16203
16203 /* Also, force SERR#/PERR# in PCI command. */ 16204 /* Also, force SERR#/PERR# in PCI command. */
@@ -17346,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17346 tp = netdev_priv(dev); 17347 tp = netdev_priv(dev);
17347 tp->pdev = pdev; 17348 tp->pdev = pdev;
17348 tp->dev = dev; 17349 tp->dev = dev;
17349 tp->pm_cap = pdev->pm_cap;
17350 tp->rx_mode = TG3_DEF_RX_MODE; 17350 tp->rx_mode = TG3_DEF_RX_MODE;
17351 tp->tx_mode = TG3_DEF_TX_MODE; 17351 tp->tx_mode = TG3_DEF_TX_MODE;
17352 tp->irq_sync = 1; 17352 tp->irq_sync = 1;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ddb8be1298ea..70257808aa37 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3234,7 +3234,6 @@ struct tg3 {
3234 u8 pci_lat_timer; 3234 u8 pci_lat_timer;
3235 3235
3236 int pci_fn; 3236 int pci_fn;
3237 int pm_cap;
3238 int msi_cap; 3237 int msi_cap;
3239 int pcix_cap; 3238 int pcix_cap;
3240 int pcie_readrq; 3239 int pcie_readrq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d0665ca6f19..c73cabdbd4c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6149,8 +6149,10 @@ static int __init cxgb4_init_module(void)
6149 pr_warn("could not create debugfs entry, continuing\n"); 6149 pr_warn("could not create debugfs entry, continuing\n");
6150 6150
6151 ret = pci_register_driver(&cxgb4_driver); 6151 ret = pci_register_driver(&cxgb4_driver);
6152 if (ret < 0) 6152 if (ret < 0) {
6153 debugfs_remove(cxgb4_debugfs_root); 6153 debugfs_remove(cxgb4_debugfs_root);
6154 destroy_workqueue(workq);
6155 }
6154 6156
6155 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6157 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6156 6158
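The cxgb4_init_module() hunk above is a typical init-path unwind fix: if pci_register_driver() fails, the workqueue created earlier has to be destroyed along with the debugfs directory. A hedged, generic sketch of that reverse-order cleanup pattern follows; the stub names are illustrative and not the cxgb4 symbols.

	#include <stdio.h>

	/* stand-ins for the real setup and teardown steps */
	static int  create_wq(void)      { puts("workqueue created");  return 0; }
	static void destroy_wq(void)     { puts("workqueue destroyed"); }
	static int  create_debugfs(void) { puts("debugfs created");     return 0; }
	static void remove_debugfs(void) { puts("debugfs removed");     }
	static int  register_drv(void)   { puts("driver registration fails"); return -1; }

	static int init_module_sketch(void)
	{
		int ret;

		ret = create_wq();
		if (ret)
			return ret;

		ret = create_debugfs();
		if (ret)
			goto err_wq;

		ret = register_drv();
		if (ret)
			goto err_debugfs;

		return 0;

	err_debugfs:		/* tear down in reverse order of setup */
		remove_debugfs();
	err_wq:
		destroy_wq();
		return ret;
	}

	int main(void)
	{
		return init_module_sketch() ? 1 : 0;
	}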
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 2db6c573cec7..263b92c00cbf 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, 1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1322 lp->adapter_name, dev)) { 1322 lp->adapter_name, dev)) {
1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); 1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED, 1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1325 lp->adapter_name, dev)) { 1325 lp->adapter_name, dev)) {
1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n"); 1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1327 disable_ast(dev); 1327 disable_ast(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
91#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 96u
92#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
93 94
@@ -333,6 +334,7 @@ enum vf_state {
333 334
334#define BE_FLAGS_LINK_STATUS_INIT 1 335#define BE_FLAGS_LINK_STATUS_INIT 1
335#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 336#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
337#define BE_FLAGS_VLAN_PROMISC (1 << 4)
336#define BE_FLAGS_NAPI_ENABLED (1 << 9) 338#define BE_FLAGS_NAPI_ENABLED (1 << 9)
337#define BE_UC_PMAC_COUNT 30 339#define BE_UC_PMAC_COUNT 30
338#define BE_VF_UC_PMAC_COUNT 2 340#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..bd0e0c0bbcd8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
180 dev_err(&adapter->pdev->dev, 180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n", 181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status); 182 opcode, subsystem, compl_status, extd_status);
183
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
185 return extd_status;
183 } 186 }
184 } 187 }
185done: 188done:
@@ -1812,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1812 } else if (flags & IFF_ALLMULTI) { 1815 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags = 1816 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1817 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1818 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1819 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1820
1821 if (value == ON)
1822 req->if_flags =
1823 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1815 } else { 1824 } else {
1816 struct netdev_hw_addr *ha; 1825 struct netdev_hw_addr *ha;
1817 int i = 0; 1826 int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88c..108ca8abf0af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
64
63#define CQE_STATUS_COMPL_MASK 0xFFFF 65#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF 67#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
1791 u8 acpi_params; 1793 u8 acpi_params;
1792 u8 wol_param; 1794 u8 wol_param;
1793 u16 rsvd7; 1795 u16 rsvd7;
1794 u32 rsvd8[3]; 1796 u32 rsvd8[7];
1795} __packed; 1797} __packed;
1796 1798
1797struct be_cmd_req_get_func_config { 1799struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3224d28cdad4..2c38cc402119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
855 unsigned int eth_hdr_len; 855 unsigned int eth_hdr_len;
856 struct iphdr *ip; 856 struct iphdr *ip;
857 857
858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less 858 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to 859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length. 860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */ 861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { 862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36)) 863 if (skb_padto(skb, 36))
864 goto tx_drop; 864 goto tx_drop;
865 skb->len = 36; 865 skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0); 1014 vids, num, 1, 0);
1015 1015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) { 1016 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); 1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); 1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1020 goto set_vlan_promisc; 1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1021 } 1035 }
1022 1036
1023 return status; 1037 return status;
1024 1038
1025set_vlan_promisc: 1039set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1027 NULL, 0, 1, 1); 1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1028 return status; 1050 return status;
1029} 1051}
1030 1052
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 struct be_adapter *adapter = netdev_priv(netdev); 1055 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0; 1056 int status = 0;
1035 1057
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040 1058
1041 /* Packets with VID 0 are always received by Lancer by default */ 1059 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0) 1060 if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1059 struct be_adapter *adapter = netdev_priv(netdev); 1077 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status = 0; 1078 int status = 0;
1061 1079
1062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063 status = -EINVAL;
1064 goto ret;
1065 }
1066
1067 /* Packets with VID 0 are always received by Lancer by default */ 1080 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0) 1081 if (lancer_chip(adapter) && vid == 0)
1069 goto ret; 1082 goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1188 1201
1189 vi->vf = vf; 1202 vi->vf = vf;
1190 vi->tx_rate = vf_cfg->tx_rate; 1203 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag; 1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1192 vi->qos = 0; 1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1194 1207
1195 return 0; 1208 return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
1199 int vf, u16 vlan, u8 qos) 1212 int vf, u16 vlan, u8 qos)
1200{ 1213{
1201 struct be_adapter *adapter = netdev_priv(netdev); 1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1202 int status = 0; 1216 int status = 0;
1203 1217
1204 if (!sriov_enabled(adapter)) 1218 if (!sriov_enabled(adapter))
1205 return -EPERM; 1219 return -EPERM;
1206 1220
1207 if (vf >= adapter->num_vfs || vlan > 4095) 1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1208 return -EINVAL; 1222 return -EINVAL;
1209 1223
1210 if (vlan) { 1224 if (vlan || qos) {
1211 if (adapter->vf_cfg[vf].vlan_tag != vlan) { 1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1212 /* If this is new value, program it. Else skip. */ 1227 /* If this is new value, program it. Else skip. */
1213 adapter->vf_cfg[vf].vlan_tag = vlan; 1228 vf_cfg->vlan_tag = vlan;
1214 1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1215 status = be_cmd_set_hsw_config(adapter, vlan, 1230 vf_cfg->if_handle, 0);
1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1217 } 1231 }
1218 } else { 1232 } else {
1219 /* Reset Transparent Vlan Tagging. */ 1233 /* Reset Transparent Vlan Tagging. */
1220 adapter->vf_cfg[vf].vlan_tag = 0; 1234 vf_cfg->vlan_tag = 0;
1221 vlan = adapter->vf_cfg[vf].def_vid; 1235 vlan = vf_cfg->def_vid;
1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1223 adapter->vf_cfg[vf].if_handle, 0); 1237 vf_cfg->if_handle, 0);
1224 } 1238 }
1225 1239
1226 1240
@@ -2802,7 +2816,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
2802 struct be_resources res = {0}; 2816 struct be_resources res = {0};
2803 struct be_vf_cfg *vf_cfg; 2817 struct be_vf_cfg *vf_cfg;
2804 u32 cap_flags, en_flags, vf; 2818 u32 cap_flags, en_flags, vf;
2805 int status; 2819 int status = 0;
2806 2820
2807 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2821 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2808 BE_IF_FLAGS_MULTICAST; 2822 BE_IF_FLAGS_MULTICAST;
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
2963 2977
2964 if (adapter->function_mode & FLEX10_MODE) 2978 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2966 else 2982 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC; 2984 res->max_mcast_mac = BE_MAX_MC;
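The be_vid_config() rework above falls back to VLAN-promiscuous mode only when the firmware reports that the VLAN filter table is exhausted, records that state in an adapter flag, and re-enables hardware filtering once a later programming attempt succeeds. A simplified sketch of that fallback state machine follows; the helper names and the 64-entry limit are illustrative, not the benet API.

	#include <stdbool.h>
	#include <stdio.h>

	#define ERR_NO_RESOURCES 0x16	/* stand-in for MCC_ADDL_STS_INSUFFICIENT_RESOURCES */

	static bool vlan_promisc;	/* stand-in for the BE_FLAGS_VLAN_PROMISC flag */

	/* hypothetical firmware calls; here the table holds at most 64 VIDs */
	static int hw_set_vlan_filters(int nvids)
	{
		return nvids > 64 ? ERR_NO_RESOURCES : 0;
	}

	static int hw_set_vlan_promisc(bool on)
	{
		printf("vlan promiscuous %s\n", on ? "on" : "off");
		return 0;
	}

	static int vid_config_sketch(int nvids)
	{
		int status = hw_set_vlan_filters(nvids);

		if (status == ERR_NO_RESOURCES) {
			/* filter table exhausted: fall back to VLAN promiscuous */
			if (!hw_set_vlan_promisc(true))
				vlan_promisc = true;
		} else if (!status && vlan_promisc) {
			/* filters fit again: restore hardware VLAN filtering */
			if (!hw_set_vlan_promisc(false))
				vlan_promisc = false;
		}
		return status;
	}

	int main(void)
	{
		vid_config_sketch(100);	/* too many VIDs -> promiscuous mode */
		vid_config_sketch(10);	/* fits again -> filtering restored */
		return 0;
	}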
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f9aacf5d8523..b2793b91cc55 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2199,7 +2199,7 @@ fec_probe(struct platform_device *pdev)
2199 goto failed_irq; 2199 goto failed_irq;
2200 } 2200 }
2201 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 2201 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
2202 IRQF_DISABLED, pdev->name, ndev); 2202 0, pdev->name, ndev);
2203 if (ret) 2203 if (ret)
2204 goto failed_irq; 2204 goto failed_irq;
2205 } 2205 }
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908ae..e006a09ba899 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
452 err = -ENODEV; 452 err = -ENODEV;
453 453
454 etsects->caps = ptp_gianfar_caps; 454 etsects->caps = ptp_gianfar_caps;
455 etsects->cksel = DEFAULT_CKSEL; 455
456 if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
457 etsects->cksel = DEFAULT_CKSEL;
456 458
457 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || 459 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
458 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || 460 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c697fc45..91227d03274e 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
1097 /* New: if bus is PCI or EISA, interrupts might be shared interrupts */ 1097 /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
1098 if (request_irq(dev->irq, hp100_interrupt, 1098 if (request_irq(dev->irq, hp100_interrupt,
1099 lp->bus == HP100_BUS_PCI || lp->bus == 1099 lp->bus == HP100_BUS_PCI || lp->bus ==
1100 HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED, 1100 HP100_BUS_EISA ? IRQF_SHARED : 0,
1101 "hp100", dev)) { 1101 "hp100", dev)) {
1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); 1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
1103 return -EAGAIN; 1103 return -EAGAIN;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b43d66e..2d1c6bdd3618 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
102 102
103static int ehea_remove(struct platform_device *dev); 103static int ehea_remove(struct platform_device *dev);
104 104
105static struct of_device_id ehea_module_device_table[] = {
106 {
107 .name = "lhea",
108 .compatible = "IBM,lhea",
109 },
110 {
111 .type = "network",
112 .compatible = "IBM,lhea-ethernet",
113 },
114 {},
115};
116MODULE_DEVICE_TABLE(of, ehea_module_device_table);
117
105static struct of_device_id ehea_device_table[] = { 118static struct of_device_id ehea_device_table[] = {
106 { 119 {
107 .name = "lhea", 120 .name = "lhea",
@@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
109 }, 122 },
110 {}, 123 {},
111}; 124};
112MODULE_DEVICE_TABLE(of, ehea_device_table);
113 125
114static struct platform_driver ehea_driver = { 126static struct platform_driver ehea_driver = {
115 .driver = { 127 .driver = {
@@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
1285 1297
1286 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, 1298 ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1287 ehea_qp_aff_irq_handler, 1299 ehea_qp_aff_irq_handler,
1288 IRQF_DISABLED, port->int_aff_name, port); 1300 0, port->int_aff_name, port);
1289 if (ret) { 1301 if (ret) {
1290 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", 1302 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1291 port->qp_eq->attr.ist1); 1303 port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
1303 "%s-queue%d", dev->name, i); 1315 "%s-queue%d", dev->name, i);
1304 ret = ibmebus_request_irq(pr->eq->attr.ist1, 1316 ret = ibmebus_request_irq(pr->eq->attr.ist1,
1305 ehea_recv_irq_handler, 1317 ehea_recv_irq_handler,
1306 IRQF_DISABLED, pr->int_send_name, 1318 0, pr->int_send_name, pr);
1307 pr);
1308 if (ret) { 1319 if (ret) {
1309 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", 1320 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1310 i, pr->eq->attr.ist1); 1321 i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
3320 } 3331 }
3321 3332
3322 ret = ibmebus_request_irq(adapter->neq->attr.ist1, 3333 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3323 ehea_interrupt_neq, IRQF_DISABLED, 3334 ehea_interrupt_neq, 0,
3324 "ehea_neq", adapter); 3335 "ehea_neq", adapter);
3325 if (ret) { 3336 if (ret) {
3326 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); 3337 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8633b8f0ac5..d14c8f53384c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -922,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
922 else 922 else
923 mask &= ~(1 << 30); 923 mask &= ~(1 << 30);
924 } 924 }
925 if (mac->type == e1000_pch2lan) {
926 /* SHRAH[0,1,2] different than previous */
927 if (i == 7)
928 mask &= 0xFFF4FFFF;
929 /* SHRAH[3] different than SHRAH[0,1,2] */
930 if (i == 10)
931 mask |= (1 << 30);
932 }
925 933
926 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 934 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
927 0xFFFFFFFF); 935 0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index af08188d7e62..42f0f6717511 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1371,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1371 return; 1371 return;
1372 } 1372 }
1373 1373
1374 if (index < hw->mac.rar_entry_count) { 1374 /* RAR[1-6] are owned by manageability. Skip those and program the
1375 * next address into the SHRA register array.
1376 */
1377 if (index < (u32)(hw->mac.rar_entry_count - 6)) {
1375 s32 ret_val; 1378 s32 ret_val;
1376 1379
1377 ret_val = e1000_acquire_swflag_ich8lan(hw); 1380 ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1962,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1962 if (ret_val) 1965 if (ret_val)
1963 goto release; 1966 goto release;
1964 1967
1965 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ 1968 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
1966 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 1969 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
1967 mac_reg = er32(RAL(i)); 1970 mac_reg = er32(RAL(i));
1968 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), 1971 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1969 (u16)(mac_reg & 0xFFFF)); 1972 (u16)(mac_reg & 0xFFFF));
@@ -2007,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2007 return ret_val; 2010 return ret_val;
2008 2011
2009 if (enable) { 2012 if (enable) {
2010 /* Write Rx addresses (rar_entry_count for RAL/H, +4 for 2013 /* Write Rx addresses (rar_entry_count for RAL/H, and
2011 * SHRAL/H) and initial CRC values to the MAC 2014 * SHRAL/H) and initial CRC values to the MAC
2012 */ 2015 */
2013 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 2016 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2014 u8 mac_addr[ETH_ALEN] = { 0 }; 2017 u8 mac_addr[ETH_ALEN] = { 0 };
2015 u32 addr_high, addr_low; 2018 u32 addr_high, addr_low;
2016 2019
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 59865695b282..217090df33e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -98,7 +98,7 @@
98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 98#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
99 99
100#define E1000_ICH_RAR_ENTRIES 7 100#define E1000_ICH_RAR_ENTRIES 7
101#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ 101#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
102#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ 102#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
103 103
104#define PHY_PAGE_SHIFT 5 104#define PHY_PAGE_SHIFT 5
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e87e9b01f404..4ef786775acb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4868,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4868 */ 4868 */
4869 if ((hw->phy.type == e1000_phy_igp_3 || 4869 if ((hw->phy.type == e1000_phy_igp_3 ||
4870 hw->phy.type == e1000_phy_bm) && 4870 hw->phy.type == e1000_phy_bm) &&
4871 (hw->mac.autoneg == true) && 4871 hw->mac.autoneg &&
4872 (adapter->link_speed == SPEED_10 || 4872 (adapter->link_speed == SPEED_10 ||
4873 adapter->link_speed == SPEED_100) && 4873 adapter->link_speed == SPEED_100) &&
4874 (adapter->link_duplex == HALF_DUPLEX)) { 4874 (adapter->link_duplex == HALF_DUPLEX)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f811..cfef7fc32cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
701 701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) { 703 if (cmd_details) {
704 memcpy(details, cmd_details, 704 *details = *cmd_details;
705 sizeof(struct i40e_asq_cmd_details));
706 705
707 /* If the cmd_details are defined copy the cookie. The 706 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored 707 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 759 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761 760
762 /* if the desc is available copy the temp desc to the right place */ 761 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); 762 *desc_on_ring = *desc;
764 763
765 /* if buff is not NULL assume indirect command */ 764 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) { 765 if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
807 806
808 /* if ready, copy the desc back to temp */ 807 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) { 808 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); 809 *desc = *desc_on_ring;
811 if (buff != NULL) 810 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size); 811 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval); 812 retval = le16_to_cpu(desc->retval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1d..1e4ea134975a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
507 507
508 /* save link status information */ 508 /* save link status information */
509 if (link) 509 if (link)
510 memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); 510 *link = *hw_link_info;
511 511
512 /* flag cleared so helper functions don't call AQ again */ 512 /* flag cleared so helper functions don't call AQ again */
513 hw->phy.get_link_info = false; 513 hw->phy.get_link_info = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694ea..221aa4795017 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 mem->size = ALIGN(size, alignment); 101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL); 103 &mem->pa, GFP_KERNEL);
104 if (mem->va) 104 if (!mem->va)
105 return 0; 105 return -ENOMEM;
106 106
107 return -ENOMEM; 107 return 0;
108} 108}
109 109
110/** 110/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
136 mem->size = size; 136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL); 137 mem->va = kzalloc(size, GFP_KERNEL);
138 138
139 if (mem->va) 139 if (!mem->va)
140 return 0; 140 return -ENOMEM;
141 141
142 return -ENOMEM; 142 return 0;
143} 143}
144 144
145/** 145/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id) 174 u16 needed, u16 id)
175{ 175{
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 int i = 0; 177 int i, j;
178 int j = 0;
179 178
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { 179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev, 180 dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 185
187 /* start the linear search with an imperfect hint */ 186 /* start the linear search with an imperfect hint */
188 i = pile->search_hint; 187 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) { 188 while (i < pile->num_entries) {
190 /* skip already allocated entries */ 189 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) { 190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++; 191 i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT; 204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i; 205 ret = i;
207 pile->search_hint = i + j; 206 pile->search_hint = i + j;
207 break;
208 } else { 208 } else {
209 /* not enough, so skip over it and continue looking */ 209 /* not enough, so skip over it and continue looking */
210 i += j; 210 i += j;
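The i40e_get_lump() hunks above tighten a first-fit search over the PF's resource pile: the loop now breaks as soon as a run of `needed` free entries has been claimed, instead of relying on the removed `ret < 0` loop condition. A small sketch of the same first-fit scan over a tracking array, with illustrative names (VALID_BIT mirrors I40E_PILE_VALID_BIT; the array size is arbitrary):

	#include <stdint.h>
	#include <stdio.h>

	#define VALID_BIT   0x8000	/* stand-in for I40E_PILE_VALID_BIT */
	#define NUM_ENTRIES 16

	/* returns the first index of a free run of `needed` entries, or -1 */
	static int get_lump_sketch(uint16_t *list, int needed, uint16_t id)
	{
		int i = 0, j;

		while (i < NUM_ENTRIES) {
			if (list[i] & VALID_BIT) {	/* skip allocated entries */
				i++;
				continue;
			}
			/* count consecutive free entries starting here */
			for (j = 0; (i + j) < NUM_ENTRIES && j < needed; j++)
				if (list[i + j] & VALID_BIT)
					break;
			if (j == needed) {
				for (j = 0; j < needed; j++)
					list[i + j] = id | VALID_BIT;
				return i;	/* found a lump: stop searching */
			}
			i += j ? j : 1;		/* too small: skip past the gap */
		}
		return -1;
	}

	int main(void)
	{
		uint16_t pile[NUM_ENTRIES] = { VALID_BIT, VALID_BIT };	/* first two busy */
		printf("lump starts at %d\n", get_lump_sketch(pile, 3, 7));	/* prints 2 */
		return 0;
	}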
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1388 bool add_happened = false; 1388 bool add_happened = false;
1389 int filter_list_len = 0; 1389 int filter_list_len = 0;
1390 u32 changed_flags = 0; 1390 u32 changed_flags = 0;
1391 i40e_status ret = 0; 1391 i40e_status aq_ret = 0;
1392 struct i40e_pf *pf; 1392 struct i40e_pf *pf;
1393 int num_add = 0; 1393 int num_add = 0;
1394 int num_del = 0; 1394 int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1449 1449
1450 /* flush a full buffer */ 1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) { 1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw, 1452 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del, 1453 vsi->seid, del_list, num_del,
1454 NULL); 1454 NULL);
1455 num_del = 0; 1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list)); 1456 memset(del_list, 0, sizeof(*del_list));
1457 1457
1458 if (ret) 1458 if (aq_ret)
1459 dev_info(&pf->pdev->dev, 1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret, 1461 aq_ret,
1462 pf->hw.aq.asq_last_status); 1462 pf->hw.aq.asq_last_status);
1463 } 1463 }
1464 } 1464 }
1465 if (num_del) { 1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1466 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL); 1467 del_list, num_del, NULL);
1468 num_del = 0; 1468 num_del = 0;
1469 1469
1470 if (ret) 1470 if (aq_ret)
1471 dev_info(&pf->pdev->dev, 1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n", 1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status); 1473 aq_ret, pf->hw.aq.asq_last_status);
1474 } 1474 }
1475 1475
1476 kfree(del_list); 1476 kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1515 1515
1516 /* flush a full buffer */ 1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) { 1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw, 1518 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1519 vsi->seid, 1519 add_list, num_add,
1520 add_list, 1520 NULL);
1521 num_add,
1522 NULL);
1523 num_add = 0; 1521 num_add = 0;
1524 1522
1525 if (ret) 1523 if (aq_ret)
1526 break; 1524 break;
1527 memset(add_list, 0, sizeof(*add_list)); 1525 memset(add_list, 0, sizeof(*add_list));
1528 } 1526 }
1529 } 1527 }
1530 if (num_add) { 1528 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1529 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL); 1530 add_list, num_add, NULL);
1533 num_add = 0; 1531 num_add = 0;
1534 } 1532 }
1535 kfree(add_list); 1533 kfree(add_list);
1536 add_list = NULL; 1534 add_list = NULL;
1537 1535
1538 if (add_happened && (!ret)) { 1536 if (add_happened && (!aq_ret)) {
1539 /* do nothing */; 1537 /* do nothing */;
1540 } else if (add_happened && (ret)) { 1538 } else if (add_happened && (aq_ret)) {
1541 dev_info(&pf->pdev->dev, 1539 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n", 1540 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status); 1541 aq_ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1542 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1543 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) { 1544 &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1556 if (changed_flags & IFF_ALLMULTI) { 1554 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc; 1555 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1556 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1557 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid, 1558 vsi->seid,
1561 cur_multipromisc, 1559 cur_multipromisc,
1562 NULL); 1560 NULL);
1563 if (ret) 1561 if (aq_ret)
1564 dev_info(&pf->pdev->dev, 1562 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n", 1563 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status); 1564 aq_ret, pf->hw.aq.asq_last_status);
1567 } 1565 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1566 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc; 1567 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1568 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1569 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state)); 1570 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, 1571 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid, 1572 vsi->seid,
1575 cur_promisc, 1573 cur_promisc, NULL);
1576 NULL); 1574 if (aq_ret)
1577 if (ret)
1578 dev_info(&pf->pdev->dev, 1575 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n", 1576 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status); 1577 aq_ret, pf->hw.aq.asq_last_status);
1581 } 1578 }
1582 1579
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1580 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured 1788 * @vsi: the vsi being configured
1792 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1789 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1793 **/ 1792 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{ 1794{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted 1863 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added 1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1866 **/ 1867 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid) 1869 __always_unused __be16 proto, u16 vid)
1869{ 1870{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev); 1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi; 1872 struct i40e_vsi *vsi = np->vsi;
1872 int ret; 1873 int ret = 0;
1873 1874
1874 if (vid > 4095) 1875 if (vid > 4095)
1875 return 0; 1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1876 1879
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should 1880 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive 1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged) 1882 * any traffic (i.e. with any vlan tag, or untagged)
1882 */ 1883 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884 1885
1885 if (!ret) { 1886 if (!ret && (vid < VLAN_N_VID))
1886 if (vid < VLAN_N_VID) 1887 set_bit(vid, vsi->active_vlans);
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889 1888
1890 return 0; 1889 return ret;
1891} 1890}
1892 1891
1893/** 1892/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted 1894 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed 1895 * @vid: vlan id to be removed
1896 *
 1897 * net_device_ops implementation for removing vlan ids
1897 **/ 1898 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid) 1900 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1901 struct i40e_netdev_priv *np = netdev_priv(netdev); 1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi; 1903 struct i40e_vsi *vsi = np->vsi;
1903 1904
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n", 1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1905 netdev->dev_addr, vid); 1906
1906 /* return code is ignored as there is nothing a user 1907 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was 1908 * can do about failure to remove and a log message was
1908 * already printed from another function 1909 * already printed from the other function
1909 */ 1910 */
1910 i40e_vsi_kill_vlan(vsi, vid); 1911 i40e_vsi_kill_vlan(vsi, vid);
1911 1912
1912 clear_bit(vid, vsi->active_vlans); 1913 clear_bit(vid, vsi->active_vlans);
1914
1913 return 0; 1915 return 0;
1914} 1916}
1915 1917
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
1936 * @vsi: the vsi being adjusted 1938 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID 1939 * @vid: the vlan id to set as a PVID
1938 **/ 1940 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 1941int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{ 1942{
1941 struct i40e_vsi_context ctxt; 1943 struct i40e_vsi_context ctxt;
1942 i40e_status ret; 1944 i40e_status aq_ret;
1943 1945
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid); 1947 vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1948 1950
1949 ctxt.seid = vsi->seid; 1951 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) { 1954 if (aq_ret) {
1953 dev_info(&vsi->back->pdev->dev, 1955 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n", 1956 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status); 1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1956 } 1959 }
1957 1960
1958 return ret; 1961 return 0;
1959} 1962}
1960 1963
1961/** 1964/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3326 **/ 3329 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{ 3331{
3329 int num_tc = 0, i; 3332 u8 num_tc = 0;
3333 int i;
3330 3334
3331 /* Scan the ETS Config Priority Table to find 3335 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority 3336 * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3341 /* Traffic class index starts from zero so 3345 /* Traffic class index starts from zero so
3342 * increment to return the actual count 3346 * increment to return the actual count
3343 */ 3347 */
3344 num_tc++; 3348 return num_tc + 1;
3345
3346 return num_tc;
3347} 3349}
3348 3350
3349/** 3351/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back; 3454 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw; 3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3454 u32 tc_bw_max; 3457 u32 tc_bw_max;
3455 int ret;
3456 int i; 3458 int i;
3457 3459
3458 /* Get the VSI level BW configuration */ 3460 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) { 3462 if (aq_ret) {
3461 dev_info(&pf->pdev->dev, 3463 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status); 3465 aq_ret, pf->hw.aq.asq_last_status);
3464 return ret; 3466 return -EINVAL;
3465 } 3467 }
3466 3468
3467 /* Get the VSI level BW configuration per TC */ 3469 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, 3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3469 &bw_ets_config, 3471 NULL);
3470 NULL); 3472 if (aq_ret) {
3471 if (ret) {
3472 dev_info(&pf->pdev->dev, 3473 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status); 3475 aq_ret, pf->hw.aq.asq_last_status);
3475 return ret; 3476 return -EINVAL;
3476 } 3477 }
3477 3478
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3494 /* 3 bits out of 4 for each TC */ 3495 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 } 3497 }
3497 return ret; 3498
3499 return 0;
3498} 3500}
3499 3501
3500/** 3502/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3505 * 3507 *
3506 * Returns 0 on success, negative value on failure 3508 * Returns 0 on success, negative value on failure
3507 **/ 3509 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, 3510static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3509 u8 enabled_tc,
3510 u8 *bw_share) 3511 u8 *bw_share)
3511{ 3512{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0; 3514 i40e_status aq_ret;
3515 int i;
3514 3516
3515 bw_data.tc_valid_bits = enabled_tc; 3517 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i]; 3519 bw_data.tc_bw_credits[i] = bw_share[i];
3518 3520
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, 3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3520 &bw_data, NULL); 3522 NULL);
3521 if (ret) { 3523 if (aq_ret) {
3522 dev_info(&vsi->back->pdev->dev, 3524 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status); 3526 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret; 3527 return -EINVAL;
3526 } 3528 }
3527 3529
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530 3532
3531 return ret; 3533 return 0;
3532} 3534}
3533 3535
3534/** 3536/**
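The i40e hunks above keep the firmware AdminQ status in its own variable (aq_ret) and only log it alongside asq_last_status, while the function itself hands callers a plain 0 or a negative errno such as -EINVAL or -ENOENT. A minimal stand-alone sketch of that convention follows; fw_query_config(), FW_OK and the status value are illustrative stand-ins, not part of the driver.

#include <errno.h>
#include <stdio.h>

typedef int fw_status;          /* stands in for i40e_status / AdminQ codes */
#define FW_OK 0

static fw_status fw_query_config(int seid, int *out)
{
	(void)seid;
	*out = 0;
	return 14;              /* pretend the firmware rejected the request */
}

/* Log the firmware status, but hand the caller a plain errno. */
static int get_bw_info(int seid, int *bw)
{
	fw_status aq_ret = fw_query_config(seid, bw);

	if (aq_ret != FW_OK) {
		fprintf(stderr, "query failed, aq status %d\n", aq_ret);
		return -EINVAL; /* callers never see raw AQ status codes */
	}
	return 0;
}

int main(void)
{
	int bw;

	printf("get_bw_info() = %d\n", get_bw_info(1, &bw));
	return 0;
}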
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 79b58353d849..47c2d10df826 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -719,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
719 u32 ctrl_ext; 719 u32 ctrl_ext;
720 u32 mdic; 720 u32 mdic;
721 721
722 /* Extra read required for some PHY's on i354 */
723 if (hw->mac.type == e1000_i354)
724 igb_get_phy_id(hw);
725
722 /* For SGMII PHYs, we try the list of possible addresses until 726 /* For SGMII PHYs, we try the list of possible addresses until
723 * we find one that works. For non-SGMII PHYs 727 * we find one that works. For non-SGMII PHYs
724 * (e.g. integrated copper PHYs), an address of 1 should 728 * (e.g. integrated copper PHYs), an address of 1 should
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f0dfd41dd4bd..298f0ed50670 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
712static s32 igb_set_default_fc(struct e1000_hw *hw) 712static s32 igb_set_default_fc(struct e1000_hw *hw)
713{ 713{
714 s32 ret_val = 0; 714 s32 ret_val = 0;
715 u16 lan_offset;
715 u16 nvm_data; 716 u16 nvm_data;
716 717
717 /* Read and store word 0x0F of the EEPROM. This word contains bits 718 /* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
722 * control setting, then the variable hw->fc will 723 * control setting, then the variable hw->fc will
723 * be initialized based on a value in the EEPROM. 724 * be initialized based on a value in the EEPROM.
724 */ 725 */
725 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); 726 if (hw->mac.type == e1000_i350) {
727 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
728 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
729 + lan_offset, 1, &nvm_data);
730 } else {
731 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
732 1, &nvm_data);
733 }
726 734
727 if (ret_val) { 735 if (ret_val) {
728 hw_dbg("NVM Read Error\n"); 736 hw_dbg("NVM Read Error\n");
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b051..86d51429a189 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); 1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1609 } 1609 }
1610 } else if (hw->phy.type == e1000_phy_82580) {
1611 /* enable MII loopback */
1612 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1610 } 1613 }
1611 1614
1612 /* add small delay to avoid loopback test failure */ 1615 /* add small delay to avoid loopback test failure */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0e1b973659b0..e8649abf97c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
160 bool autoneg = false; 160 bool autoneg = false;
161 bool link_up; 161 bool link_up;
162 162
163 /* SFP type is needed for get_link_capabilities */
164 if (hw->phy.media_type & (ixgbe_media_type_fiber |
165 ixgbe_media_type_fiber_qsfp)) {
166 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
167 hw->phy.ops.identify_sfp(hw);
168 }
169
163 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 170 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
164 171
165 /* set the supported link speeds */ 172 /* set the supported link speeds */
@@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
186 ecmd->advertising |= ADVERTISED_1000baseT_Full; 193 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 194 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
188 ecmd->advertising |= ADVERTISED_100baseT_Full; 195 ecmd->advertising |= ADVERTISED_100baseT_Full;
196
197 if (hw->phy.multispeed_fiber && !autoneg) {
198 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
199 ecmd->advertising = ADVERTISED_10000baseT_Full;
200 }
189 } 201 }
190 202
191 if (autoneg) { 203 if (autoneg) {
@@ -314,6 +326,14 @@ static int ixgbe_set_settings(struct net_device *netdev,
314 if (ecmd->advertising & ~ecmd->supported) 326 if (ecmd->advertising & ~ecmd->supported)
315 return -EINVAL; 327 return -EINVAL;
316 328
329 /* only allow one speed at a time if no autoneg */
330 if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
331 if (ecmd->advertising ==
332 (ADVERTISED_10000baseT_Full |
333 ADVERTISED_1000baseT_Full))
334 return -EINVAL;
335 }
336
317 old = hw->phy.autoneg_advertised; 337 old = hw->phy.autoneg_advertised;
318 advertised = 0; 338 advertised = 0;
319 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 339 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -1805,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1805 unsigned int size = 1024; 1825 unsigned int size = 1024;
1806 netdev_tx_t tx_ret_val; 1826 netdev_tx_t tx_ret_val;
1807 struct sk_buff *skb; 1827 struct sk_buff *skb;
1828 u32 flags_orig = adapter->flags;
1829
1830 /* DCB can modify the frames on Tx */
1831 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1808 1832
1809 /* allocate test skb */ 1833 /* allocate test skb */
1810 skb = alloc_skb(size, GFP_KERNEL); 1834 skb = alloc_skb(size, GFP_KERNEL);
@@ -1857,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1857 1881
1858 /* free the original skb */ 1882 /* free the original skb */
1859 kfree_skb(skb); 1883 kfree_skb(skb);
1884 adapter->flags = flags_orig;
1860 1885
1861 return ret_val; 1886 return ret_val;
1862} 1887}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7aba452833e5..0ade0cd5ef53 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3571,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3571{ 3571{
3572 struct ixgbe_hw *hw = &adapter->hw; 3572 struct ixgbe_hw *hw = &adapter->hw;
3573 int i; 3573 int i;
3574 u32 rxctrl; 3574 u32 rxctrl, rfctl;
3575 3575
3576 /* disable receives while setting up the descriptors */ 3576 /* disable receives while setting up the descriptors */
3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3580,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3580 ixgbe_setup_psrtype(adapter); 3580 ixgbe_setup_psrtype(adapter);
3581 ixgbe_setup_rdrxctl(adapter); 3581 ixgbe_setup_rdrxctl(adapter);
3582 3582
3583 /* RSC Setup */
3584 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3585 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3586 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3587 rfctl |= IXGBE_RFCTL_RSC_DIS;
3588 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3589
3583 /* Program registers for the distribution of queues */ 3590 /* Program registers for the distribution of queues */
3584 ixgbe_setup_mrqc(adapter); 3591 ixgbe_setup_mrqc(adapter);
3585 3592
@@ -5993,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5993 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 6000 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5994 6001
5995 speed = hw->phy.autoneg_advertised; 6002 speed = hw->phy.autoneg_advertised;
5996 if ((!speed) && (hw->mac.ops.get_link_capabilities)) 6003 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
5997 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); 6004 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6005
6006 /* setup the highest link when no autoneg */
6007 if (!autoneg) {
6008 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6009 speed = IXGBE_LINK_SPEED_10GB_FULL;
6010 }
6011 }
6012
5998 if (hw->mac.ops.setup_link) 6013 if (hw->mac.ops.setup_link)
5999 hw->mac.ops.setup_link(hw, speed, true); 6014 hw->mac.ops.setup_link(hw, speed, true);
6000 6015
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6442cf8f9dce..10775cb9b6d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1861,6 +1861,7 @@ enum {
1861#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 1861#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
1862#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E 1862#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
1863#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 1863#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
1864#define IXGBE_RFCTL_RSC_DIS 0x00000020
1864#define IXGBE_RFCTL_NFSW_DIS 0x00000040 1865#define IXGBE_RFCTL_NFSW_DIS 0x00000040
1865#define IXGBE_RFCTL_NFSR_DIS 0x00000080 1866#define IXGBE_RFCTL_NFSR_DIS 0x00000080
1866#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 1867#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb06860397..6a6c1f76d8e0 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
282 282
283 if (IS_TX(i)) { 283 if (IS_TX(i)) {
284 ltq_dma_alloc_tx(&ch->dma); 284 ltq_dma_alloc_tx(&ch->dma);
285 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 285 request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
286 "etop_tx", priv);
287 } else if (IS_RX(i)) { 286 } else if (IS_RX(i)) {
288 ltq_dma_alloc_rx(&ch->dma); 287 ltq_dma_alloc_rx(&ch->dma);
289 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; 288 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
291 if (ltq_etop_alloc_skb(ch)) 290 if (ltq_etop_alloc_skb(ch))
292 return -ENOMEM; 291 return -ENOMEM;
293 ch->dma.desc = 0; 292 ch->dma.desc = 0;
294 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 293 request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
295 "etop_rx", priv);
296 } 294 }
297 ch->dma.irq = irq; 295 ch->dma.irq = irq;
298 } 296 }
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 4ae0c7426010..fff62460185c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1123,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
1123 struct pxa168_eth_private *pep = netdev_priv(dev); 1123 struct pxa168_eth_private *pep = netdev_priv(dev);
1124 int err; 1124 int err;
1125 1125
1126 err = request_irq(dev->irq, pxa168_eth_int_handler, 1126 err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
1127 IRQF_DISABLED, dev->name, dev);
1128 if (err) { 1127 if (err) {
1129 dev_err(&dev->dev, "can't assign irq\n"); 1128 dev_err(&dev->dev, "can't assign irq\n");
1130 return -EAGAIN; 1129 return -EAGAIN;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ef94a591f9e5..ecc7f7b696b8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,23 +3086,27 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 PCI_DMA_FROMDEVICE); 3086 PCI_DMA_FROMDEVICE);
3087 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3088 } else { 3088 } else {
3089 struct skge_element ee;
3089 struct sk_buff *nskb; 3090 struct sk_buff *nskb;
3090 3091
3091 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3092 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3092 if (!nskb) 3093 if (!nskb)
3093 goto resubmit; 3094 goto resubmit;
3094 3095
3096 ee = *e;
3097
3098 skb = ee.skb;
3099 prefetch(skb->data);
3100
3095 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3101 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
3096 dev_kfree_skb(nskb); 3102 dev_kfree_skb(nskb);
3097 goto resubmit; 3103 goto resubmit;
3098 } 3104 }
3099 3105
3100 pci_unmap_single(skge->hw->pdev, 3106 pci_unmap_single(skge->hw->pdev,
3101 dma_unmap_addr(e, mapaddr), 3107 dma_unmap_addr(&ee, mapaddr),
3102 dma_unmap_len(e, maplen), 3108 dma_unmap_len(&ee, maplen),
3103 PCI_DMA_FROMDEVICE); 3109 PCI_DMA_FROMDEVICE);
3104 skb = e->skb;
3105 prefetch(skb->data);
3106 } 3110 }
3107 3111
3108 skb_put(skb, len); 3112 skb_put(skb, len);
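In the skge receive path above, the ring element is copied by value (ee = *e) before skge_rx_setup() refills that slot, so the later unmap and the skb pointer come from the snapshot rather than from the rewritten descriptor. A small stand-alone sketch of the same snapshot-before-reuse idea; the struct and field names below are illustrative only.

#include <stdio.h>

struct elem {
	void          *skb;     /* buffer handed up the stack */
	unsigned long  mapaddr; /* DMA address to unmap later */
	unsigned int   maplen;
};

/* Refill the ring slot with a fresh buffer (overwrites *e). */
static void refill_slot(struct elem *e, void *new_skb)
{
	e->skb     = new_skb;
	e->mapaddr = 0x2000;
	e->maplen  = 1536;
}

int main(void)
{
	char old_buf[64], new_buf[64];
	struct elem slot = { .skb = old_buf, .mapaddr = 0x1000, .maplen = 1536 };

	struct elem saved = slot;       /* snapshot before the slot is reused */
	refill_slot(&slot, new_buf);

	/* unmap/consume using the snapshot, not the rewritten slot */
	printf("unmap addr 0x%lx len %u, deliver skb %p\n",
	       saved.mapaddr, saved.maplen, saved.skb);
	return 0;
}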
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd801a236..0c750985f47e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
53 for (i = 0; i < priv->tx_ring_num; i++) { 53 for (i = 0; i < priv->tx_ring_num; i++) {
54 priv->tx_cq[i].moder_cnt = priv->tx_frames; 54 priv->tx_cq[i].moder_cnt = priv->tx_frames;
55 priv->tx_cq[i].moder_time = priv->tx_usecs; 55 priv->tx_cq[i].moder_time = priv->tx_usecs;
56 err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]); 56 if (priv->port_up) {
57 if (err) 57 err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
58 return err; 58 if (err)
59 return err;
60 }
59 } 61 }
60 62
61 if (priv->adaptive_rx_coal) 63 if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
65 priv->rx_cq[i].moder_cnt = priv->rx_frames; 67 priv->rx_cq[i].moder_cnt = priv->rx_frames;
66 priv->rx_cq[i].moder_time = priv->rx_usecs; 68 priv->rx_cq[i].moder_time = priv->rx_usecs;
67 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 69 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
68 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); 70 if (priv->port_up) {
69 if (err) 71 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
70 return err; 72 if (err)
73 return err;
74 }
71 } 75 }
72 76
73 return err; 77 return err;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0fba1532d326..075f4e21d33d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
915 struct ks_net *ks = netdev_priv(netdev); 915 struct ks_net *ks = netdev_priv(netdev);
916 int err; 916 int err;
917 917
918#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW) 918#define KS_INT_FLAGS IRQF_TRIGGER_LOW
919 /* lock the card, even if we may not actually do anything 919 /* lock the card, even if we may not actually do anything
920 * else at the moment. 920 * else at the moment.
921 */ 921 */
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c23..bd1a2d2bc2ae 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = {
543 { } 543 { }
544}; 544};
545 545
546struct __initdata platform_driver moxart_mac_driver = { 546static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe, 547 .probe = moxart_mac_probe,
548 .remove = moxart_remove, 548 .remove = moxart_remove,
549 .driver = { 549 .driver = {
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c2f65b..79257f71c5d9 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
83{ 83{
84 int retval; 84 int retval;
85 85
86 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 86 retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
87 "sonic", dev);
88 if (retval) { 87 if (retval) {
89 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 88 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
90 dev->name, dev->irq); 89 dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256fe3df..4da172ac5599 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
95{ 95{
96 int retval; 96 int retval;
97 97
98 retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 98 retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
99 "sonic", dev);
100 if (retval) { 99 if (retval) {
101 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 100 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
102 dev->name, dev->irq); 101 dev->name, dev->irq);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c498181a9aa8..5b65356e7568 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
1219 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", 1219 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
1220 dev->name); 1220 dev->name);
1221 1221
1222 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, 1222 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
1223 mac->tx_irq_name, mac->tx); 1223 mac->tx_irq_name, mac->tx);
1224 if (ret) { 1224 if (ret) {
1225 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1225 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
1230 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", 1230 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
1231 dev->name); 1231 dev->name);
1232 1232
1233 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, 1233 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
1234 mac->rx_irq_name, mac->rx); 1234 mac->rx_irq_name, mac->rx);
1235 if (ret) { 1235 if (ret) {
1236 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1236 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1c..ebe4c86e5230 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1794 .set_msglevel = qlcnic_set_msglevel, 1794 .set_msglevel = qlcnic_set_msglevel,
1795 .get_msglevel = qlcnic_get_msglevel, 1795 .get_msglevel = qlcnic_get_msglevel,
1796}; 1796};
1797
1798const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1799 .get_settings = qlcnic_get_settings,
1800 .get_drvinfo = qlcnic_get_drvinfo,
1801 .set_msglevel = qlcnic_set_msglevel,
1802 .get_msglevel = qlcnic_get_msglevel,
1803 .set_dump = qlcnic_set_dump,
1804};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fdf..21d00a0449a1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
432 usleep_range(10000, 11000); 432 usleep_range(10000, 11000);
433 433
434 if (!adapter->fw_work.work.func)
435 return;
436
434 cancel_delayed_work_sync(&adapter->fw_work); 437 cancel_delayed_work_sync(&adapter->fw_work);
435} 438}
436 439
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 adapter->portnum = adapter->ahw->pci_func; 2278 adapter->portnum = adapter->ahw->pci_func;
2276 err = qlcnic_start_firmware(adapter); 2279 err = qlcnic_start_firmware(adapter);
2277 if (err) { 2280 if (err) {
2278 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 2281 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
2279 goto err_out_free_hw; 2282 "\t\tIf reboot doesn't help, try flashing the card\n");
2283 goto err_out_maintenance_mode;
2280 } 2284 }
2281 2285
2282 qlcnic_get_multiq_capability(adapter); 2286 qlcnic_get_multiq_capability(adapter);
@@ -2408,6 +2412,22 @@ err_out_disable_pdev:
2408 pci_set_drvdata(pdev, NULL); 2412 pci_set_drvdata(pdev, NULL);
2409 pci_disable_device(pdev); 2413 pci_disable_device(pdev);
2410 return err; 2414 return err;
2415
2416err_out_maintenance_mode:
2417 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2418 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
2419 err = register_netdev(netdev);
2420
2421 if (err) {
2422 dev_err(&pdev->dev, "Failed to register net device\n");
2423 qlcnic_clr_all_drv_state(adapter, 0);
2424 goto err_out_free_hw;
2425 }
2426
2427 pci_set_drvdata(pdev, adapter);
2428 qlcnic_add_sysfs(adapter);
2429
2430 return 0;
2411} 2431}
2412 2432
2413static void qlcnic_remove(struct pci_dev *pdev) 2433static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
2518static int qlcnic_open(struct net_device *netdev) 2538static int qlcnic_open(struct net_device *netdev)
2519{ 2539{
2520 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2540 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2541 u32 state;
2521 int err; 2542 int err;
2522 2543
2544 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
2545 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
2546 netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
2547
2548 return -EIO;
2549 }
2550
2523 netif_carrier_off(netdev); 2551 netif_carrier_off(netdev);
2524 2552
2525 err = qlcnic_attach(adapter); 2553 err = qlcnic_attach(adapter);
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
3228 return; 3256 return;
3229 3257
3230 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 3258 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
3259 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
3260 netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
3261 __func__);
3262 qlcnic_api_unlock(adapter);
3263
3264 return;
3265 }
3231 3266
3232 if (state == QLCNIC_DEV_READY) { 3267 if (state == QLCNIC_DEV_READY) {
3233 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, 3268 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 652cc13c5023..392b9bd12b4f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1561,6 +1561,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1561{ 1561{
1562 int err; 1562 int err;
1563 1563
1564 adapter->need_fw_reset = 0;
1564 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); 1565 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1565 qlcnic_83xx_enable_mbx_interrupt(adapter); 1566 qlcnic_83xx_enable_mbx_interrupt(adapter);
1566 1567
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774ad..686f460b1502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
397{ 397{
398 struct net_device *netdev = adapter->netdev; 398 struct net_device *netdev = adapter->netdev;
399 399
400 rtnl_lock();
400 if (netif_running(netdev)) 401 if (netif_running(netdev))
401 __qlcnic_down(adapter, netdev); 402 __qlcnic_down(adapter, netdev);
402 403
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
407 /* After disabling SRIOV re-init the driver in default mode 408 /* After disabling SRIOV re-init the driver in default mode
408 configure opmode based on op_mode of function 409 configure opmode based on op_mode of function
409 */ 410 */
410 if (qlcnic_83xx_configure_opmode(adapter)) 411 if (qlcnic_83xx_configure_opmode(adapter)) {
412 rtnl_unlock();
411 return -EIO; 413 return -EIO;
414 }
412 415
413 if (netif_running(netdev)) 416 if (netif_running(netdev))
414 __qlcnic_up(adapter, netdev); 417 __qlcnic_up(adapter, netdev);
415 418
419 rtnl_unlock();
416 return 0; 420 return 0;
417} 421}
418 422
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
533 return -EIO; 537 return -EIO;
534 } 538 }
535 539
540 rtnl_lock();
536 if (netif_running(netdev)) 541 if (netif_running(netdev))
537 __qlcnic_down(adapter, netdev); 542 __qlcnic_down(adapter, netdev);
538 543
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
555 __qlcnic_up(adapter, netdev); 560 __qlcnic_up(adapter, netdev);
556 561
557error: 562error:
563 rtnl_unlock();
558 return err; 564 return err;
559} 565}
560 566
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc13..019f4377307f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1273{ 1273{
1274 struct device *dev = &adapter->pdev->dev; 1274 struct device *dev = &adapter->pdev->dev;
1275 u32 state;
1275 1276
1276 if (device_create_bin_file(dev, &bin_attr_port_stats)) 1277 if (device_create_bin_file(dev, &bin_attr_port_stats))
1277 dev_info(dev, "failed to create port stats sysfs entry"); 1278 dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1285 if (device_create_bin_file(dev, &bin_attr_mem)) 1286 if (device_create_bin_file(dev, &bin_attr_mem))
1286 dev_info(dev, "failed to create mem sysfs entry\n"); 1287 dev_info(dev, "failed to create mem sysfs entry\n");
1287 1288
1289 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1290 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1291 return;
1292
1288 if (device_create_bin_file(dev, &bin_attr_pci_config)) 1293 if (device_create_bin_file(dev, &bin_attr_pci_config))
1289 dev_info(dev, "failed to create pci config sysfs entry"); 1294 dev_info(dev, "failed to create pci config sysfs entry");
1295
1290 if (device_create_file(dev, &dev_attr_beacon)) 1296 if (device_create_file(dev, &dev_attr_beacon))
1291 dev_info(dev, "failed to create beacon sysfs entry"); 1297 dev_info(dev, "failed to create beacon sysfs entry");
1292 1298
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1307void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 1313void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1308{ 1314{
1309 struct device *dev = &adapter->pdev->dev; 1315 struct device *dev = &adapter->pdev->dev;
1316 u32 state;
1310 1317
1311 device_remove_bin_file(dev, &bin_attr_port_stats); 1318 device_remove_bin_file(dev, &bin_attr_port_stats);
1312 1319
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1315 device_remove_file(dev, &dev_attr_diag_mode); 1322 device_remove_file(dev, &dev_attr_diag_mode);
1316 device_remove_bin_file(dev, &bin_attr_crb); 1323 device_remove_bin_file(dev, &bin_attr_crb);
1317 device_remove_bin_file(dev, &bin_attr_mem); 1324 device_remove_bin_file(dev, &bin_attr_mem);
1325
1326 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1327 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1328 return;
1329
1318 device_remove_bin_file(dev, &bin_attr_pci_config); 1330 device_remove_bin_file(dev, &bin_attr_pci_config);
1319 device_remove_file(dev, &dev_attr_beacon); 1331 device_remove_file(dev, &dev_attr_beacon);
1320 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1332 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0f..6bc5db703920 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
740 int i; 740 int i;
741 741
742 if (!mpi_coredump) { 742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
744 return -ENOMEM; 744 return -EINVAL;
745 } 745 }
746 746
747 /* Try to get the spinlock, but dont worry if 747 /* Try to get the spinlock, but dont worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e247..7ad146080c36 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
1274 return; 1274 return;
1275 } 1275 }
1276 1276
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) { 1277 if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1; 1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue, 1280 queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f87f2cde647..3397cee89777 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4231,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4231 case RTL_GIGA_MAC_VER_23: 4231 case RTL_GIGA_MAC_VER_23:
4232 case RTL_GIGA_MAC_VER_24: 4232 case RTL_GIGA_MAC_VER_24:
4233 case RTL_GIGA_MAC_VER_34: 4233 case RTL_GIGA_MAC_VER_34:
4234 case RTL_GIGA_MAC_VER_35:
4234 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4235 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4235 break; 4236 break;
4236 case RTL_GIGA_MAC_VER_40: 4237 case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 8b7152565c5e..088921294448 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -7,7 +7,7 @@ config SFC
7 select I2C_ALGOBIT 7 select I2C_ALGOBIT
8 select PTP_1588_CLOCK 8 select PTP_1588_CLOCK
9 ---help--- 9 ---help---
10 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10/40-gigabit Ethernet cards based on
11 the Solarflare SFC4000, SFC9000-family and SFC9100-family 11 the Solarflare SFC4000, SFC9000-family and SFC9100-family
12 controllers. 12 controllers.
13 13
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 5f42313b4965..9f18ae984f9e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -94,7 +94,7 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); 94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
95} 95}
96 96
97static int efx_ef10_init_capabilities(struct efx_nic *efx) 97static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
98{ 98{
99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); 99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
100 struct efx_ef10_nic_data *nic_data = efx->nic_data; 100 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -107,16 +107,27 @@ static int efx_ef10_init_capabilities(struct efx_nic *efx)
107 outbuf, sizeof(outbuf), &outlen); 107 outbuf, sizeof(outbuf), &outlen);
108 if (rc) 108 if (rc)
109 return rc; 109 return rc;
110 if (outlen < sizeof(outbuf)) {
111 netif_err(efx, drv, efx->net_dev,
112 "unable to read datapath firmware capabilities\n");
113 return -EIO;
114 }
110 115
111 if (outlen >= sizeof(outbuf)) { 116 nic_data->datapath_caps =
112 nic_data->datapath_caps = 117 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
113 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 118
114 if (!(nic_data->datapath_caps & 119 if (!(nic_data->datapath_caps &
115 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { 120 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
116 netif_err(efx, drv, efx->net_dev, 121 netif_err(efx, drv, efx->net_dev,
117 "Capabilities don't indicate TSO support.\n"); 122 "current firmware does not support TSO\n");
118 return -ENODEV; 123 return -ENODEV;
119 } 124 }
125
126 if (!(nic_data->datapath_caps &
127 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
128 netif_err(efx, probe, efx->net_dev,
129 "current firmware does not support an RX prefix\n");
130 return -ENODEV;
120 } 131 }
121 132
122 return 0; 133 return 0;
@@ -217,21 +228,13 @@ static int efx_ef10_probe(struct efx_nic *efx)
217 if (rc) 228 if (rc)
218 goto fail3; 229 goto fail3;
219 230
220 rc = efx_ef10_init_capabilities(efx); 231 rc = efx_ef10_init_datapath_caps(efx);
221 if (rc < 0) 232 if (rc < 0)
222 goto fail3; 233 goto fail3;
223 234
224 efx->rx_packet_len_offset = 235 efx->rx_packet_len_offset =
225 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 236 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
226 237
227 if (!(nic_data->datapath_caps &
228 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
229 netif_err(efx, probe, efx->net_dev,
230 "current firmware does not support an RX prefix\n");
231 rc = -ENODEV;
232 goto fail3;
233 }
234
235 rc = efx_mcdi_port_get_number(efx); 238 rc = efx_mcdi_port_get_number(efx);
236 if (rc < 0) 239 if (rc < 0)
237 goto fail3; 240 goto fail3;
@@ -260,8 +263,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
260 if (rc) 263 if (rc)
261 goto fail3; 264 goto fail3;
262 265
263 efx_ptp_probe(efx);
264
265 return 0; 266 return 0;
266 267
267fail3: 268fail3:
@@ -342,6 +343,13 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
342 struct efx_ef10_nic_data *nic_data = efx->nic_data; 343 struct efx_ef10_nic_data *nic_data = efx->nic_data;
343 int rc; 344 int rc;
344 345
346 if (nic_data->must_check_datapath_caps) {
347 rc = efx_ef10_init_datapath_caps(efx);
348 if (rc)
349 return rc;
350 nic_data->must_check_datapath_caps = false;
351 }
352
345 if (nic_data->must_realloc_vis) { 353 if (nic_data->must_realloc_vis) {
346 /* We cannot let the number of VIs change now */ 354 /* We cannot let the number of VIs change now */
347 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 355 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
@@ -710,6 +718,14 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
710 nic_data->must_restore_filters = true; 718 nic_data->must_restore_filters = true;
711 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 719 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
712 720
721 /* The datapath firmware might have been changed */
722 nic_data->must_check_datapath_caps = true;
723
724 /* MAC statistics have been cleared on the NIC; clear the local
725 * statistic that we update with efx_update_diff_stat().
726 */
727 nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
728
713 return -EIO; 729 return -EIO;
714} 730}
715 731
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb2..c082562dbf4e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
27 27
28/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
29 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
30 * via these mechanisms then wait 20ms for the status word to be set. 30 * via these mechanisms then wait 250ms for the status word to be set.
31 */ 31 */
32#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
33#define MCDI_STATUS_DELAY_COUNT 200 33#define MCDI_STATUS_DELAY_COUNT 2500
34#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
36 36
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
800 } else { 800 } else {
801 int count; 801 int count;
802 802
803 /* Nobody was waiting for an MCDI request, so trigger a reset */
804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
805
806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */
807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
808 if (efx_mcdi_poll_reboot(efx)) 805 if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
810 udelay(MCDI_STATUS_DELAY_US); 807 udelay(MCDI_STATUS_DELAY_US);
811 } 808 }
812 mcdi->new_epoch = true; 809 mcdi->new_epoch = true;
810
811 /* Nobody was waiting for an MCDI request, so trigger a reset */
812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
813 } 813 }
814 814
815 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 8d33da6697fb..7b6be61d549f 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -556,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
556 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 556 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
557 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 557 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
558 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; 558 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
559 case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
559 default: return -EINVAL; 560 default: return -EINVAL;
560 } 561 }
561 } else { 562 } else {
@@ -841,6 +842,7 @@ static unsigned int efx_mcdi_event_link_speed[] = {
841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 842 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 843 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
843 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, 844 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
845 [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
844}; 846};
845 847
846void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) 848void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4b1e188f7a2f..fda29d39032f 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -400,6 +400,8 @@ enum {
400 * @rx_rss_context: Firmware handle for our RSS context 400 * @rx_rss_context: Firmware handle for our RSS context
401 * @stats: Hardware statistics 401 * @stats: Hardware statistics
402 * @workaround_35388: Flag: firmware supports workaround for bug 35388 402 * @workaround_35388: Flag: firmware supports workaround for bug 35388
403 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
404 * after MC reboot
403 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of 405 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
404 * %MC_CMD_GET_CAPABILITIES response) 406 * %MC_CMD_GET_CAPABILITIES response)
405 */ 407 */
@@ -413,6 +415,7 @@ struct efx_ef10_nic_data {
413 u32 rx_rss_context; 415 u32 rx_rss_context;
414 u64 stats[EF10_STAT_COUNT]; 416 u64 stats[EF10_STAT_COUNT];
415 bool workaround_35388; 417 bool workaround_35388;
418 bool must_check_datapath_caps;
416 u32 datapath_caps; 419 u32 datapath_caps;
417}; 420};
418 421
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13dde115..5730fe2445a6 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
271#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) 271#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
272#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) 272#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
273 273
274#define SMC_IRQ_FLAGS (IRQF_DISABLED) 274#define SMC_IRQ_FLAGS 0
275 275
276#else 276#else
277 277
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4ad1210..5f9e79f7f2df 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
1356 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1356 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
1357 smsc9420_pci_flush_write(pd); 1357 smsc9420_pci_flush_write(pd);
1358 1358
1359 result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1359 result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
1360 DRV_NAME, pd);
1361 if (result) { 1360 if (result) {
1362 smsc_warn(IFUP, "Unable to use IRQ = %d", irq); 1361 smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
1363 result = -ENODEV; 1362 result = -ENODEV;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0c0cae..f7f2ef49c0c1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1726 goto fail_alloc_irq; 1726 goto fail_alloc_irq;
1727 } 1727 }
1728 result = request_irq(card->irq, gelic_card_interrupt, 1728 result = request_irq(card->irq, gelic_card_interrupt,
1729 IRQF_DISABLED, netdev->name, card); 1729 0, netdev->name, card);
1730 1730
1731 if (result) { 1731 if (result) {
1732 dev_info(ctodev(card), "%s:request_irq failed (%d)\n", 1732 dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fdf..bdf697b184ae 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 33
34#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
35#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.1"
36#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
37 37
38#include <linux/types.h> 38#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 1705
1706 if (unlikely(vlan_tx_tag_present(skb))) { 1706 if (unlikely(vlan_tx_tag_present(skb))) {
1707 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1707 u16 vid_pcp = vlan_tx_tag_get(skb);
1708
1709 /* drop CFI/DEI bit, register needs VID and PCP */
1710 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1711 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1712 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1708 /* request tagging */ 1713 /* request tagging */
1709 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1714 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 } 1715 }
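The via-rhine transmit hunk repacks the 16-bit VLAN TCI so the chip register sees only VID and PCP: the low 12 bits (VLAN_VID_MASK, 0x0fff) stay in place and the three priority bits (VLAN_PRIO_MASK, 0xe000) shift down by one, which discards the CFI/DEI bit sitting between them. A small worked example of the repacking; the mask values are the standard 802.1Q ones.

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* VID: bits 11..0  */
#define VLAN_PRIO_MASK	0xe000	/* PCP: bits 15..13 */

int main(void)
{
	uint16_t tci = 0xa064;	/* PCP = 5, CFI = 0, VID = 0x064 */

	/* keep VID, move PCP down one bit, discard CFI (bit 12) */
	uint16_t vid_pcp = (tci & VLAN_VID_MASK) |
			   ((tci & VLAN_PRIO_MASK) >> 1);

	printf("tci 0x%04x -> register value 0x%04x\n", tci, vid_pcp);
	/* prints 0xa064 -> 0x5064: VID 0x064 in bits 11..0, PCP 5 in bits 14..12 */
	return 0;
}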
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240ca..0029148077a9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
299 299
300 /* Init descriptor indexes */
301 lp->tx_bd_ci = 0;
302 lp->tx_bd_next = 0;
303 lp->tx_bd_tail = 0;
304 lp->rx_bd_ci = 0;
305
300 return 0; 306 return 0;
301 307
302out: 308out:
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index f07c340990da..3f138ca88670 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -191,8 +191,8 @@ static inline int mcs_setup_transceiver_vishay(struct mcs_cb *mcs)
191 goto error; 191 goto error;
192 192
193 ret = 0; 193 ret = 0;
194 error: 194error:
195 return ret; 195 return ret;
196} 196}
197 197
198/* Setup a communication between mcs7780 and agilent chip. */ 198/* Setup a communication between mcs7780 and agilent chip. */
@@ -501,8 +501,11 @@ static inline int mcs_setup_urbs(struct mcs_cb *mcs)
501 return 0; 501 return 0;
502 502
503 mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 503 mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
504 if (!mcs->rx_urb) 504 if (!mcs->rx_urb) {
505 usb_free_urb(mcs->tx_urb);
506 mcs->tx_urb = NULL;
505 return 0; 507 return 0;
508 }
506 509
507 return 1; 510 return 1;
508} 511}
@@ -643,9 +646,9 @@ static int mcs_speed_change(struct mcs_cb *mcs)
643 ret = mcs_set_reg(mcs, MCS_MODE_REG, rval); 646 ret = mcs_set_reg(mcs, MCS_MODE_REG, rval);
644 647
645 mcs->speed = mcs->new_speed; 648 mcs->speed = mcs->new_speed;
646 error: 649error:
647 mcs->new_speed = 0; 650 mcs->new_speed = 0;
648 return ret; 651 return ret;
649} 652}
650 653
651/* Ioctl calls not supported at this time. Can be an area of future work. */ 654/* Ioctl calls not supported at this time. Can be an area of future work. */
@@ -738,17 +741,20 @@ static int mcs_net_open(struct net_device *netdev)
738 741
739 ret = mcs_receive_start(mcs); 742 ret = mcs_receive_start(mcs);
740 if (ret) 743 if (ret)
741 goto error3; 744 goto error4;
742 745
743 netif_start_queue(netdev); 746 netif_start_queue(netdev);
744 return 0; 747 return 0;
745 748
746 error3: 749error4:
747 irlap_close(mcs->irlap); 750 usb_free_urb(mcs->rx_urb);
748 error2: 751 usb_free_urb(mcs->tx_urb);
749 kfree_skb(mcs->rx_buff.skb); 752error3:
750 error1: 753 irlap_close(mcs->irlap);
751 return ret; 754error2:
755 kfree_skb(mcs->rx_buff.skb);
756error1:
757 return ret;
752} 758}
753 759
754/* Receive callback function. */ 760/* Receive callback function. */
@@ -946,11 +952,11 @@ static int mcs_probe(struct usb_interface *intf,
946 usb_set_intfdata(intf, mcs); 952 usb_set_intfdata(intf, mcs);
947 return 0; 953 return 0;
948 954
949 error2: 955error2:
950 free_netdev(ndev); 956 free_netdev(ndev);
951 957
952 error1: 958error1:
953 return ret; 959 return ret;
954} 960}
955 961
956/* The current device is removed, the USB layer tells us to shut down. */ 962/* The current device is removed, the USB layer tells us to shut down. */
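The mcs7780 open-path hunk adds an error4 label so a failure in the last setup step also frees the URBs allocated just before it, and the labels then unwind strictly in reverse order of acquisition. A minimal stand-alone example of that goto-unwind convention; the resources below are made up for illustration.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Acquire two resources, optionally fail at a late step, unwind in reverse. */
static int setup(int fail_late)
{
	void *rx = NULL, *tx = NULL;
	int ret = -ENOMEM;

	rx = malloc(64);
	if (!rx)
		goto err_rx;

	tx = malloc(64);
	if (!tx)
		goto err_tx;

	if (fail_late) {	/* e.g. the "start receive" step failed */
		ret = -EIO;
		goto err_late;
	}

	free(tx);		/* a driver would keep these; freed here only
				 * to keep the example leak-free */
	free(rx);
	return 0;

err_late:			/* labels unwind in reverse acquisition order */
	free(tx);
err_tx:
	free(rx);
err_rx:
	return ret;
}

int main(void)
{
	printf("setup(0) = %d\n", setup(0));
	printf("setup(1) = %d\n", setup(1));
	return 0;
}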
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index fcbf680c3e62..a17d85a331f1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -146,6 +146,7 @@ static int loopback_dev_init(struct net_device *dev)
146 146
147static void loopback_dev_free(struct net_device *dev) 147static void loopback_dev_free(struct net_device *dev)
148{ 148{
149 dev_net(dev)->loopback_dev = NULL;
149 free_percpu(dev->lstats); 150 free_percpu(dev->lstats);
150 free_netdev(dev); 151 free_netdev(dev);
151} 152}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dcb21347c670..adeee615dd19 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -684,15 +684,12 @@ restart:
684 case NETDEV_RELEASE: 684 case NETDEV_RELEASE:
685 case NETDEV_JOIN: 685 case NETDEV_JOIN:
686 case NETDEV_UNREGISTER: 686 case NETDEV_UNREGISTER:
687 /* 687 /* rtnl_lock already held
688 * rtnl_lock already held
689 * we might sleep in __netpoll_cleanup() 688 * we might sleep in __netpoll_cleanup()
690 */ 689 */
691 spin_unlock_irqrestore(&target_list_lock, flags); 690 spin_unlock_irqrestore(&target_list_lock, flags);
692 691
693 mutex_lock(&nt->mutex);
694 __netpoll_cleanup(&nt->np); 692 __netpoll_cleanup(&nt->np);
695 mutex_unlock(&nt->mutex);
696 693
697 spin_lock_irqsave(&target_list_lock, flags); 694 spin_lock_irqsave(&target_list_lock, flags);
698 dev_put(nt->np.dev); 695 dev_put(nt->np.dev);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index db472ffb6e89..313a0377f68f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -30,9 +30,9 @@
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32 32
33#include <asm/io.h> 33#include <linux/io.h>
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/uaccess.h> 35#include <linux/uaccess.h>
36 36
37/* Cicada Extended Control Register 1 */ 37/* Cicada Extended Control Register 1 */
38#define MII_CIS8201_EXT_CON1 0x17 38#define MII_CIS8201_EXT_CON1 0x17
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6fa5ae00039f..01805319e1e0 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
281 nf_reset(skb); 281 nf_reset(skb);
282 282
283 skb->ip_summed = CHECKSUM_NONE; 283 skb->ip_summed = CHECKSUM_NONE;
284 ip_select_ident(iph, &rt->dst, NULL); 284 ip_select_ident(skb, &rt->dst, NULL);
285 ip_send_check(iph); 285 ip_send_check(iph);
286 286
287 ip_local_out(skb); 287 ip_local_out(skb);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a34d6bf5e43b..cc70ecfc7062 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock);
432 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
433 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
434 * transmission of another packet */ 435 * transmission of another packet */
435 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
436 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock);
437 sl_unlock(sl); 439 sl_unlock(sl);
438 return; 440 return;
439 } 441 }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
441 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442 sl->xleft -= actual; 444 sl->xleft -= actual;
443 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock);
444} 447}
445 448
446static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
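[Editor's note] The slip change takes sl->lock for the whole of slip_write_wakeup so that xhead and xleft cannot be read and advanced while the transmit path is rewriting them. Below is a small userspace sketch of that discipline with a pthread mutex and made-up field names; it shows the shape of guarding a shared transmit cursor in both the fill path and the wakeup path, not the slip driver itself.

#include <pthread.h>
#include <string.h>
#include <stdio.h>

/* Shared transmit state, analogous to sl->xhead / sl->xleft. */
struct txstate {
	pthread_mutex_t lock;
	char buf[256];
	char *head;		/* next byte to push out */
	int left;		/* bytes still queued */
};

/* Fill path: queue a new frame, holding the lock while the cursor moves. */
static void tx_fill(struct txstate *tx, const char *data, int len)
{
	if (len > (int)sizeof(tx->buf))
		len = sizeof(tx->buf);
	pthread_mutex_lock(&tx->lock);
	memcpy(tx->buf, data, len);
	tx->head = tx->buf;
	tx->left = len;
	pthread_mutex_unlock(&tx->lock);
}

/* Wakeup path: consume whatever the "device" accepted. Holding the same lock
 * means it can never observe head/left mid-update from tx_fill(). */
static void tx_wakeup(struct txstate *tx, int accepted)
{
	pthread_mutex_lock(&tx->lock);
	if (tx->left <= 0) {
		pthread_mutex_unlock(&tx->lock);
		return;
	}
	if (accepted > tx->left)
		accepted = tx->left;
	tx->left -= accepted;
	tx->head += accepted;
	pthread_mutex_unlock(&tx->lock);
}

int main(void)
{
	struct txstate tx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tx_fill(&tx, "hello", 5);
	tx_wakeup(&tx, 3);
	printf("left=%d\n", tx.left);	/* prints left=2 */
	return 0;
}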
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a639de8401f8..807815fc9968 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1641,11 +1641,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1641 INIT_LIST_HEAD(&tun->disabled); 1641 INIT_LIST_HEAD(&tun->disabled);
1642 err = tun_attach(tun, file, false); 1642 err = tun_attach(tun, file, false);
1643 if (err < 0) 1643 if (err < 0)
1644 goto err_free_dev; 1644 goto err_free_flow;
1645 1645
1646 err = register_netdevice(tun->dev); 1646 err = register_netdevice(tun->dev);
1647 if (err < 0) 1647 if (err < 0)
1648 goto err_free_dev; 1648 goto err_detach;
1649 1649
1650 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1650 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1651 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1651 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
@@ -1689,7 +1689,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1689 strcpy(ifr->ifr_name, tun->dev->name); 1689 strcpy(ifr->ifr_name, tun->dev->name);
1690 return 0; 1690 return 0;
1691 1691
1692 err_free_dev: 1692err_detach:
1693 tun_detach_all(dev);
1694err_free_flow:
1695 tun_flow_uninit(tun);
1696 security_tun_dev_free_security(tun->security);
1697err_free_dev:
1693 free_netdev(dev); 1698 free_netdev(dev);
1694 return err; 1699 return err;
1695} 1700}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 03ad4dc293aa..2023f3ea891e 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -33,7 +33,7 @@
33#include <linux/usb/usbnet.h> 33#include <linux/usb/usbnet.h>
34 34
35 35
36#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) 36#if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST)
37 37
38static int is_rndis(struct usb_interface_descriptor *desc) 38static int is_rndis(struct usb_interface_descriptor *desc)
39{ 39{
@@ -69,8 +69,7 @@ static const u8 mbm_guid[16] = {
69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, 69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
70}; 70};
71 71
72/* 72/* probes control interface, claims data interface, collects the bulk
73 * probes control interface, claims data interface, collects the bulk
74 * endpoints, activates data interface (if needed), maybe sets MTU. 73 * endpoints, activates data interface (if needed), maybe sets MTU.
75 * all pure cdc, except for certain firmware workarounds, and knowing 74 * all pure cdc, except for certain firmware workarounds, and knowing
76 * that rndis uses one different rule. 75 * that rndis uses one different rule.
@@ -88,7 +87,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
88 struct usb_cdc_mdlm_desc *desc = NULL; 87 struct usb_cdc_mdlm_desc *desc = NULL;
89 struct usb_cdc_mdlm_detail_desc *detail = NULL; 88 struct usb_cdc_mdlm_detail_desc *detail = NULL;
90 89
91 if (sizeof dev->data < sizeof *info) 90 if (sizeof(dev->data) < sizeof(*info))
92 return -EDOM; 91 return -EDOM;
93 92
94 /* expect strict spec conformance for the descriptors, but 93 /* expect strict spec conformance for the descriptors, but
@@ -126,10 +125,10 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
126 is_activesync(&intf->cur_altsetting->desc) || 125 is_activesync(&intf->cur_altsetting->desc) ||
127 is_wireless_rndis(&intf->cur_altsetting->desc)); 126 is_wireless_rndis(&intf->cur_altsetting->desc));
128 127
129 memset(info, 0, sizeof *info); 128 memset(info, 0, sizeof(*info));
130 info->control = intf; 129 info->control = intf;
131 while (len > 3) { 130 while (len > 3) {
132 if (buf [1] != USB_DT_CS_INTERFACE) 131 if (buf[1] != USB_DT_CS_INTERFACE)
133 goto next_desc; 132 goto next_desc;
134 133
135 /* use bDescriptorSubType to identify the CDC descriptors. 134 /* use bDescriptorSubType to identify the CDC descriptors.
@@ -139,14 +138,14 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
139 * in favor of a complicated OID-based RPC scheme doing what 138 * in favor of a complicated OID-based RPC scheme doing what
140 * CDC Ethernet achieves with a simple descriptor. 139 * CDC Ethernet achieves with a simple descriptor.
141 */ 140 */
142 switch (buf [2]) { 141 switch (buf[2]) {
143 case USB_CDC_HEADER_TYPE: 142 case USB_CDC_HEADER_TYPE:
144 if (info->header) { 143 if (info->header) {
145 dev_dbg(&intf->dev, "extra CDC header\n"); 144 dev_dbg(&intf->dev, "extra CDC header\n");
146 goto bad_desc; 145 goto bad_desc;
147 } 146 }
148 info->header = (void *) buf; 147 info->header = (void *) buf;
149 if (info->header->bLength != sizeof *info->header) { 148 if (info->header->bLength != sizeof(*info->header)) {
150 dev_dbg(&intf->dev, "CDC header len %u\n", 149 dev_dbg(&intf->dev, "CDC header len %u\n",
151 info->header->bLength); 150 info->header->bLength);
152 goto bad_desc; 151 goto bad_desc;
@@ -175,7 +174,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
175 goto bad_desc; 174 goto bad_desc;
176 } 175 }
177 info->u = (void *) buf; 176 info->u = (void *) buf;
178 if (info->u->bLength != sizeof *info->u) { 177 if (info->u->bLength != sizeof(*info->u)) {
179 dev_dbg(&intf->dev, "CDC union len %u\n", 178 dev_dbg(&intf->dev, "CDC union len %u\n",
180 info->u->bLength); 179 info->u->bLength);
181 goto bad_desc; 180 goto bad_desc;
@@ -233,7 +232,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
233 goto bad_desc; 232 goto bad_desc;
234 } 233 }
235 info->ether = (void *) buf; 234 info->ether = (void *) buf;
236 if (info->ether->bLength != sizeof *info->ether) { 235 if (info->ether->bLength != sizeof(*info->ether)) {
237 dev_dbg(&intf->dev, "CDC ether len %u\n", 236 dev_dbg(&intf->dev, "CDC ether len %u\n",
238 info->ether->bLength); 237 info->ether->bLength);
239 goto bad_desc; 238 goto bad_desc;
@@ -274,8 +273,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
274 break; 273 break;
275 } 274 }
276next_desc: 275next_desc:
277 len -= buf [0]; /* bLength */ 276 len -= buf[0]; /* bLength */
278 buf += buf [0]; 277 buf += buf[0];
279 } 278 }
280 279
281 /* Microsoft ActiveSync based and some regular RNDIS devices lack the 280 /* Microsoft ActiveSync based and some regular RNDIS devices lack the
@@ -379,9 +378,7 @@ void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
379} 378}
380EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); 379EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
381 380
382/*------------------------------------------------------------------------- 381/* Communications Device Class, Ethernet Control model
383 *
384 * Communications Device Class, Ethernet Control model
385 * 382 *
386 * Takes two interfaces. The DATA interface is inactive till an altsetting 383 * Takes two interfaces. The DATA interface is inactive till an altsetting
387 * is selected. Configuration data includes class descriptors. There's 384 * is selected. Configuration data includes class descriptors. There's
@@ -389,8 +386,7 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
389 * 386 *
390 * This should interop with whatever the 2.4 "CDCEther.c" driver 387 * This should interop with whatever the 2.4 "CDCEther.c" driver
391 * (by Brad Hards) talked with, with more functionality. 388 * (by Brad Hards) talked with, with more functionality.
392 * 389 */
393 *-------------------------------------------------------------------------*/
394 390
395static void dumpspeed(struct usbnet *dev, __le32 *speeds) 391static void dumpspeed(struct usbnet *dev, __le32 *speeds)
396{ 392{
@@ -404,7 +400,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
404{ 400{
405 struct usb_cdc_notification *event; 401 struct usb_cdc_notification *event;
406 402
407 if (urb->actual_length < sizeof *event) 403 if (urb->actual_length < sizeof(*event))
408 return; 404 return;
409 405
410 /* SPEED_CHANGE can get split into two 8-byte packets */ 406 /* SPEED_CHANGE can get split into two 8-byte packets */
@@ -423,7 +419,7 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
423 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 419 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
424 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", 420 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
425 urb->actual_length); 421 urb->actual_length);
426 if (urb->actual_length != (sizeof *event + 8)) 422 if (urb->actual_length != (sizeof(*event) + 8))
427 set_bit(EVENT_STS_SPLIT, &dev->flags); 423 set_bit(EVENT_STS_SPLIT, &dev->flags);
428 else 424 else
429 dumpspeed(dev, (__le32 *) &event[1]); 425 dumpspeed(dev, (__le32 *) &event[1]);
@@ -469,7 +465,6 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
469static const struct driver_info cdc_info = { 465static const struct driver_info cdc_info = {
470 .description = "CDC Ethernet Device", 466 .description = "CDC Ethernet Device",
471 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 467 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
472 // .check_connect = cdc_check_connect,
473 .bind = usbnet_cdc_bind, 468 .bind = usbnet_cdc_bind,
474 .unbind = usbnet_cdc_unbind, 469 .unbind = usbnet_cdc_unbind,
475 .status = usbnet_cdc_status, 470 .status = usbnet_cdc_status,
@@ -493,9 +488,8 @@ static const struct driver_info wwan_info = {
493#define DELL_VENDOR_ID 0x413C 488#define DELL_VENDOR_ID 0x413C
494#define REALTEK_VENDOR_ID 0x0bda 489#define REALTEK_VENDOR_ID 0x0bda
495 490
496static const struct usb_device_id products [] = { 491static const struct usb_device_id products[] = {
497/* 492/* BLACKLIST !!
498 * BLACKLIST !!
499 * 493 *
500 * First blacklist any products that are egregiously nonconformant 494 * First blacklist any products that are egregiously nonconformant
501 * with the CDC Ethernet specs. Minor braindamage we cope with; when 495 * with the CDC Ethernet specs. Minor braindamage we cope with; when
@@ -542,7 +536,7 @@ static const struct usb_device_id products [] = {
542 .driver_info = 0, 536 .driver_info = 0,
543}, { 537}, {
544 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 538 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
545 | USB_DEVICE_ID_MATCH_DEVICE, 539 | USB_DEVICE_ID_MATCH_DEVICE,
546 .idVendor = 0x04DD, 540 .idVendor = 0x04DD,
547 .idProduct = 0x8007, /* C-700 */ 541 .idProduct = 0x8007, /* C-700 */
548 ZAURUS_MASTER_INTERFACE, 542 ZAURUS_MASTER_INTERFACE,
@@ -659,8 +653,7 @@ static const struct usb_device_id products [] = {
659 .driver_info = 0, 653 .driver_info = 0,
660}, 654},
661 655
662/* 656/* WHITELIST!!!
663 * WHITELIST!!!
664 * 657 *
665 * CDC Ether uses two interfaces, not necessarily consecutive. 658 * CDC Ether uses two interfaces, not necessarily consecutive.
666 * We match the main interface, ignoring the optional device 659 * We match the main interface, ignoring the optional device
@@ -672,60 +665,40 @@ static const struct usb_device_id products [] = {
672 */ 665 */
673{ 666{
674 /* ZTE (Vodafone) K3805-Z */ 667 /* ZTE (Vodafone) K3805-Z */
675 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 668 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM,
676 | USB_DEVICE_ID_MATCH_PRODUCT 669 USB_CDC_SUBCLASS_ETHERNET,
677 | USB_DEVICE_ID_MATCH_INT_INFO, 670 USB_CDC_PROTO_NONE),
678 .idVendor = ZTE_VENDOR_ID,
679 .idProduct = 0x1003,
680 .bInterfaceClass = USB_CLASS_COMM,
681 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
682 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
683 .driver_info = (unsigned long)&wwan_info, 671 .driver_info = (unsigned long)&wwan_info,
684}, { 672}, {
685 /* ZTE (Vodafone) K3806-Z */ 673 /* ZTE (Vodafone) K3806-Z */
686 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 674 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM,
687 | USB_DEVICE_ID_MATCH_PRODUCT 675 USB_CDC_SUBCLASS_ETHERNET,
688 | USB_DEVICE_ID_MATCH_INT_INFO, 676 USB_CDC_PROTO_NONE),
689 .idVendor = ZTE_VENDOR_ID,
690 .idProduct = 0x1015,
691 .bInterfaceClass = USB_CLASS_COMM,
692 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
693 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
694 .driver_info = (unsigned long)&wwan_info, 677 .driver_info = (unsigned long)&wwan_info,
695}, { 678}, {
696 /* ZTE (Vodafone) K4510-Z */ 679 /* ZTE (Vodafone) K4510-Z */
697 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 680 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM,
698 | USB_DEVICE_ID_MATCH_PRODUCT 681 USB_CDC_SUBCLASS_ETHERNET,
699 | USB_DEVICE_ID_MATCH_INT_INFO, 682 USB_CDC_PROTO_NONE),
700 .idVendor = ZTE_VENDOR_ID,
701 .idProduct = 0x1173,
702 .bInterfaceClass = USB_CLASS_COMM,
703 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
704 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
705 .driver_info = (unsigned long)&wwan_info, 683 .driver_info = (unsigned long)&wwan_info,
706}, { 684}, {
707 /* ZTE (Vodafone) K3770-Z */ 685 /* ZTE (Vodafone) K3770-Z */
708 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 686 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM,
709 | USB_DEVICE_ID_MATCH_PRODUCT 687 USB_CDC_SUBCLASS_ETHERNET,
710 | USB_DEVICE_ID_MATCH_INT_INFO, 688 USB_CDC_PROTO_NONE),
711 .idVendor = ZTE_VENDOR_ID,
712 .idProduct = 0x1177,
713 .bInterfaceClass = USB_CLASS_COMM,
714 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
715 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
716 .driver_info = (unsigned long)&wwan_info, 689 .driver_info = (unsigned long)&wwan_info,
717}, { 690}, {
718 /* ZTE (Vodafone) K3772-Z */ 691 /* ZTE (Vodafone) K3772-Z */
719 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 692 USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM,
720 | USB_DEVICE_ID_MATCH_PRODUCT 693 USB_CDC_SUBCLASS_ETHERNET,
721 | USB_DEVICE_ID_MATCH_INT_INFO, 694 USB_CDC_PROTO_NONE),
722 .idVendor = ZTE_VENDOR_ID,
723 .idProduct = 0x1181,
724 .bInterfaceClass = USB_CLASS_COMM,
725 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
726 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
727 .driver_info = (unsigned long)&wwan_info, 695 .driver_info = (unsigned long)&wwan_info,
728}, { 696}, {
697 /* Telit modules */
698 USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
699 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
700 .driver_info = (kernel_ulong_t) &wwan_info,
701}, {
729 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 702 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
730 USB_CDC_PROTO_NONE), 703 USB_CDC_PROTO_NONE),
731 .driver_info = (unsigned long) &cdc_info, 704 .driver_info = (unsigned long) &cdc_info,
@@ -736,15 +709,11 @@ static const struct usb_device_id products [] = {
736 709
737}, { 710}, {
738 /* Various Huawei modems with a network port like the UMG1831 */ 711 /* Various Huawei modems with a network port like the UMG1831 */
739 .match_flags = USB_DEVICE_ID_MATCH_VENDOR 712 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM,
740 | USB_DEVICE_ID_MATCH_INT_INFO, 713 USB_CDC_SUBCLASS_ETHERNET, 255),
741 .idVendor = HUAWEI_VENDOR_ID,
742 .bInterfaceClass = USB_CLASS_COMM,
743 .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
744 .bInterfaceProtocol = 255,
745 .driver_info = (unsigned long)&wwan_info, 714 .driver_info = (unsigned long)&wwan_info,
746}, 715},
747 { }, // END 716 { }, /* END */
748}; 717};
749MODULE_DEVICE_TABLE(usb, products); 718MODULE_DEVICE_TABLE(usb, products);
750 719
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb9460349d..c6867f926cff 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
303 rx_ctl |= 0x02; 303 rx_ctl |= 0x02;
304 } else if (net->flags & IFF_ALLMULTI || 304 } else if (net->flags & IFF_ALLMULTI ||
305 netdev_mc_count(net) > DM_MAX_MCAST) { 305 netdev_mc_count(net) > DM_MAX_MCAST) {
306 rx_ctl |= 0x04; 306 rx_ctl |= 0x08;
307 } else if (!netdev_mc_empty(net)) { 307 } else if (!netdev_mc_empty(net)) {
308 struct netdev_hw_addr *ha; 308 struct netdev_hw_addr *ha;
309 309
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6312332afeba..3d6aaf79d8b2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,7 +714,7 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 717 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 718
719 /* 4. Gobi 1000 devices */ 719 /* 4. Gobi 1000 devices */
720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7b331e613e02..bf94e10a37c8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1241 if (num_sgs == 1) 1241 if (num_sgs == 1)
1242 return 0; 1242 return 0;
1243 1243
1244 urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); 1244 /* reserve one for zero packet */
1245 urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1246 GFP_ATOMIC);
1245 if (!urb->sg) 1247 if (!urb->sg)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1305 if (build_dma_sg(skb, urb) < 0) 1307 if (build_dma_sg(skb, urb) < 0)
1306 goto drop; 1308 goto drop;
1307 } 1309 }
1308 entry->length = length = urb->transfer_buffer_length; 1310 length = urb->transfer_buffer_length;
1309 1311
1310 /* don't assume the hardware handles USB_ZERO_PACKET 1312 /* don't assume the hardware handles USB_ZERO_PACKET
1311 * NOTE: strictly conforming cdc-ether devices should expect 1313 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1317 if (length % dev->maxpacket == 0) { 1319 if (length % dev->maxpacket == 0) {
1318 if (!(info->flags & FLAG_SEND_ZLP)) { 1320 if (!(info->flags & FLAG_SEND_ZLP)) {
1319 if (!(info->flags & FLAG_MULTI_PACKET)) { 1321 if (!(info->flags & FLAG_MULTI_PACKET)) {
1320 urb->transfer_buffer_length++; 1322 length++;
1321 if (skb_tailroom(skb)) { 1323 if (skb_tailroom(skb) && !urb->num_sgs) {
1322 skb->data[skb->len] = 0; 1324 skb->data[skb->len] = 0;
1323 __skb_put(skb, 1); 1325 __skb_put(skb, 1);
1324 } 1326 } else if (urb->num_sgs)
1327 sg_set_buf(&urb->sg[urb->num_sgs++],
1328 dev->padding_pkt, 1);
1325 } 1329 }
1326 } else 1330 } else
1327 urb->transfer_flags |= URB_ZERO_PACKET; 1331 urb->transfer_flags |= URB_ZERO_PACKET;
1328 } 1332 }
1333 entry->length = urb->transfer_buffer_length = length;
1329 1334
1330 spin_lock_irqsave(&dev->txq.lock, flags); 1335 spin_lock_irqsave(&dev->txq.lock, flags);
1331 retval = usb_autopm_get_interface_async(dev->intf); 1336 retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1509 1514
1510 usb_kill_urb(dev->interrupt); 1515 usb_kill_urb(dev->interrupt);
1511 usb_free_urb(dev->interrupt); 1516 usb_free_urb(dev->interrupt);
1517 kfree(dev->padding_pkt);
1512 1518
1513 free_netdev(net); 1519 free_netdev(net);
1514} 1520}
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1679 /* initialize max rx_qlen and tx_qlen */ 1685 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev); 1686 usbnet_update_max_qlen(dev);
1681 1687
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt)
1692 goto out4;
1693 }
1694
1682 status = register_netdev (net); 1695 status = register_netdev (net);
1683 if (status) 1696 if (status)
1684 goto out4; 1697 goto out5;
1685 netif_info(dev, probe, dev->net, 1698 netif_info(dev, probe, dev->net,
1686 "register '%s' at usb-%s-%s, %s, %pM\n", 1699 "register '%s' at usb-%s-%s, %s, %pM\n",
1687 udev->dev.driver->name, 1700 udev->dev.driver->name,
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1699 1712
1700 return 0; 1713 return 0;
1701 1714
1715out5:
1716 kfree(dev->padding_pkt);
1702out4: 1717out4:
1703 usb_free_urb(dev->interrupt); 1718 usb_free_urb(dev->interrupt);
1704out3: 1719out3:
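[Editor's note] The usbnet hunks reserve one extra scatterlist entry and allocate a one-byte padding_pkt so that, when a bulk-OUT length is an exact multiple of the endpoint's max packet size and the URB uses scatter-gather, the pad can be appended as an extra SG entry instead of being written into skb tailroom. The helper below is a stand-alone restatement of just the length test that drives that decision; the name needs_usb_padding is made up for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* A bulk-OUT transfer whose length is an exact multiple of the endpoint's
 * max packet size does not terminate on its own: the host must either send
 * a zero-length packet or, as usbnet does for non-ZLP hardware, grow the
 * transfer by one pad byte so the final packet comes out short. */
static bool needs_usb_padding(size_t len, size_t maxpacket)
{
	return len > 0 && maxpacket > 0 && (len % maxpacket) == 0;
}

int main(void)
{
	printf("%d\n", needs_usb_padding(1024, 512));	/* 1: pad or send ZLP */
	printf("%d\n", needs_usb_padding(1000, 512));	/* 0: already short */
	return 0;
}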
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bf64b4191dcc..2ef5b6219f3f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -564,7 +564,7 @@ static void vxlan_notify_add_rx_port(struct sock *sk)
564 struct net_device *dev; 564 struct net_device *dev;
565 struct net *net = sock_net(sk); 565 struct net *net = sock_net(sk);
566 sa_family_t sa_family = sk->sk_family; 566 sa_family_t sa_family = sk->sk_family;
567 u16 port = htons(inet_sk(sk)->inet_sport); 567 __be16 port = inet_sk(sk)->inet_sport;
568 568
569 rcu_read_lock(); 569 rcu_read_lock();
570 for_each_netdev_rcu(net, dev) { 570 for_each_netdev_rcu(net, dev) {
@@ -581,7 +581,7 @@ static void vxlan_notify_del_rx_port(struct sock *sk)
581 struct net_device *dev; 581 struct net_device *dev;
582 struct net *net = sock_net(sk); 582 struct net *net = sock_net(sk);
583 sa_family_t sa_family = sk->sk_family; 583 sa_family_t sa_family = sk->sk_family;
584 u16 port = htons(inet_sk(sk)->inet_sport); 584 __be16 port = inet_sk(sk)->inet_sport;
585 585
586 rcu_read_lock(); 586 rcu_read_lock();
587 for_each_netdev_rcu(net, dev) { 587 for_each_netdev_rcu(net, dev) {
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
952 952
953 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
954 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb(); 955 rcu_assign_sk_user_data(vs->sock->sk, NULL);
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk); 956 vxlan_notify_del_rx_port(sk);
958 spin_unlock(&vn->sock_lock); 957 spin_unlock(&vn->sock_lock);
959 958
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1048 1047
1049 port = inet_sk(sk)->inet_sport; 1048 port = inet_sk(sk)->inet_sport;
1050 1049
1051 smp_read_barrier_depends(); 1050 vs = rcu_dereference_sk_user_data(sk);
1052 vs = (struct vxlan_sock *)sk->sk_user_data;
1053 if (!vs) 1051 if (!vs)
1054 goto drop; 1052 goto drop;
1055 1053
@@ -2021,7 +2019,8 @@ static struct device_type vxlan_type = {
2021}; 2019};
2022 2020
2023/* Calls the ndo_add_vxlan_port of the caller in order to 2021/* Calls the ndo_add_vxlan_port of the caller in order to
2024 * supply the listening VXLAN udp ports. 2022 * supply the listening VXLAN udp ports. Callers are expected
2023 * to implement the ndo_add_vxlan_port.
2025 */ 2024 */
2026void vxlan_get_rx_port(struct net_device *dev) 2025void vxlan_get_rx_port(struct net_device *dev)
2027{ 2026{
@@ -2029,16 +2028,13 @@ void vxlan_get_rx_port(struct net_device *dev)
2029 struct net *net = dev_net(dev); 2028 struct net *net = dev_net(dev);
2030 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2029 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2031 sa_family_t sa_family; 2030 sa_family_t sa_family;
2032 u16 port; 2031 __be16 port;
2033 int i; 2032 unsigned int i;
2034
2035 if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
2036 return;
2037 2033
2038 spin_lock(&vn->sock_lock); 2034 spin_lock(&vn->sock_lock);
2039 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2035 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2040 hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) { 2036 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2041 port = htons(inet_sk(vs->sock->sk)->inet_sport); 2037 port = inet_sk(vs->sock->sk)->inet_sport;
2042 sa_family = vs->sock->sk->sk_family; 2038 sa_family = vs->sock->sk->sk_family;
2043 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 2039 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
2044 port); 2040 port);
@@ -2304,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2304 atomic_set(&vs->refcnt, 1); 2300 atomic_set(&vs->refcnt, 1);
2305 vs->rcv = rcv; 2301 vs->rcv = rcv;
2306 vs->data = data; 2302 vs->data = data;
2307 smp_wmb(); 2303 rcu_assign_sk_user_data(vs->sock->sk, vs);
2308 vs->sock->sk->sk_user_data = vs;
2309 2304
2310 spin_lock(&vn->sock_lock); 2305 spin_lock(&vn->sock_lock);
2311 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2306 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
@@ -2492,15 +2487,19 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2492 2487
2493 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2488 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
2494 2489
2495 /* create an fdb entry for default destination */ 2490 /* create an fdb entry for a valid default destination */
2496 err = vxlan_fdb_create(vxlan, all_zeros_mac, 2491 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
2497 &vxlan->default_dst.remote_ip, 2492 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2498 NUD_REACHABLE|NUD_PERMANENT, 2493 &vxlan->default_dst.remote_ip,
2499 NLM_F_EXCL|NLM_F_CREATE, 2494 NUD_REACHABLE|NUD_PERMANENT,
2500 vxlan->dst_port, vxlan->default_dst.remote_vni, 2495 NLM_F_EXCL|NLM_F_CREATE,
2501 vxlan->default_dst.remote_ifindex, NTF_SELF); 2496 vxlan->dst_port,
2502 if (err) 2497 vxlan->default_dst.remote_vni,
2503 return err; 2498 vxlan->default_dst.remote_ifindex,
2499 NTF_SELF);
2500 if (err)
2501 return err;
2502 }
2504 2503
2505 err = register_netdevice(dev); 2504 err = register_netdevice(dev);
2506 if (err) { 2505 if (err) {
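[Editor's note] Several vxlan hunks replace an open-coded smp_wmb() before storing sk_user_data, and an smp_read_barrier_depends() before loading it, with the paired rcu_assign_sk_user_data()/rcu_dereference_sk_user_data() helpers. The userspace sketch below shows the same publish/lookup ordering with C11 release/acquire atomics; it is an analogy for why the store and the load must carry matching ordering, not a rendering of the kernel API, and the struct and function names are invented.

#include <stdatomic.h>
#include <stdio.h>

struct session {
	int port;
};

static _Atomic(struct session *) published;

/* Publisher: finish initialising the object, then make it visible. The
 * release store orders the initialisation before the pointer publication,
 * playing the role of the smp_wmb() the hunk removes. */
static void publish(struct session *s)
{
	s->port = 4789;
	atomic_store_explicit(&published, s, memory_order_release);
}

/* Reader: the acquire load pairs with the release store, a conservative
 * stand-in for the kernel's dependency ordering on the sk_user_data load. */
static struct session *lookup(void)
{
	return atomic_load_explicit(&published, memory_order_acquire);
}

int main(void)
{
	static struct session s;
	struct session *p;

	publish(&s);
	p = lookup();
	if (p)
		printf("port %d\n", p->port);
	return 0;
}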
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a5a4e4..ab9e3a8410bc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
1270 return; 1270 return;
1271 1271
1272 /* 1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity 1273 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row. 1274 * chooses the other antenna 3 times in a row.
1282 */ 1275 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515fe3ffa..5ac713d2ff5d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
399 tbf->bf_buf_addr = bf->bf_buf_addr; 399 tbf->bf_buf_addr = bf->bf_buf_addr;
400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
401 tbf->bf_state = bf->bf_state; 401 tbf->bf_state = bf->bf_state;
402 tbf->bf_state.stale = false;
402 403
403 return tbf; 404 return tbf;
404} 405}
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1389 u16 tid, u16 *ssn) 1390 u16 tid, u16 *ssn)
1390{ 1391{
1391 struct ath_atx_tid *txtid; 1392 struct ath_atx_tid *txtid;
1393 struct ath_txq *txq;
1392 struct ath_node *an; 1394 struct ath_node *an;
1393 u8 density; 1395 u8 density;
1394 1396
1395 an = (struct ath_node *)sta->drv_priv; 1397 an = (struct ath_node *)sta->drv_priv;
1396 txtid = ATH_AN_2_TID(an, tid); 1398 txtid = ATH_AN_2_TID(an, tid);
1399 txq = txtid->ac->txq;
1400
1401 ath_txq_lock(sc, txq);
1397 1402
1398 /* update ampdu factor/density, they may have changed. This may happen 1403 /* update ampdu factor/density, they may have changed. This may happen
1399 * in HT IBSS when a beacon with HT-info is received after the station 1404 * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1417 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1422 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1418 txtid->baw_head = txtid->baw_tail = 0; 1423 txtid->baw_head = txtid->baw_tail = 0;
1419 1424
1425 ath_txq_unlock_complete(sc, txq);
1426
1420 return 0; 1427 return 0;
1421} 1428}
1422 1429
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1555 __skb_unlink(bf->bf_mpdu, tid_q); 1562 __skb_unlink(bf->bf_mpdu, tid_q);
1556 list_add_tail(&bf->list, &bf_q); 1563 list_add_tail(&bf->list, &bf_q);
1557 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1564 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1558 ath_tx_addto_baw(sc, tid, bf); 1565 if (bf_isampdu(bf)) {
1559 bf->bf_state.bf_type &= ~BUF_AGGR; 1566 ath_tx_addto_baw(sc, tid, bf);
1567 bf->bf_state.bf_type &= ~BUF_AGGR;
1568 }
1560 if (bf_tail) 1569 if (bf_tail)
1561 bf_tail->bf_next = bf; 1570 bf_tail->bf_next = bf;
1562 1571
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1950 if (bf_is_ampdu_not_probing(bf)) 1959 if (bf_is_ampdu_not_probing(bf))
1951 txq->axq_ampdu_depth++; 1960 txq->axq_ampdu_depth++;
1952 1961
1953 bf = bf->bf_lastbf->bf_next; 1962 bf_last = bf->bf_lastbf;
1963 bf = bf_last->bf_next;
1964 bf_last->bf_next = NULL;
1954 } 1965 }
1955 } 1966 }
1956} 1967}
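[Editor's note] In ath_tx_txqaddbuf the hunk saves bf->bf_lastbf, advances bf past the aggregate, and then clears bf_last->bf_next so the chain handed to the hardware is properly terminated instead of still pointing at frames that belong to the next iteration. The snippet below shows the same split-and-terminate step on a toy singly linked list; the node type and field names are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct node {
	int val;
	struct node *next;
};

/* Detach the first chunk of up to n nodes from *listp and return its head.
 * The crucial step mirrored from the hunk is last->next = NULL: without it
 * the detached chunk still chains into the remainder of the list. */
static struct node *split_head(struct node **listp, int n)
{
	struct node *head = *listp, *last = head;

	if (!head)
		return NULL;

	while (--n > 0 && last->next)
		last = last->next;

	*listp = last->next;	/* advance the caller past the chunk */
	last->next = NULL;	/* terminate the detached chunk */
	return head;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *rest = &a;
	struct node *chunk = split_head(&rest, 2);

	for (struct node *p = chunk; p; p = p->next)
		printf("%d ", p->val);			/* prints: 1 2 */
	printf("| rest starts at %d\n", rest ? rest->val : -1);	/* 3 */
	return 0;
}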
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fc8a0fa6d3b2..b00a7e92225f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -28,7 +28,7 @@ config BRCMFMAC
28 28
29config BRCMFMAC_SDIO 29config BRCMFMAC_SDIO
30 bool "SDIO bus interface support for FullMAC driver" 30 bool "SDIO bus interface support for FullMAC driver"
31 depends on MMC 31 depends on (MMC = y || MMC = BRCMFMAC)
32 depends on BRCMFMAC 32 depends on BRCMFMAC
33 select FW_LOADER 33 select FW_LOADER
34 default y 34 default y
@@ -39,7 +39,7 @@ config BRCMFMAC_SDIO
39 39
40config BRCMFMAC_USB 40config BRCMFMAC_USB
41 bool "USB bus interface support for FullMAC driver" 41 bool "USB bus interface support for FullMAC driver"
42 depends on USB 42 depends on (USB = y || USB = BRCMFMAC)
43 depends on BRCMFMAC 43 depends on BRCMFMAC
44 select FW_LOADER 44 select FW_LOADER
45 ---help--- 45 ---help---
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2bc8dde..c3462b75bd08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
464 464
465static int brcmf_sdio_pd_probe(struct platform_device *pdev) 465static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 466{
467 int ret;
468
469 brcmf_dbg(SDIO, "Enter\n"); 467 brcmf_dbg(SDIO, "Enter\n");
470 468
471 brcmfmac_sdio_pdata = pdev->dev.platform_data; 469 brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
473 if (brcmfmac_sdio_pdata->power_on) 471 if (brcmfmac_sdio_pdata->power_on)
474 brcmfmac_sdio_pdata->power_on(); 472 brcmfmac_sdio_pdata->power_on();
475 473
476 ret = sdio_register_driver(&brcmf_sdmmc_driver); 474 return 0;
477 if (ret)
478 brcmf_err("sdio_register_driver failed: %d\n", ret);
479
480 return ret;
481} 475}
482 476
483static int brcmf_sdio_pd_remove(struct platform_device *pdev) 477static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
500 } 494 }
501}; 495};
502 496
497void brcmf_sdio_register(void)
498{
499 int ret;
500
501 ret = sdio_register_driver(&brcmf_sdmmc_driver);
502 if (ret)
503 brcmf_err("sdio_register_driver failed: %d\n", ret);
504}
505
503void brcmf_sdio_exit(void) 506void brcmf_sdio_exit(void)
504{ 507{
505 brcmf_dbg(SDIO, "Enter\n"); 508 brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
510 sdio_unregister_driver(&brcmf_sdmmc_driver); 513 sdio_unregister_driver(&brcmf_sdmmc_driver);
511} 514}
512 515
513void brcmf_sdio_init(void) 516void __init brcmf_sdio_init(void)
514{ 517{
515 int ret; 518 int ret;
516 519
517 brcmf_dbg(SDIO, "Enter\n"); 520 brcmf_dbg(SDIO, "Enter\n");
518 521
519 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); 522 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
520 if (ret == -ENODEV) { 523 if (ret == -ENODEV)
521 brcmf_dbg(SDIO, "No platform data available, registering without.\n"); 524 brcmf_dbg(SDIO, "No platform data available.\n");
522 ret = sdio_register_driver(&brcmf_sdmmc_driver);
523 }
524
525 if (ret)
526 brcmf_err("driver registration failed: %d\n", ret);
527} 525}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985844e4..74156f84180c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev);
156#ifdef CONFIG_BRCMFMAC_SDIO 156#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 157extern void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 158extern void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void);
159#endif 160#endif
160#ifdef CONFIG_BRCMFMAC_USB 161#ifdef CONFIG_BRCMFMAC_USB
161extern void brcmf_usb_exit(void); 162extern void brcmf_usb_exit(void);
162extern void brcmf_usb_init(void); 163extern void brcmf_usb_register(void);
163#endif 164#endif
164 165
165#endif /* _BRCMF_BUS_H_ */ 166#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec1fbf1..40e7f854e10f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1231 return bus->chip << 4 | bus->chiprev; 1231 return bus->chip << 4 | bus->chiprev;
1232} 1232}
1233 1233
1234static void brcmf_driver_init(struct work_struct *work) 1234static void brcmf_driver_register(struct work_struct *work)
1235{ 1235{
1236 brcmf_debugfs_init();
1237
1238#ifdef CONFIG_BRCMFMAC_SDIO 1236#ifdef CONFIG_BRCMFMAC_SDIO
1239 brcmf_sdio_init(); 1237 brcmf_sdio_register();
1240#endif 1238#endif
1241#ifdef CONFIG_BRCMFMAC_USB 1239#ifdef CONFIG_BRCMFMAC_USB
1242 brcmf_usb_init(); 1240 brcmf_usb_register();
1243#endif 1241#endif
1244} 1242}
1245static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); 1243static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1246 1244
1247static int __init brcmfmac_module_init(void) 1245static int __init brcmfmac_module_init(void)
1248{ 1246{
1247 brcmf_debugfs_init();
1248#ifdef CONFIG_BRCMFMAC_SDIO
1249 brcmf_sdio_init();
1250#endif
1249 if (!schedule_work(&brcmf_driver_work)) 1251 if (!schedule_work(&brcmf_driver_work))
1250 return -EBUSY; 1252 return -EBUSY;
1251 1253
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7c8556..f4aea47e0730 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
1539 brcmf_release_fw(&fw_image_list); 1539 brcmf_release_fw(&fw_image_list);
1540} 1540}
1541 1541
1542void brcmf_usb_init(void) 1542void brcmf_usb_register(void)
1543{ 1543{
1544 brcmf_dbg(USB, "Enter\n"); 1544 brcmf_dbg(USB, "Enter\n");
1545 INIT_LIST_HEAD(&fw_image_list); 1545 INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 3a6544710c8a..edc5d105ff98 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
457 if (err != 0) 457 if (err != 0)
458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", 458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
459 __func__, err); 459 __func__, err);
460
461 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
460 return err; 462 return err;
461} 463}
462 464
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
479 return; 481 return;
480 } 482 }
481 483
484 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
485
482 /* put driver in down state */ 486 /* put driver in down state */
483 spin_lock_bh(&wl->lock); 487 spin_lock_bh(&wl->lock);
484 brcms_down(wl); 488 brcms_down(wl);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index d06376014bcd..899cad34ccd3 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -40,6 +40,7 @@ struct hwbus_priv {
40 struct cw1200_common *core; 40 struct cw1200_common *core;
41 const struct cw1200_platform_data_spi *pdata; 41 const struct cw1200_platform_data_spi *pdata;
42 spinlock_t lock; /* Serialize all bus operations */ 42 spinlock_t lock; /* Serialize all bus operations */
43 wait_queue_head_t wq;
43 int claimed; 44 int claimed;
44}; 45};
45 46
@@ -197,8 +198,11 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
197{ 198{
198 unsigned long flags; 199 unsigned long flags;
199 200
201 DECLARE_WAITQUEUE(wait, current);
202
200 might_sleep(); 203 might_sleep();
201 204
205 add_wait_queue(&self->wq, &wait);
202 spin_lock_irqsave(&self->lock, flags); 206 spin_lock_irqsave(&self->lock, flags);
203 while (1) { 207 while (1) {
204 set_current_state(TASK_UNINTERRUPTIBLE); 208 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -211,6 +215,7 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
211 set_current_state(TASK_RUNNING); 215 set_current_state(TASK_RUNNING);
212 self->claimed = 1; 216 self->claimed = 1;
213 spin_unlock_irqrestore(&self->lock, flags); 217 spin_unlock_irqrestore(&self->lock, flags);
218 remove_wait_queue(&self->wq, &wait);
214 219
215 return; 220 return;
216} 221}
@@ -222,6 +227,8 @@ static void cw1200_spi_unlock(struct hwbus_priv *self)
222 spin_lock_irqsave(&self->lock, flags); 227 spin_lock_irqsave(&self->lock, flags);
223 self->claimed = 0; 228 self->claimed = 0;
224 spin_unlock_irqrestore(&self->lock, flags); 229 spin_unlock_irqrestore(&self->lock, flags);
230 wake_up(&self->wq);
231
225 return; 232 return;
226} 233}
227 234
@@ -243,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
243 250
244 pr_debug("SW IRQ subscribe\n"); 251 pr_debug("SW IRQ subscribe\n");
245 252
246 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, 253 ret = request_threaded_irq(self->func->irq, NULL,
247 IRQF_TRIGGER_HIGH, 254 cw1200_spi_irq_handler,
248 "cw1200_wlan_irq", self); 255 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
256 "cw1200_wlan_irq", self);
249 if (WARN_ON(ret < 0)) 257 if (WARN_ON(ret < 0))
250 goto exit; 258 goto exit;
251 259
@@ -400,6 +408,8 @@ static int cw1200_spi_probe(struct spi_device *func)
400 408
401 spi_set_drvdata(func, self); 409 spi_set_drvdata(func, self);
402 410
411 init_waitqueue_head(&self->wq);
412
403 status = cw1200_spi_irq_subscribe(self); 413 status = cw1200_spi_irq_subscribe(self);
404 414
405 status = cw1200_core_probe(&cw1200_spi_hwbus_ops, 415 status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 21c688264708..1214c587fd08 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
150 */ 150 */
151int 151int
152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
153 struct mwifiex_ra_list_tbl *pra_list, int headroom, 153 struct mwifiex_ra_list_tbl *pra_list,
154 int ptrindex, unsigned long ra_list_flags) 154 int ptrindex, unsigned long ra_list_flags)
155 __releases(&priv->wmm.ra_list_spinlock) 155 __releases(&priv->wmm.ra_list_spinlock)
156{ 156{
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 160 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 161 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 162 struct txpd *ptx_pd = NULL;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
163 164
164 skb_src = skb_peek(&pra_list->skb_head); 165 skb_src = skb_peek(&pra_list->skb_head);
165 if (!skb_src) { 166 if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c62a0cc..892098d6a696 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, 26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
27 struct sk_buff *skb); 27 struct sk_buff *skb);
28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
29 struct mwifiex_ra_list_tbl *ptr, int headroom, 29 struct mwifiex_ra_list_tbl *ptr,
30 int ptr_index, unsigned long flags) 30 int ptr_index, unsigned long flags)
31 __releases(&priv->wmm.ra_list_spinlock); 31 __releases(&priv->wmm.ra_list_spinlock);
32 32
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d761477d15e..a6c46f3b6e3a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1156 1156
1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && 1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1158 adapter->iface_type == MWIFIEX_SDIO) { 1158 adapter->iface_type != MWIFIEX_USB) {
1159 mwifiex_hs_activated_event(priv, true); 1159 mwifiex_hs_activated_event(priv, true);
1160 return 0; 1160 return 0;
1161 } else { 1161 } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1167 } 1167 }
1168 if (conditions != HS_CFG_CANCEL) { 1168 if (conditions != HS_CFG_CANCEL) {
1169 adapter->is_hs_configured = true; 1169 adapter->is_hs_configured = true;
1170 if (adapter->iface_type == MWIFIEX_USB || 1170 if (adapter->iface_type == MWIFIEX_USB)
1171 adapter->iface_type == MWIFIEX_PCIE)
1172 mwifiex_hs_activated_event(priv, true); 1171 mwifiex_hs_activated_event(priv, true);
1173 } else { 1172 } else {
1174 adapter->is_hs_configured = false; 1173 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 2472d4b7f00e..1c70b8d09227 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
447 */ 447 */
448 adapter->is_suspended = true; 448 adapter->is_suspended = true;
449 449
450 for (i = 0; i < adapter->priv_num; i++)
451 netif_carrier_off(adapter->priv[i]->netdev);
452
453 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 450 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
454 usb_kill_urb(card->rx_cmd.urb); 451 usb_kill_urb(card->rx_cmd.urb);
455 452
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
509 MWIFIEX_RX_CMD_BUF_SIZE); 506 MWIFIEX_RX_CMD_BUF_SIZE);
510 } 507 }
511 508
512 for (i = 0; i < adapter->priv_num; i++)
513 if (adapter->priv[i]->media_connected)
514 netif_carrier_on(adapter->priv[i]->netdev);
515
516 /* Disable Host Sleep */ 509 /* Disable Host Sleep */
517 if (adapter->hs_activated) 510 if (adapter->hs_activated)
518 mwifiex_cancel_hs(mwifiex_get_priv(adapter, 511 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cdea54d..95fa3599b407 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1240 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1241 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1243 ptr_index, flags);
1244 /* ra_list_spinlock has been freed in 1243 /* ra_list_spinlock has been freed in
1245 mwifiex_11n_aggregate_pkt() */ 1244 mwifiex_11n_aggregate_pkt() */
1246 else 1245 else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef66cf4b..e328d3058c41 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ 83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
86 {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
86 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ 87 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
87 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 88 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
88 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 89 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
979 if (err) { 980 if (err) {
980 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " 981 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
981 "(%d)!\n", p54u_fwlist[i].fw, err); 982 "(%d)!\n", p54u_fwlist[i].fw, err);
983 usb_put_dev(udev);
982 } 984 }
983 985
984 return err; 986 return err;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 95e6e61c3de0..88ce656f96cd 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -6659,19 +6659,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
6659 rt2800_init_registers(rt2x00dev))) 6659 rt2800_init_registers(rt2x00dev)))
6660 return -EIO; 6660 return -EIO;
6661 6661
6662 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
6663 return -EIO;
6664
6662 /* 6665 /*
6663 * Send signal to firmware during boot time. 6666 * Send signal to firmware during boot time.
6664 */ 6667 */
6665 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 6668 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
6666 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 6669 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
6667 if (rt2x00_is_usb(rt2x00dev)) { 6670 if (rt2x00_is_usb(rt2x00dev))
6668 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); 6671 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
6669 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 6672 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
6670 }
6671 msleep(1); 6673 msleep(1);
6672 6674
6673 if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || 6675 if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
6674 rt2800_wait_bbp_ready(rt2x00dev)))
6675 return -EIO; 6676 return -EIO;
6676 6677
6677 rt2800_init_bbp(rt2x00dev); 6678 rt2800_init_bbp(rt2x00dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 841fb9dfc9da..9a6edb0c014e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -438,17 +438,16 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
438 skb_queue_tail(&priv->rx_queue, skb); 438 skb_queue_tail(&priv->rx_queue, skb);
439 usb_anchor_urb(entry, &priv->anchored); 439 usb_anchor_urb(entry, &priv->anchored);
440 ret = usb_submit_urb(entry, GFP_KERNEL); 440 ret = usb_submit_urb(entry, GFP_KERNEL);
441 usb_put_urb(entry);
441 if (ret) { 442 if (ret) {
442 skb_unlink(skb, &priv->rx_queue); 443 skb_unlink(skb, &priv->rx_queue);
443 usb_unanchor_urb(entry); 444 usb_unanchor_urb(entry);
444 goto err; 445 goto err;
445 } 446 }
446 usb_free_urb(entry);
447 } 447 }
448 return ret; 448 return ret;
449 449
450err: 450err:
451 usb_free_urb(entry);
452 kfree_skb(skb); 451 kfree_skb(skb);
453 usb_kill_anchored_urbs(&priv->anchored); 452 usb_kill_anchored_urbs(&priv->anchored);
454 return ret; 453 return ret;
@@ -956,8 +955,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
956 (RETRY_COUNT << 8 /* short retry limit */) | 955 (RETRY_COUNT << 8 /* short retry limit */) |
957 (RETRY_COUNT << 0 /* long retry limit */) | 956 (RETRY_COUNT << 0 /* long retry limit */) |
958 (7 << 21 /* MAX TX DMA */)); 957 (7 << 21 /* MAX TX DMA */));
959 rtl8187_init_urbs(dev); 958 ret = rtl8187_init_urbs(dev);
960 rtl8187b_init_status_urb(dev); 959 if (ret)
960 goto rtl8187_start_exit;
961 ret = rtl8187b_init_status_urb(dev);
962 if (ret)
963 usb_kill_anchored_urbs(&priv->anchored);
961 goto rtl8187_start_exit; 964 goto rtl8187_start_exit;
962 } 965 }
963 966
@@ -966,7 +969,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
966 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); 969 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
967 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); 970 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
968 971
969 rtl8187_init_urbs(dev); 972 ret = rtl8187_init_urbs(dev);
973 if (ret)
974 goto rtl8187_start_exit;
970 975
971 reg = RTL818X_RX_CONF_ONLYERLPKT | 976 reg = RTL818X_RX_CONF_ONLYERLPKT |
972 RTL818X_RX_CONF_RX_AUTORESETPHY | 977 RTL818X_RX_CONF_RX_AUTORESETPHY |
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c87cbe..703258742d28 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@ struct rtl_priv {
2057 that it points to the data allocated 2057 that it points to the data allocated
2058 beyond this structure like: 2058 beyond this structure like:
2059 rtl_pci_priv or rtl_usb_priv */ 2059 rtl_pci_priv or rtl_usb_priv */
2060 u8 priv[0]; 2060 u8 priv[0] __aligned(sizeof(void *));
2061}; 2061};
2062 2062
2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) 2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a1977430ddfb..5715318d6bab 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -184,6 +184,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
184 unsigned long rx_ring_ref, unsigned int tx_evtchn, 184 unsigned long rx_ring_ref, unsigned int tx_evtchn,
185 unsigned int rx_evtchn); 185 unsigned int rx_evtchn);
186void xenvif_disconnect(struct xenvif *vif); 186void xenvif_disconnect(struct xenvif *vif);
187void xenvif_free(struct xenvif *vif);
187 188
188int xenvif_xenbus_init(void); 189int xenvif_xenbus_init(void);
189void xenvif_xenbus_fini(void); 190void xenvif_xenbus_fini(void);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 625c6f49cfba..01bb854c7f62 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -353,6 +353,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
353 } 353 }
354 354
355 netdev_dbg(dev, "Successfully created xenvif\n"); 355 netdev_dbg(dev, "Successfully created xenvif\n");
356
357 __module_get(THIS_MODULE);
358
356 return vif; 359 return vif;
357} 360}
358 361
@@ -366,8 +369,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
366 if (vif->tx_irq) 369 if (vif->tx_irq)
367 return 0; 370 return 0;
368 371
369 __module_get(THIS_MODULE);
370
371 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 372 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
372 if (err < 0) 373 if (err < 0)
373 goto err; 374 goto err;
@@ -406,7 +407,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
406 407
407 init_waitqueue_head(&vif->wq); 408 init_waitqueue_head(&vif->wq);
408 vif->task = kthread_create(xenvif_kthread, 409 vif->task = kthread_create(xenvif_kthread,
409 (void *)vif, vif->dev->name); 410 (void *)vif, "%s", vif->dev->name);
410 if (IS_ERR(vif->task)) { 411 if (IS_ERR(vif->task)) {
411 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 412 pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
412 err = PTR_ERR(vif->task); 413 err = PTR_ERR(vif->task);
@@ -452,12 +453,6 @@ void xenvif_carrier_off(struct xenvif *vif)
452 453
453void xenvif_disconnect(struct xenvif *vif) 454void xenvif_disconnect(struct xenvif *vif)
454{ 455{
455 /* Disconnect function might get called by generic framework
456 * even before vif connects, so we need to check if we really
457 * need to do a module_put.
458 */
459 int need_module_put = 0;
460
461 if (netif_carrier_ok(vif->dev)) 456 if (netif_carrier_ok(vif->dev))
462 xenvif_carrier_off(vif); 457 xenvif_carrier_off(vif);
463 458
@@ -468,23 +463,22 @@ void xenvif_disconnect(struct xenvif *vif)
468 unbind_from_irqhandler(vif->tx_irq, vif); 463 unbind_from_irqhandler(vif->tx_irq, vif);
469 unbind_from_irqhandler(vif->rx_irq, vif); 464 unbind_from_irqhandler(vif->rx_irq, vif);
470 } 465 }
471 /* vif->irq is valid, we had a module_get in 466 vif->tx_irq = 0;
472 * xenvif_connect.
473 */
474 need_module_put = 1;
475 } 467 }
476 468
477 if (vif->task) 469 if (vif->task)
478 kthread_stop(vif->task); 470 kthread_stop(vif->task);
479 471
472 xenvif_unmap_frontend_rings(vif);
473}
474
475void xenvif_free(struct xenvif *vif)
476{
480 netif_napi_del(&vif->napi); 477 netif_napi_del(&vif->napi);
481 478
482 unregister_netdev(vif->dev); 479 unregister_netdev(vif->dev);
483 480
484 xenvif_unmap_frontend_rings(vif);
485
486 free_netdev(vif->dev); 481 free_netdev(vif->dev);
487 482
488 if (need_module_put) 483 module_put(THIS_MODULE);
489 module_put(THIS_MODULE);
490} 484}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 956130c70036..f3e591c611de 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -212,6 +212,49 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
212 return false; 212 return false;
213} 213}
214 214
215struct xenvif_count_slot_state {
216 unsigned long copy_off;
217 bool head;
218};
219
220unsigned int xenvif_count_frag_slots(struct xenvif *vif,
221 unsigned long offset, unsigned long size,
222 struct xenvif_count_slot_state *state)
223{
224 unsigned count = 0;
225
226 offset &= ~PAGE_MASK;
227
228 while (size > 0) {
229 unsigned long bytes;
230
231 bytes = PAGE_SIZE - offset;
232
233 if (bytes > size)
234 bytes = size;
235
236 if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
237 count++;
238 state->copy_off = 0;
239 }
240
241 if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
242 bytes = MAX_BUFFER_OFFSET - state->copy_off;
243
244 state->copy_off += bytes;
245
246 offset += bytes;
247 size -= bytes;
248
249 if (offset == PAGE_SIZE)
250 offset = 0;
251
252 state->head = false;
253 }
254
255 return count;
256}
257
215/* 258/*
216 * Figure out how many ring slots we're going to need to send @skb to 259 * Figure out how many ring slots we're going to need to send @skb to
217 * the guest. This function is essentially a dry run of 260 * the guest. This function is essentially a dry run of
@@ -219,48 +262,39 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
219 */ 262 */
220unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) 263unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
221{ 264{
265 struct xenvif_count_slot_state state;
222 unsigned int count; 266 unsigned int count;
223 int i, copy_off; 267 unsigned char *data;
268 unsigned i;
224 269
225 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); 270 state.head = true;
271 state.copy_off = 0;
226 272
227 copy_off = skb_headlen(skb) % PAGE_SIZE; 273 /* Slot for the first (partial) page of data. */
274 count = 1;
228 275
276 /* Need a slot for the GSO prefix for GSO extra data? */
229 if (skb_shinfo(skb)->gso_size) 277 if (skb_shinfo(skb)->gso_size)
230 count++; 278 count++;
231 279
232 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 280 data = skb->data;
233 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 281 while (data < skb_tail_pointer(skb)) {
234 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; 282 unsigned long offset = offset_in_page(data);
235 unsigned long bytes; 283 unsigned long size = PAGE_SIZE - offset;
236
237 offset &= ~PAGE_MASK;
238
239 while (size > 0) {
240 BUG_ON(offset >= PAGE_SIZE);
241 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
242
243 bytes = PAGE_SIZE - offset;
244
245 if (bytes > size)
246 bytes = size;
247 284
248 if (start_new_rx_buffer(copy_off, bytes, 0)) { 285 if (data + size > skb_tail_pointer(skb))
249 count++; 286 size = skb_tail_pointer(skb) - data;
250 copy_off = 0;
251 }
252 287
253 if (copy_off + bytes > MAX_BUFFER_OFFSET) 288 count += xenvif_count_frag_slots(vif, offset, size, &state);
254 bytes = MAX_BUFFER_OFFSET - copy_off;
255 289
256 copy_off += bytes; 290 data += size;
291 }
257 292
258 offset += bytes; 293 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
259 size -= bytes; 294 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
295 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
260 296
261 if (offset == PAGE_SIZE) 297 count += xenvif_count_frag_slots(vif, offset, size, &state);
262 offset = 0;
263 }
264 } 298 }
265 return count; 299 return count;
266} 300}
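
The new xenvif_count_frag_slots() walks a byte range page by page and charges an extra ring slot whenever the running copy offset would spill into a new receive buffer. A user-space sketch of the same walk, assuming a 4096-byte page, MAX_BUFFER_OFFSET equal to one page, and a simplified stand-in for start_new_rx_buffer():

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE         4096UL
#define MAX_BUFFER_OFFSET PAGE_SIZE   /* assume one page per receive buffer */

struct slot_state {
	unsigned long copy_off;   /* bytes already used in the current slot */
	bool head;                /* still inside the head slot counted up front */
};

/* Simplified stand-in for the driver's start_new_rx_buffer() test. */
static bool need_new_slot(unsigned long copy_off, unsigned long bytes, bool head)
{
	return !head && copy_off + bytes > MAX_BUFFER_OFFSET;
}

static unsigned int count_frag_slots(unsigned long offset, unsigned long size,
				     struct slot_state *state)
{
	unsigned int count = 0;

	offset &= PAGE_SIZE - 1;                  /* offset within its page */
	while (size > 0) {
		unsigned long bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;
		if (need_new_slot(state->copy_off, bytes, state->head)) {
			count++;
			state->copy_off = 0;
		}
		if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - state->copy_off;

		state->copy_off += bytes;
		offset += bytes;
		size -= bytes;
		if (offset == PAGE_SIZE)
			offset = 0;
		state->head = false;
	}
	return count;
}

int main(void)
{
	struct slot_state st = { .copy_off = 0, .head = true };
	/* One slot for the head of the skb, plus whatever the fragment needs. */
	unsigned int slots = 1 + count_frag_slots(100, 10000, &st);

	printf("slots needed: %u\n", slots);
	return 0;
}
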
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1fe48fe364ed..b45bce20ad76 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
24struct backend_info { 24struct backend_info {
25 struct xenbus_device *dev; 25 struct xenbus_device *dev;
26 struct xenvif *vif; 26 struct xenvif *vif;
27
28 /* This is the state that will be reflected in xenstore when any
29 * active hotplug script completes.
30 */
31 enum xenbus_state state;
32
27 enum xenbus_state frontend_state; 33 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 34 struct xenbus_watch hotplug_status_watch;
29 u8 have_hotplug_status_watch:1; 35 u8 have_hotplug_status_watch:1;
@@ -42,7 +48,7 @@ static int netback_remove(struct xenbus_device *dev)
42 if (be->vif) { 48 if (be->vif) {
43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 49 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
44 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 50 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
45 xenvif_disconnect(be->vif); 51 xenvif_free(be->vif);
46 be->vif = NULL; 52 be->vif = NULL;
47 } 53 }
48 kfree(be); 54 kfree(be);
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev,
136 if (err) 142 if (err)
137 goto fail; 143 goto fail;
138 144
145 be->state = XenbusStateInitWait;
146
139 /* This kicks hotplug scripts, so do it immediately. */ 147 /* This kicks hotplug scripts, so do it immediately. */
140 backend_create_xenvif(be); 148 backend_create_xenvif(be);
141 149
@@ -208,15 +216,113 @@ static void backend_create_xenvif(struct backend_info *be)
208 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 216 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
209} 217}
210 218
219static void backend_disconnect(struct backend_info *be)
220{
221 if (be->vif)
222 xenvif_disconnect(be->vif);
223}
211 224
212static void disconnect_backend(struct xenbus_device *dev) 225static void backend_connect(struct backend_info *be)
213{ 226{
214 struct backend_info *be = dev_get_drvdata(&dev->dev); 227 if (be->vif)
228 connect(be);
229}
215 230
216 if (be->vif) { 231static inline void backend_switch_state(struct backend_info *be,
217 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 232 enum xenbus_state state)
218 xenvif_disconnect(be->vif); 233{
219 be->vif = NULL; 234 struct xenbus_device *dev = be->dev;
235
236 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
237 be->state = state;
238
239 /* If we are waiting for a hotplug script then defer the
240 * actual xenbus state change.
241 */
242 if (!be->have_hotplug_status_watch)
243 xenbus_switch_state(dev, state);
244}
245
246/* Handle backend state transitions:
247 *
248 * The backend state starts in InitWait and the following transitions are
249 * allowed.
250 *
251 * InitWait -> Connected
252 *
253 *    ^    \         |
254 *    |     \        |
255 *    |      \       |
256 *    |       \      |
257 *    |        \     |
258 *    |         \    |
259 *    |          V   V
260 *
261 *  Closed  <-> Closing
262 *
263 * The state argument specifies the eventual state of the backend and the
264 * function transitions to that state via the shortest path.
265 */
266static void set_backend_state(struct backend_info *be,
267 enum xenbus_state state)
268{
269 while (be->state != state) {
270 switch (be->state) {
271 case XenbusStateClosed:
272 switch (state) {
273 case XenbusStateInitWait:
274 case XenbusStateConnected:
275 pr_info("%s: prepare for reconnect\n",
276 be->dev->nodename);
277 backend_switch_state(be, XenbusStateInitWait);
278 break;
279 case XenbusStateClosing:
280 backend_switch_state(be, XenbusStateClosing);
281 break;
282 default:
283 BUG();
284 }
285 break;
286 case XenbusStateInitWait:
287 switch (state) {
288 case XenbusStateConnected:
289 backend_connect(be);
290 backend_switch_state(be, XenbusStateConnected);
291 break;
292 case XenbusStateClosing:
293 case XenbusStateClosed:
294 backend_switch_state(be, XenbusStateClosing);
295 break;
296 default:
297 BUG();
298 }
299 break;
300 case XenbusStateConnected:
301 switch (state) {
302 case XenbusStateInitWait:
303 case XenbusStateClosing:
304 case XenbusStateClosed:
305 backend_disconnect(be);
306 backend_switch_state(be, XenbusStateClosing);
307 break;
308 default:
309 BUG();
310 }
311 break;
312 case XenbusStateClosing:
313 switch (state) {
314 case XenbusStateInitWait:
315 case XenbusStateConnected:
316 case XenbusStateClosed:
317 backend_switch_state(be, XenbusStateClosed);
318 break;
319 default:
320 BUG();
321 }
322 break;
323 default:
324 BUG();
325 }
220 } 326 }
221} 327}
222 328
@@ -228,42 +334,33 @@ static void frontend_changed(struct xenbus_device *dev,
228{ 334{
229 struct backend_info *be = dev_get_drvdata(&dev->dev); 335 struct backend_info *be = dev_get_drvdata(&dev->dev);
230 336
231 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); 337 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
232 338
233 be->frontend_state = frontend_state; 339 be->frontend_state = frontend_state;
234 340
235 switch (frontend_state) { 341 switch (frontend_state) {
236 case XenbusStateInitialising: 342 case XenbusStateInitialising:
237 if (dev->state == XenbusStateClosed) { 343 set_backend_state(be, XenbusStateInitWait);
238 pr_info("%s: prepare for reconnect\n", dev->nodename);
239 xenbus_switch_state(dev, XenbusStateInitWait);
240 }
241 break; 344 break;
242 345
243 case XenbusStateInitialised: 346 case XenbusStateInitialised:
244 break; 347 break;
245 348
246 case XenbusStateConnected: 349 case XenbusStateConnected:
247 if (dev->state == XenbusStateConnected) 350 set_backend_state(be, XenbusStateConnected);
248 break;
249 backend_create_xenvif(be);
250 if (be->vif)
251 connect(be);
252 break; 351 break;
253 352
254 case XenbusStateClosing: 353 case XenbusStateClosing:
255 if (be->vif) 354 set_backend_state(be, XenbusStateClosing);
256 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
257 disconnect_backend(dev);
258 xenbus_switch_state(dev, XenbusStateClosing);
259 break; 355 break;
260 356
261 case XenbusStateClosed: 357 case XenbusStateClosed:
262 xenbus_switch_state(dev, XenbusStateClosed); 358 set_backend_state(be, XenbusStateClosed);
263 if (xenbus_dev_is_online(dev)) 359 if (xenbus_dev_is_online(dev))
264 break; 360 break;
265 /* fall through if not online */ 361 /* fall through if not online */
266 case XenbusStateUnknown: 362 case XenbusStateUnknown:
363 set_backend_state(be, XenbusStateClosed);
267 device_unregister(&dev->dev); 364 device_unregister(&dev->dev);
268 break; 365 break;
269 366
@@ -356,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
356 if (IS_ERR(str)) 453 if (IS_ERR(str))
357 return; 454 return;
358 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 455 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
359 xenbus_switch_state(be->dev, XenbusStateConnected); 456 /* Complete any pending state change */
457 xenbus_switch_state(be->dev, be->state);
458
360 /* Not interested in this watch anymore. */ 459 /* Not interested in this watch anymore. */
361 unregister_hotplug_status_watch(be); 460 unregister_hotplug_status_watch(be);
362 } 461 }
@@ -386,12 +485,8 @@ static void connect(struct backend_info *be)
386 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 485 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
387 hotplug_status_changed, 486 hotplug_status_changed,
388 "%s/%s", dev->nodename, "hotplug-status"); 487 "%s/%s", dev->nodename, "hotplug-status");
389 if (err) { 488 if (!err)
390 /* Switch now, since we can't do a watch. */
391 xenbus_switch_state(dev, XenbusStateConnected);
392 } else {
393 be->have_hotplug_status_watch = 1; 489 be->have_hotplug_status_watch = 1;
394 }
395 490
396 netif_wake_queue(be->vif->dev); 491 netif_wake_queue(be->vif->dev);
397} 492}
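
set_backend_state() above loops, taking one allowed hop at a time, until the backend reaches the requested state, so callers never see an illegal transition. A compact user-space sketch of that shortest-path loop over a reduced three-state machine; the states and transition rules here are illustrative, not the xenbus ones:

#include <stdio.h>

enum state { ST_INIT, ST_CONNECTED, ST_CLOSED };

static const char *name[] = { "InitWait", "Connected", "Closed" };

/* One legal hop from cur toward target. */
static enum state next_hop(enum state cur, enum state target)
{
	switch (cur) {
	case ST_INIT:
		return target == ST_CONNECTED ? ST_CONNECTED : ST_CLOSED;
	case ST_CONNECTED:
		return ST_CLOSED;         /* must pass through Closed first */
	default:
		return ST_INIT;           /* Closed: reopen via InitWait */
	}
}

static void set_state(enum state *cur, enum state target)
{
	while (*cur != target) {
		enum state nxt = next_hop(*cur, target);

		printf("%s -> %s\n", name[*cur], name[nxt]);
		*cur = nxt;               /* each hop can trigger side effects */
	}
}

int main(void)
{
	enum state s = ST_CONNECTED;

	set_state(&s, ST_INIT);           /* Connected -> Closed -> InitWait */
	return 0;
}
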
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c29ee4ed0ae..b0299e6d9a3f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
47 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) 47 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
48 return; 48 return;
49 49
50 if (pci_dev->pme_poll)
51 pci_dev->pme_poll = false;
52
50 if (pci_dev->current_state == PCI_D3cold) { 53 if (pci_dev->current_state == PCI_D3cold) {
51 pci_wakeup_event(pci_dev); 54 pci_wakeup_event(pci_dev);
52 pm_runtime_resume(&pci_dev->dev); 55 pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
57 if (pci_dev->pme_support) 60 if (pci_dev->pme_support)
58 pci_check_pme_status(pci_dev); 61 pci_check_pme_status(pci_dev);
59 62
60 if (pci_dev->pme_poll)
61 pci_dev->pme_poll = false;
62
63 pci_wakeup_event(pci_dev); 63 pci_wakeup_event(pci_dev);
64 pm_runtime_resume(&pci_dev->dev); 64 pm_runtime_resume(&pci_dev->dev);
65 65
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e8ccf6c0f08a..bdd64b1b4817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
1155 1155
1156 pci_enable_bridge(dev->bus->self); 1156 pci_enable_bridge(dev->bus->self);
1157 1157
1158 if (pci_is_enabled(dev)) 1158 if (pci_is_enabled(dev)) {
1159 if (!dev->is_busmaster) {
1160 dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
1161 pci_set_master(dev);
1162 }
1159 return; 1163 return;
1164 }
1165
1160 retval = pci_enable_device(dev); 1166 retval = pci_enable_device(dev);
1161 if (retval) 1167 if (retval)
1162 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", 1168 dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a138965c01cb..b8fcc38c0d11 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -490,7 +490,7 @@ exit:
490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps 490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps
491 <newvalue> reflects the new config and is driver dependent 491 <newvalue> reflects the new config and is driver dependent
492 */ 492 */
493static int pinconf_dbg_config_write(struct file *file, 493static ssize_t pinconf_dbg_config_write(struct file *file,
494 const char __user *user_buf, size_t count, loff_t *ppos) 494 const char __user *user_buf, size_t count, loff_t *ppos)
495{ 495{
496 struct pinctrl_maps *maps_node; 496 struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
508 int i; 508 int i;
509 509
510 /* Get userspace string and assure termination */ 510 /* Get userspace string and assure termination */
511 buf_size = min(count, (size_t)(sizeof(buf)-1)); 511 buf_size = min(count, sizeof(buf) - 1);
512 if (copy_from_user(buf, user_buf, buf_size)) 512 if (copy_from_user(buf, user_buf, buf_size))
513 return -EFAULT; 513 return -EFAULT;
514 buf[buf_size] = 0; 514 buf[buf_size] = 0;
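
The pinconf debugfs hunk fixes the write handler's return type and bounds the copy with min(count, sizeof(buf) - 1) before NUL-terminating. A user-space sketch of the same bounded-copy-and-terminate pattern, with plain memcpy standing in for copy_from_user():

#include <stdio.h>
#include <string.h>

/* Copy at most dst_size - 1 bytes and always NUL-terminate. */
static size_t bounded_copy(char *dst, size_t dst_size,
			   const char *src, size_t count)
{
	size_t n = count < dst_size - 1 ? count : dst_size - 1;

	memcpy(dst, src, n);
	dst[n] = '\0';
	return n;
}

int main(void)
{
	char buf[8];
	const char *input = "this command is longer than the buffer";
	size_t copied = bounded_copy(buf, sizeof(buf), input, strlen(input));

	printf("copied %zu bytes: \"%s\"\n", copied, buf);
	return 0;
}
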
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 2689f8d01a1e..155b1b3a0e7a 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
663/* pin banks of s5pv210 pin-controller */ 663/* pin banks of s5pv210 pin-controller */
664static struct samsung_pin_bank s5pv210_pin_bank[] = { 664static struct samsung_pin_bank s5pv210_pin_bank[] = {
665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
666 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 666 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), 668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), 669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), 670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
671 EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), 671 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
672 EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), 672 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
673 EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), 673 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
674 EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), 674 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), 675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), 676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
677 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), 677 EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), 678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), 679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), 680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 82638fac3cfa..30c4d356cb33 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
891 param = pinconf_to_config_param(configs[i]); 891 param = pinconf_to_config_param(configs[i]);
892 param_val = pinconf_to_config_argument(configs[i]); 892 param_val = pinconf_to_config_argument(configs[i]);
893 893
894 if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
895 continue;
896
894 switch (param) { 897 switch (param) {
895 case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
896 return 0;
897 case PIN_CONFIG_BIAS_DISABLE: 898 case PIN_CONFIG_BIAS_DISABLE:
898 case PIN_CONFIG_BIAS_PULL_UP: 899 case PIN_CONFIG_BIAS_PULL_UP:
899 case PIN_CONFIG_BIAS_PULL_DOWN: 900 case PIN_CONFIG_BIAS_PULL_DOWN:
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 622c4854977e..93c9e3899d5e 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Arthur: Pritesh Raithatha <praithatha@nvidia.com> 6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
2763}; 2763};
2764module_platform_driver(tegra114_pinctrl_driver); 2764module_platform_driver(tegra114_pinctrl_driver);
2765 2765
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); 2766MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); 2767MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
2769MODULE_LICENSE("GPL v2"); 2768MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 1a7816390773..b9f2653e4ef9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
709 struct of_regulator_match **da9063_reg_matches) 709 struct of_regulator_match **da9063_reg_matches)
710{ 710{
711 da9063_reg_matches = NULL; 711 da9063_reg_matches = NULL;
712 return PTR_ERR(-ENODEV); 712 return ERR_PTR(-ENODEV);
713} 713}
714#endif 714#endif
715 715
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 488dfe7ce9a6..7e2b165972e6 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
201#define SMPS_CTRL_MODE_ECO 0x02 201#define SMPS_CTRL_MODE_ECO 0x02
202#define SMPS_CTRL_MODE_PWM 0x03 202#define SMPS_CTRL_MODE_PWM 0x03
203 203
204/* These values are derived from the data sheet. And are the number of steps 204#define PALMAS_SMPS_NUM_VOLTAGES 122
205 * where there is a voltage change, the ranges at beginning and end of register
206 * max/min values where there is no change are omitted.
207 *
208 * So they are basically (maxV-minV)/stepV
209 */
210#define PALMAS_SMPS_NUM_VOLTAGES 117
211#define PALMAS_SMPS10_NUM_VOLTAGES 2 205#define PALMAS_SMPS10_NUM_VOLTAGES 2
212#define PALMAS_LDO_NUM_VOLTAGES 50 206#define PALMAS_LDO_NUM_VOLTAGES 50
213 207
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
979 pmic->desc[id].min_uV = 900000; 973 pmic->desc[id].min_uV = 900000;
980 pmic->desc[id].uV_step = 50000; 974 pmic->desc[id].uV_step = 50000;
981 pmic->desc[id].linear_min_sel = 1; 975 pmic->desc[id].linear_min_sel = 1;
976 pmic->desc[id].enable_time = 500;
982 pmic->desc[id].vsel_reg = 977 pmic->desc[id].vsel_reg =
983 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, 978 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
984 palmas_regs_info[id].vsel_addr); 979 palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
997 pmic->desc[id].min_uV = 450000; 992 pmic->desc[id].min_uV = 450000;
998 pmic->desc[id].uV_step = 25000; 993 pmic->desc[id].uV_step = 25000;
999 } 994 }
995
996 /* LDO6 in vibrator mode will have enable time 2000us */
997 if (pdata && pdata->ldo6_vibrator &&
998 (id == PALMAS_REG_LDO6))
999 pmic->desc[id].enable_time = 2000;
1000 } else { 1000 } else {
1001 pmic->desc[id].n_voltages = 1; 1001 pmic->desc[id].n_voltages = 1;
1002 pmic->desc[id].ops = &palmas_ops_extreg; 1002 pmic->desc[id].ops = &palmas_ops_extreg;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d8e3e1262bc2..20c271d49dcb 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, 279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
280 abb->base); 280 abb->base);
281 281
282 /* program LDO VBB vset override if needed */ 282 /*
283 if (abb->ldo_base) 283 * program LDO VBB vset override if needed for !bypass mode
284 * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
285 * be performed *before* switch to bias mode else VBB glitches.
286 */
287 if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
284 ti_abb_program_ldovbb(dev, abb, info); 288 ti_abb_program_ldovbb(dev, abb, info);
285 289
286 /* Initiate ABB ldo change */ 290 /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
295 if (ret) 299 if (ret)
296 goto out; 300 goto out;
297 301
302 /*
303 * Reset LDO VBB vset override bypass mode
304 * XXX: Do not switch sequence - for bypass, LDO override reset *must*
305 * be performed *after* switch to bypass else VBB glitches.
306 */
307 if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
308 ti_abb_program_ldovbb(dev, abb, info);
309
298out: 310out:
299 return ret; 311 return ret;
300} 312}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 1432b26ef2e9..2205fbc2c37b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
63 */ 63 */
64 64
65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { 65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
66 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, 66 { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
67 .uV_step = 50000 }, 67 .uV_step = 50000 },
68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, 68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
69 .uV_step = 100000 }, 69 .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
332 */ 332 */
333 333
334static const struct regulator_linear_range wm831x_aldo_ranges[] = { 334static const struct regulator_linear_range wm831x_aldo_ranges[] = {
335 { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, 335 { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
336 .uV_step = 50000 }, 336 .uV_step = 50000 },
337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, 337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
338 .uV_step = 100000 }, 338 .uV_step = 100000 },
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 835b5f0f344e..61ca9292a429 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
543} 543}
544 544
545static const struct regulator_linear_range wm8350_ldo_ranges[] = { 545static const struct regulator_linear_range wm8350_ldo_ranges[] = {
546 { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, 546 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
547 .uV_step = 50000 }, 547 .uV_step = 50000 },
548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, 548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
549 .uV_step = 100000 }, 549 .uV_step = 100000 },
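
The wm831x and wm8350 hunks above shrink max_uV so each linear range ends exactly on its last selector. The arithmetic behind the fix is max_uV = min_uV + (max_sel - min_sel) * uV_step, worked out with the numbers visible in the diff:

  wm831x GP LDO, low range:   900000 + (14 - 0) * 50000 = 1600000 uV (was 1650000)
  wm831x ALDO,   low range:  1000000 + (12 - 0) * 50000 = 1600000 uV (was 1650000)
  wm8350 LDO,    low range:   900000 + (15 - 0) * 50000 = 1650000 uV (was 1750000)
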
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 08b22a901c25..d7ca9305ff45 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -105,7 +105,7 @@
105#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) 105#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
106#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) 106#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
107#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) 107#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
108#define BNX2FC_5771X_DB_PAGE_SIZE 128 108#define BNX2X_DB_SHIFT 3
109 109
110#define BNX2FC_TASK_SIZE 128 110#define BNX2FC_TASK_SIZE 128
111#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) 111#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index c0d035a8f8f9..46a37657307f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1421,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1421 1421
1422 reg_base = pci_resource_start(hba->pcidev, 1422 reg_base = pci_resource_start(hba->pcidev,
1423 BNX2X_DOORBELL_PCI_BAR); 1423 BNX2X_DOORBELL_PCI_BAR);
1424 reg_off = BNX2FC_5771X_DB_PAGE_SIZE * 1424 reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1425 (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1426 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); 1425 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1427 if (!tgt->ctx_base) 1426 if (!tgt->ctx_base)
1428 return -ENOMEM; 1427 return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6940f0930a84..c73bbcb63c02 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -64,7 +64,7 @@
64#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 64#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
65#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 65#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
66 66
67#define BNX2I_5771X_DBELL_PAGE_SIZE 128 67#define BNX2X_DB_SHIFT 3
68 68
69/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ 69/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
70#define MAX_BD_LENGTH 65535 70#define MAX_BD_LENGTH 65535
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index af3e675d4d48..5be718c241c4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2738 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 2738 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2739 reg_base = pci_resource_start(ep->hba->pcidev, 2739 reg_base = pci_resource_start(ep->hba->pcidev,
2740 BNX2X_DOORBELL_PCI_BAR); 2740 BNX2X_DOORBELL_PCI_BAR);
2741 reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) + 2741 reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
2742 DPM_TRIGER_TYPE;
2743 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); 2742 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2744 goto arm_cq; 2743 goto arm_cq;
2745 } 2744 }
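
The bnx2fc and bnx2i hunks replace the fixed 128-byte-per-context doorbell stride with one derived from BNX2X_DB_SHIFT, and drop the DPM_TRIGER_TYPE addend. With BNX2X_DB_SHIFT == 3 the offset works out as:

  reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF) = 8 * context_id
  e.g. context_id 16 maps to byte offset 128 into the doorbell BAR, where the old code would have used 128 * 16 + DPM_TRIGER_TYPE.
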
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index a84aab47a113..f73287eab373 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -96,6 +96,15 @@ config COMEDI_SKEL
96 To compile this driver as a module, choose M here: the module will be 96 To compile this driver as a module, choose M here: the module will be
97 called skel. 97 called skel.
98 98
99config COMEDI_SSV_DNP
100 tristate "SSV Embedded Systems DIL/Net-PC support"
101 depends on X86_32 || COMPILE_TEST
102 ---help---
103 Enable support for SSV Embedded Systems DIL/Net-PC
104
105 To compile this driver as a module, choose M here: the module will be
106 called ssv_dnp.
107
99endif # COMEDI_MISC_DRIVERS 108endif # COMEDI_MISC_DRIVERS
100 109
101menuconfig COMEDI_ISA_DRIVERS 110menuconfig COMEDI_ISA_DRIVERS
@@ -386,6 +395,14 @@ config COMEDI_DMM32AT
386 To compile this driver as a module, choose M here: the module will be 395 To compile this driver as a module, choose M here: the module will be
387 called dmm32at. 396 called dmm32at.
388 397
398config COMEDI_UNIOXX5
399 tristate "Fastwel UNIOxx-5 analog and digital io board support"
400 ---help---
401 Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
402
403 To compile this driver as a module, choose M here: the module will be
404 called unioxx5.
405
389config COMEDI_FL512 406config COMEDI_FL512
390 tristate "FL512 ISA card support" 407 tristate "FL512 ISA card support"
391 ---help--- 408 ---help---
@@ -855,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
855 To compile this driver as a module, choose M here: the module will be 872 To compile this driver as a module, choose M here: the module will be
856 called dyna_pci10xx. 873 called dyna_pci10xx.
857 874
858config COMEDI_UNIOXX5
859 tristate "Fastwel UNIOxx-5 analog and digital io board support"
860 ---help---
861 Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
862
863 To compile this driver as a module, choose M here: the module will be
864 called unioxx5.
865
866config COMEDI_GSC_HPDI 875config COMEDI_GSC_HPDI
867 tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support" 876 tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
868 select COMEDI_FC 877 select COMEDI_FC
@@ -1085,14 +1094,6 @@ config COMEDI_S626
1085 To compile this driver as a module, choose M here: the module will be 1094 To compile this driver as a module, choose M here: the module will be
1086 called s626. 1095 called s626.
1087 1096
1088config COMEDI_SSV_DNP
1089 tristate "SSV Embedded Systems DIL/Net-PC support"
1090 ---help---
1091 Enable support for SSV Embedded Systems DIL/Net-PC
1092
1093 To compile this driver as a module, choose M here: the module will be
1094 called ssv_dnp.
1095
1096config COMEDI_MITE 1097config COMEDI_MITE
1097 depends on HAS_DMA 1098 depends on HAS_DMA
1098 tristate 1099 tristate
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3ba4c5712dff..853f62b2b1a9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
369{ 369{
370 const struct ni_65xx_board *board = comedi_board(dev); 370 const struct ni_65xx_board *board = comedi_board(dev);
371 struct ni_65xx_private *devpriv = dev->private; 371 struct ni_65xx_private *devpriv = dev->private;
372 unsigned base_bitfield_channel; 372 int base_bitfield_channel;
373 const unsigned max_ports_per_bitfield = 5;
374 unsigned read_bits = 0; 373 unsigned read_bits = 0;
375 unsigned j; 374 int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
375 int port_offset;
376 376
377 base_bitfield_channel = CR_CHAN(insn->chanspec); 377 base_bitfield_channel = CR_CHAN(insn->chanspec);
378 for (j = 0; j < max_ports_per_bitfield; ++j) { 378 for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
379 const unsigned port_offset = 379 port_offset <= last_port_offset; port_offset++) {
380 ni_65xx_port_by_channel(base_bitfield_channel) + j; 380 unsigned port = sprivate(s)->base_port + port_offset;
381 const unsigned port = 381 int base_port_channel = port_offset * ni_65xx_channels_per_port;
382 sprivate(s)->base_port + port_offset;
383 unsigned base_port_channel;
384 unsigned port_mask, port_data, port_read_bits; 382 unsigned port_mask, port_data, port_read_bits;
385 int bitshift; 383 int bitshift = base_port_channel - base_bitfield_channel;
386 if (port >= ni_65xx_total_num_ports(board)) 384
385 if (bitshift >= 32)
387 break; 386 break;
388 base_port_channel = port_offset * ni_65xx_channels_per_port;
389 port_mask = data[0]; 387 port_mask = data[0];
390 port_data = data[1]; 388 port_data = data[1];
391 bitshift = base_port_channel - base_bitfield_channel;
392 if (bitshift >= 32 || bitshift <= -32)
393 break;
394 if (bitshift > 0) { 389 if (bitshift > 0) {
395 port_mask >>= bitshift; 390 port_mask >>= bitshift;
396 port_data >>= bitshift; 391 port_data >>= bitshift;
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
index 724a685753dd..40ef785a0428 100644
--- a/drivers/staging/dgap/dgap_driver.c
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -474,7 +474,7 @@ static void dgap_cleanup_board(struct board_t *brd)
474 474
475 DGAP_LOCK(dgap_global_lock, flags); 475 DGAP_LOCK(dgap_global_lock, flags);
476 brd->msgbuf = NULL; 476 brd->msgbuf = NULL;
477 printk(brd->msgbuf_head); 477 printk("%s", brd->msgbuf_head);
478 kfree(brd->msgbuf_head); 478 kfree(brd->msgbuf_head);
479 brd->msgbuf_head = NULL; 479 brd->msgbuf_head = NULL;
480 DGAP_UNLOCK(dgap_global_lock, flags); 480 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -628,7 +628,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
628 DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i)); 628 DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
629 DGAP_LOCK(dgap_global_lock, flags); 629 DGAP_LOCK(dgap_global_lock, flags);
630 brd->msgbuf = NULL; 630 brd->msgbuf = NULL;
631 printk(brd->msgbuf_head); 631 printk("%s", brd->msgbuf_head);
632 kfree(brd->msgbuf_head); 632 kfree(brd->msgbuf_head);
633 brd->msgbuf_head = NULL; 633 brd->msgbuf_head = NULL;
634 DGAP_UNLOCK(dgap_global_lock, flags); 634 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -955,25 +955,28 @@ static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
955 char buf[1024]; 955 char buf[1024];
956 int i; 956 int i;
957 unsigned long flags; 957 unsigned long flags;
958 size_t length;
958 959
959 DGAP_LOCK(dgap_global_lock, flags); 960 DGAP_LOCK(dgap_global_lock, flags);
960 961
961 /* Format buf using fmt and arguments contained in ap. */ 962 /* Format buf using fmt and arguments contained in ap. */
962 va_start(ap, fmt); 963 va_start(ap, fmt);
963 i = vsprintf(buf, fmt, ap); 964 i = vsnprintf(buf, sizeof(buf), fmt, ap);
964 va_end(ap); 965 va_end(ap);
965 966
966 DPR((buf)); 967 DPR((buf));
967 968
968 if (!brd || !brd->msgbuf) { 969 if (!brd || !brd->msgbuf) {
969 printk(buf); 970 printk("%s", buf);
970 DGAP_UNLOCK(dgap_global_lock, flags); 971 DGAP_UNLOCK(dgap_global_lock, flags);
971 return; 972 return;
972 } 973 }
973 974
974 memcpy(brd->msgbuf, buf, strlen(buf)); 975 length = strlen(buf) + 1;
975 brd->msgbuf += strlen(buf); 976 if (brd->msgbuf - brd->msgbuf_head < length)
976 *brd->msgbuf = 0; 977 length = brd->msgbuf - brd->msgbuf_head;
978 memcpy(brd->msgbuf, buf, length);
979 brd->msgbuf += length;
977 980
978 DGAP_UNLOCK(dgap_global_lock, flags); 981 DGAP_UNLOCK(dgap_global_lock, flags);
979} 982}
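
dgap_mbuf() now formats with vsnprintf() and clamps the copy into the message buffer to the space actually left, instead of trusting vsprintf(). A user-space sketch of that pattern with an assumed 64-byte message buffer; the buffer layout is illustrative, not the driver's:

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static char msgbuf[64];
static char *msgpos = msgbuf;

/* Format into a bounded scratch buffer, then copy only what still fits. */
static void mbuf(const char *fmt, ...)
{
	char buf[128];
	size_t length, room;
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);   /* never overruns buf */
	va_end(ap);

	length = strlen(buf) + 1;               /* include the terminator */
	room = sizeof(msgbuf) - (msgpos - msgbuf);
	if (length > room)
		length = room;                  /* clamp to the space left */
	memcpy(msgpos, buf, length);
	msgpos += length;
}

int main(void)
{
	mbuf("board %d: %s", 0, "probing");
	mbuf("board %d: %s", 0, "a message long enough to overflow the remaining space");
	printf("used %zu of %zu bytes\n", (size_t)(msgpos - msgbuf), sizeof(msgbuf));
	return 0;
}
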
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index f8c1e22585d6..71d2b83cc3a1 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -454,7 +454,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
454 454
455 DGNC_LOCK(dgnc_global_lock, flags); 455 DGNC_LOCK(dgnc_global_lock, flags);
456 brd->msgbuf = NULL; 456 brd->msgbuf = NULL;
457 printk(brd->msgbuf_head); 457 printk("%s", brd->msgbuf_head);
458 kfree(brd->msgbuf_head); 458 kfree(brd->msgbuf_head);
459 brd->msgbuf_head = NULL; 459 brd->msgbuf_head = NULL;
460 DGNC_UNLOCK(dgnc_global_lock, flags); 460 DGNC_UNLOCK(dgnc_global_lock, flags);
@@ -710,7 +710,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
710 DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i)); 710 DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
711 DGNC_LOCK(dgnc_global_lock, flags); 711 DGNC_LOCK(dgnc_global_lock, flags);
712 brd->msgbuf = NULL; 712 brd->msgbuf = NULL;
713 printk(brd->msgbuf_head); 713 printk("%s", brd->msgbuf_head);
714 kfree(brd->msgbuf_head); 714 kfree(brd->msgbuf_head);
715 brd->msgbuf_head = NULL; 715 brd->msgbuf_head = NULL;
716 DGNC_UNLOCK(dgnc_global_lock, flags); 716 DGNC_UNLOCK(dgnc_global_lock, flags);
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index db4d6dc03243..b36feb080cba 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
37 37
38config IIO_SIMPLE_DUMMY_BUFFER 38config IIO_SIMPLE_DUMMY_BUFFER
39 boolean "Buffered capture support" 39 boolean "Buffered capture support"
40 depends on IIO_KFIFO_BUF 40 select IIO_KFIFO_BUF
41 help 41 help
42 Add buffered data capture to the simple dummy driver. 42 Add buffered data capture to the simple dummy driver.
43 43
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 351936c3efd6..e4998e4d4434 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -563,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
563 mutex_init(&chip->lock); 563 mutex_init(&chip->lock);
564 564
565 chip->lux_scale = 1; 565 chip->lux_scale = 1;
566 chip->lux_uscale = 0;
566 chip->range = 1000; 567 chip->range = 1000;
567 chip->adc_bit = 16; 568 chip->adc_bit = 16;
568 chip->suspended = false; 569 chip->suspended = false;
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index d2748c329eae..c3f3f539e787 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -229,7 +229,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
229 if (result < 0) 229 if (result < 0)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 *val = result; 232 *val = sign_extend32(result, 15);
233 return IIO_VAL_INT; 233 return IIO_VAL_INT;
234} 234}
235 235
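
The hmc5843 fix runs the raw 16-bit register value through sign_extend32() so negative magnetometer readings come out negative instead of as large positive integers. A user-space sketch of the helper using the usual shift trick, plus a worked value:

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low (index + 1) bits of value into a signed 32-bit integer. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t raw = 0xF8A4;     /* 16-bit two's-complement reading: -1884 */

	printf("without extension: %d\n", (int)raw);                 /* 63652 */
	printf("with extension:    %d\n", sign_extend32(raw, 15));   /* -1884 */
	return 0;
}
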
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index a802cf2491d6..4c6d2041260b 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
299 if (ret) 299 if (ret)
300 iio_device_free(indio_dev); 300 iio_device_free(indio_dev);
301 301
302 return 0; 302 return ret;
303} 303}
304 304
305static int ade7854_spi_remove(struct spi_device *spi) 305static int ade7854_spi_remove(struct spi_device *spi)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47c5888461ff..a2e52a0c53c9 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
41 struct list_head encoder_list; 41 struct list_head encoder_list;
42 struct list_head connector_list; 42 struct list_head connector_list;
43 struct mutex mutex; 43 struct mutex mutex;
44 int references;
45 int pipes; 44 int pipes;
46 struct drm_fbdev_cma *fbhelper; 45 struct drm_fbdev_cma *fbhelper;
47}; 46};
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
241 } 240 }
242 } 241 }
243 242
244 imxdrm->references++;
245
246 return imxdrm->drm; 243 return imxdrm->drm;
247 244
248unwind_crtc: 245unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
280 list_for_each_entry(enc, &imxdrm->encoder_list, list) 277 list_for_each_entry(enc, &imxdrm->encoder_list, list)
281 module_put(enc->owner); 278 module_put(enc->owner);
282 279
283 imxdrm->references--;
284
285 mutex_unlock(&imxdrm->mutex); 280 mutex_unlock(&imxdrm->mutex);
286} 281}
287EXPORT_SYMBOL_GPL(imx_drm_device_put); 282EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
485 480
486 mutex_lock(&imxdrm->mutex); 481 mutex_lock(&imxdrm->mutex);
487 482
488 if (imxdrm->references) { 483 if (imxdrm->drm->open_count) {
489 ret = -EBUSY; 484 ret = -EBUSY;
490 goto err_busy; 485 goto err_busy;
491 } 486 }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
564 559
565 mutex_lock(&imxdrm->mutex); 560 mutex_lock(&imxdrm->mutex);
566 561
567 if (imxdrm->references) { 562 if (imxdrm->drm->open_count) {
568 ret = -EBUSY; 563 ret = -EBUSY;
569 goto err_busy; 564 goto err_busy;
570 } 565 }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
709 704
710 mutex_lock(&imxdrm->mutex); 705 mutex_lock(&imxdrm->mutex);
711 706
712 if (imxdrm->references) { 707 if (imxdrm->drm->open_count) {
713 ret = -EBUSY; 708 ret = -EBUSY;
714 goto err_busy; 709 goto err_busy;
715 } 710 }
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 2f44d56700af..776d3632dc7d 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
244 struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); 244 struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
245 struct usb_line6_toneport *toneport = 245 struct usb_line6_toneport *toneport =
246 (struct usb_line6_toneport *)line6pcm->line6; 246 (struct usb_line6_toneport *)line6pcm->line6;
247 unsigned int source;
247 248
248 if (ucontrol->value.enumerated.item[0] == toneport->source) 249 source = ucontrol->value.enumerated.item[0];
250 if (source >= ARRAY_SIZE(toneport_source_info))
251 return -EINVAL;
252 if (source == toneport->source)
249 return 0; 253 return 0;
250 254
251 toneport->source = ucontrol->value.enumerated.item[0]; 255 toneport->source = source;
252 toneport_send_cmd(toneport->line6.usbdev, 256 toneport_send_cmd(toneport->line6.usbdev,
253 toneport_source_info[toneport->source].code, 0x0000); 257 toneport_source_info[source].code, 0x0000);
254 return 1; 258 return 1;
255} 259}
256 260
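
The toneport hunk validates the userspace-chosen enum index against the size of the source table before indexing it. A minimal user-space sketch of the same guard; the table contents below are made up:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct { const char *name; int code; } sources[] = {
	{ "Microphone", 0x0a01 },
	{ "Line",       0x0801 },
	{ "Instrument", 0x0901 },
};

/* Reject out-of-range indices instead of reading past the table. */
static int select_source(unsigned int index)
{
	if (index >= ARRAY_SIZE(sources))
		return -1;
	printf("selected %s (0x%04x)\n", sources[index].name, sources[index].code);
	return 0;
}

int main(void)
{
	select_source(1);    /* valid */
	select_source(7);    /* rejected instead of indexing out of bounds */
	return 0;
}
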
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 086ca3d7241b..26b49a24b3df 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
1802int 1802int
1803kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) 1803kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1804{ 1804{
1805 struct task_struct *task = kthread_run(fn, arg, name); 1805 struct task_struct *task = kthread_run(fn, arg, "%s", name);
1806 1806
1807 if (IS_ERR(task)) 1807 if (IS_ERR(task))
1808 return PTR_ERR(task); 1808 return PTR_ERR(task);
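
Several hunks in this series (the lustre ones here and the xen-netback kthread_create() change earlier) stop passing an externally supplied thread name directly as the format argument and pass "%s", name instead. The hazard is the ordinary printf-style format-string one, sketched in user space:

#include <stdio.h>

int main(void)
{
	/* Thread/device names can contain '%' if they come from config or hardware. */
	const char *name = "worker-%s-%n";

	/* Unsafe: the name would be parsed as a format string.
	 *     printf(name);
	 * Safe: the name is treated as plain data.
	 */
	printf("%s\n", name);
	return 0;
}
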
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 2c581b7fa8ad..68a4f52ec998 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -1005,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
1005int 1005int
1006ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) 1006ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
1007{ 1007{
1008 struct task_struct *task = kthread_run(fn, arg, name); 1008 struct task_struct *task = kthread_run(fn, arg, "%s", name);
1009 1009
1010 if (IS_ERR(task)) 1010 if (IS_ERR(task))
1011 return PTR_ERR(task); 1011 return PTR_ERR(task);
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 4e898e491860..2156a44d0740 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,6 +1,6 @@
1config LUSTRE_FS 1config LUSTRE_FS
2 tristate "Lustre file system client support" 2 tristate "Lustre file system client support"
3 depends on INET && m 3 depends on INET && m && !MIPS && !XTENSA && !SUPERH
4 select LNET 4 select LNET
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_CRC32 6 select CRYPTO_CRC32
@@ -52,7 +52,7 @@ config LUSTRE_DEBUG_EXPENSIVE_CHECK
52config LUSTRE_TRANSLATE_ERRNOS 52config LUSTRE_TRANSLATE_ERRNOS
53 bool 53 bool
54 depends on LUSTRE_FS && !X86 54 depends on LUSTRE_FS && !X86
55 default true 55 default y
56 56
57config LUSTRE_LLITE_LLOOP 57config LUSTRE_LLITE_LLOOP
58 bool "Lustre virtual block device" 58 bool "Lustre virtual block device"
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 3916bda3004c..a100a0b96381 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -800,9 +800,9 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
800 800
801 init_completion(&bltd.bltd_comp); 801 init_completion(&bltd.bltd_comp);
802 bltd.bltd_num = atomic_read(&blp->blp_num_threads); 802 bltd.bltd_num = atomic_read(&blp->blp_num_threads);
803 snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1, 803 snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
804 "ldlm_bl_%02d", bltd.bltd_num); 804 "ldlm_bl_%02d", bltd.bltd_num);
805 task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name); 805 task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
806 if (IS_ERR(task)) { 806 if (IS_ERR(task)) {
807 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n", 807 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
808 atomic_read(&blp->blp_num_threads), PTR_ERR(task)); 808 atomic_read(&blp->blp_num_threads), PTR_ERR(task));
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index 462172d1a756..1a55c81892e0 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -397,7 +397,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
397 sched->ws_name, sched->ws_nthreads); 397 sched->ws_name, sched->ws_nthreads);
398 } 398 }
399 399
400 task = kthread_run(cfs_wi_scheduler, sched, name); 400 task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
401 if (!IS_ERR(task)) { 401 if (!IS_ERR(task)) {
402 nthrs--; 402 nthrs--;
403 continue; 403 continue;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 2644edf438c1..c8b43442dc74 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
1387 if (nob > ulsm_nob) 1387 if (nob > ulsm_nob)
1388 return (-EINVAL); 1388 return (-EINVAL);
1389 1389
1390 if (copy_to_user (ulsm, lsm, sizeof(ulsm))) 1390 if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
1391 return (-EFAULT); 1391 return (-EFAULT);
1392 1392
1393 for (i = 0; i < lsm->lsm_stripe_count; i++) { 1393 for (i = 0; i < lsm->lsm_stripe_count; i++) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 227a0ae9593b..5dec771d70ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -383,8 +383,8 @@ int ptlrpc_start_pinger(void)
383 383
384 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we 384 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
385 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */ 385 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
386 rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, 386 rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
387 &pinger_thread, pinger_thread.t_name)); 387 "%s", pinger_thread.t_name));
388 if (IS_ERR_VALUE(rc)) { 388 if (IS_ERR_VALUE(rc)) {
389 CERROR("cannot start thread: %d\n", rc); 389 CERROR("cannot start thread: %d\n", rc);
390 return rc; 390 return rc;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index fbdeff65d059..89c9be96f454 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -615,7 +615,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
615 init_completion(&pc->pc_starting); 615 init_completion(&pc->pc_starting);
616 init_completion(&pc->pc_finishing); 616 init_completion(&pc->pc_finishing);
617 spin_lock_init(&pc->pc_lock); 617 spin_lock_init(&pc->pc_lock);
618 strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1); 618 strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
619 pc->pc_set = ptlrpc_prep_set(); 619 pc->pc_set = ptlrpc_prep_set();
620 if (pc->pc_set == NULL) 620 if (pc->pc_set == NULL)
621 GOTO(out, rc = -ENOMEM); 621 GOTO(out, rc = -ENOMEM);
@@ -638,7 +638,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
638 GOTO(out, rc); 638 GOTO(out, rc);
639 } 639 }
640 640
641 task = kthread_run(ptlrpcd, pc, pc->pc_name); 641 task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
642 if (IS_ERR(task)) 642 if (IS_ERR(task))
643 GOTO(out, rc = PTR_ERR(task)); 643 GOTO(out, rc = PTR_ERR(task));
644 644
@@ -745,7 +745,7 @@ static int ptlrpcd_init(void)
745 if (ptlrpcds == NULL) 745 if (ptlrpcds == NULL)
746 GOTO(out, rc = -ENOMEM); 746 GOTO(out, rc = -ENOMEM);
747 747
748 snprintf(name, 15, "ptlrpcd_rcv"); 748 snprintf(name, sizeof(name), "ptlrpcd_rcv");
749 set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags); 749 set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
750 rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv); 750 rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
751 if (rc < 0) 751 if (rc < 0)
@@ -764,7 +764,7 @@ static int ptlrpcd_init(void)
764 * unnecessary dependency. But how to distribute async RPCs load 764 * unnecessary dependency. But how to distribute async RPCs load
765 * among all the ptlrpc daemons becomes another trouble. */ 765 * among all the ptlrpc daemons becomes another trouble. */
766 for (i = 0; i < nthreads; i++) { 766 for (i = 0; i < nthreads; i++) {
767 snprintf(name, 15, "ptlrpcd_%d", i); 767 snprintf(name, sizeof(name), "ptlrpcd_%d", i);
768 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]); 768 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
769 if (rc < 0) 769 if (rc < 0)
770 GOTO(out, rc); 770 GOTO(out, rc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index e90c8fb7da6a..6547f46a7729 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -59,8 +59,8 @@
59 ****************************************/ 59 ****************************************/
60 60
61 61
62#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 62#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
63#define PAGES_PER_POOL (PTRS_PER_PAGE) 63#define PAGES_PER_POOL (POINTERS_PER_PAGE)
64 64
65#define IDLE_IDX_MAX (100) 65#define IDLE_IDX_MAX (100)
66#define IDLE_IDX_WEIGHT (3) 66#define IDLE_IDX_WEIGHT (3)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index ac8b5fd2300b..acf75f3873d1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -2718,15 +2718,15 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
2718 spin_unlock(&svcpt->scp_lock); 2718 spin_unlock(&svcpt->scp_lock);
2719 2719
2720 if (svcpt->scp_cpt >= 0) { 2720 if (svcpt->scp_cpt >= 0) {
2721 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d", 2721 snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
2722 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); 2722 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
2723 } else { 2723 } else {
2724 snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d", 2724 snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
2725 svc->srv_thread_name, thread->t_id); 2725 svc->srv_thread_name, thread->t_id);
2726 } 2726 }
2727 2727
2728 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); 2728 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
2729 rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name)); 2729 rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
2730 if (IS_ERR_VALUE(rc)) { 2730 if (IS_ERR_VALUE(rc)) {
2731 CERROR("cannot start thread '%s': rc %d\n", 2731 CERROR("cannot start thread '%s': rc %d\n",
2732 thread->t_name, rc); 2732 thread->t_name, rc);
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index d7b3c82b5ead..45dfe94199ae 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
604 } 604 }
605 } 605 }
606 606
607 memset(usb, 0, sizeof(usb)); 607 memset(usb, 0, sizeof(*usb));
608 usb->init_flags = flags; 608 usb->init_flags = flags;
609 609
610 /* Initialize the USB state structure */ 610 /* Initialize the USB state structure */
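
The cvmx-usb fix (and the echo_client copy_to_user() fix earlier) correct a sizeof applied to a pointer where the pointed-to object was meant. A short user-space illustration; the struct size is made up:

#include <stdio.h>
#include <string.h>

struct usb_state { int init_flags; char regs[252]; };   /* made-up 256-byte struct */

int main(void)
{
	struct usb_state state;
	struct usb_state *usb = &state;

	printf("sizeof(usb)  = %zu\n", sizeof(usb));    /* size of the pointer (8 on LP64) */
	printf("sizeof(*usb) = %zu\n", sizeof(*usb));   /* size of the whole struct (256) */

	memset(usb, 0, sizeof(*usb));                   /* clears the entire structure */
	return 0;
}
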
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb743769..199059d64c9b 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -48,13 +48,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
48 while (freed) { 48 while (freed) {
49 49
50 struct sk_buff *skb = dev_alloc_skb(size + 256); 50 struct sk_buff *skb = dev_alloc_skb(size + 256);
51 if (unlikely(skb == NULL)) { 51 if (unlikely(skb == NULL))
52 pr_warning
53 ("Failed to allocate skb for hardware pool %d\n",
54 pool);
55 break; 52 break;
56 }
57
58 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); 53 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
59 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; 54 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
60 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); 55 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f694ec35..ea53af30dfa7 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -373,9 +373,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
373 * Enable interrupts on inband status changes 373 * Enable interrupts on inband status changes
374 * for this port. 374 * for this port.
375 */ 375 */
376 gmx_rx_int_en.u64 = 376 gmx_rx_int_en.u64 = 0;
377 cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
378 (index, interface));
379 gmx_rx_int_en.s.phy_dupx = 1; 377 gmx_rx_int_en.s.phy_dupx = 1;
380 gmx_rx_int_en.s.phy_link = 1; 378 gmx_rx_int_en.s.phy_link = 1;
381 gmx_rx_int_en.s.phy_spd = 1; 379 gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16bc493..e14a1bb04361 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -303,6 +303,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
303 if (backlog > budget * cores_in_use && napi != NULL) 303 if (backlog > budget * cores_in_use && napi != NULL)
304 cvm_oct_enable_one_cpu(); 304 cvm_oct_enable_one_cpu();
305 } 305 }
306 rx_count++;
306 307
307 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 308 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
308 if (likely(skb_in_hw)) { 309 if (likely(skb_in_hw)) {
@@ -336,9 +337,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
336 */ 337 */
337 skb = dev_alloc_skb(work->len); 338 skb = dev_alloc_skb(work->len);
338 if (!skb) { 339 if (!skb) {
339 printk_ratelimited("Port %d failed to allocate "
340 "skbuff, packet dropped\n",
341 work->ipprt);
342 cvm_oct_free_work(work); 340 cvm_oct_free_work(work);
343 continue; 341 continue;
344 } 342 }
@@ -429,7 +427,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
429#endif 427#endif
430 } 428 }
431 netif_receive_skb(skb); 429 netif_receive_skb(skb);
432 rx_count++;
433 } else { 430 } else {
434 /* Drop any packet received for a device that isn't up */ 431 /* Drop any packet received for a device that isn't up */
435 /* 432 /*
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 3605c5da822d..6fc77428e83a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -157,8 +157,8 @@ _func_enter_;
157 157
158 *frlen = *frlen + (len + 2); 158 *frlen = *frlen + (len + 2);
159 159
160 return pbuf + len + 2;
161_func_exit_; 160_func_exit_;
161 return pbuf + len + 2;
162} 162}
163 163
164inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode, 164inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 8b2ba26ba38d..4b2eb8e9b562 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1827,13 +1827,13 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
1827 1827
1828#ifdef CONFIG_88EU_P2P 1828#ifdef CONFIG_88EU_P2P
1829 1829
1830static int get_reg_classes_full_count(struct p2p_channels channel_list) 1830static int get_reg_classes_full_count(struct p2p_channels *channel_list)
1831{ 1831{
1832 int cnt = 0; 1832 int cnt = 0;
1833 int i; 1833 int i;
1834 1834
1835 for (i = 0; i < channel_list.reg_classes; i++) { 1835 for (i = 0; i < channel_list->reg_classes; i++) {
1836 cnt += channel_list.reg_class[i].channels; 1836 cnt += channel_list->reg_class[i].channels;
1837 } 1837 }
1838 1838
1839 return cnt; 1839 return cnt;
@@ -2065,7 +2065,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
2065 /* + number of channels in all classes */ 2065 /* + number of channels in all classes */
2066 len_channellist_attr = 3 2066 len_channellist_attr = 3
2067 + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes) 2067 + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
2068 + get_reg_classes_full_count(pmlmeext->channel_list); 2068 + get_reg_classes_full_count(&pmlmeext->channel_list);
2069 2069
2070 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2070 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2071 p2pielen += 2; 2071 p2pielen += 2;
@@ -2437,7 +2437,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
2437 /* + number of channels in all classes */ 2437 /* + number of channels in all classes */
2438 len_channellist_attr = 3 2438 len_channellist_attr = 3
2439 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2439 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
2440 + get_reg_classes_full_count(pmlmeext->channel_list); 2440 + get_reg_classes_full_count(&pmlmeext->channel_list);
2441 2441
2442 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2442 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2443 2443
@@ -2859,7 +2859,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
2859 /* + number of channels in all classes */ 2859 /* + number of channels in all classes */
2860 len_channellist_attr = 3 2860 len_channellist_attr = 3
2861 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2861 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
2862 + get_reg_classes_full_count(pmlmeext->channel_list); 2862 + get_reg_classes_full_count(&pmlmeext->channel_list);
2863 2863
2864 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2864 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
2865 2865
@@ -3120,7 +3120,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
3120 /* + number of channels in all classes */ 3120 /* + number of channels in all classes */
3121 len_channellist_attr = 3 3121 len_channellist_attr = 3
3122 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 3122 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
3123 + get_reg_classes_full_count(pmlmeext->channel_list); 3123 + get_reg_classes_full_count(&pmlmeext->channel_list);
3124 3124
3125 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 3125 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
3126 p2pielen += 2; 3126 p2pielen += 2;
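
The hunks above change get_reg_classes_full_count() to take a pointer so the channel list is no longer copied onto the stack on every call. A rough userspace analogue, with a made-up struct channels standing in for struct p2p_channels:

#include <stdio.h>

/* Toy stand-in for struct p2p_channels: a struct of this size would be
 * copied on every by-value call, which is what the patch avoids. */
struct channels {
    int reg_classes;
    struct { int channels; } reg_class[10];
};

/* After the fix: take a pointer and read through it. */
static int full_count(const struct channels *list)
{
    int cnt = 0, i;

    for (i = 0; i < list->reg_classes; i++)
        cnt += list->reg_class[i].channels;
    return cnt;
}

int main(void)
{
    struct channels ch = { .reg_classes = 2 };

    ch.reg_class[0].channels = 11;
    ch.reg_class[1].channels = 4;
    printf("channels in all classes: %d\n", full_count(&ch));
    return 0;
}
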
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
index c7ff2e4d1f23..9832dcbbd07f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mp.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); 907 sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
908 } 908 }
909 909
910 _rtw_memset(data, '\0', sizeof(data)); 910 _rtw_memset(data, '\0', sizeof(*data));
911 911
912 i = psd_start; 912 i = psd_start;
913 while (i < psd_stop) { 913 while (i < psd_stop) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 013ea487e7ac..8018edd3d42e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -631,7 +631,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
631 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; 631 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
632 632
633 if (pregpriv->wifi_spec == 1) { 633 if (pregpriv->wifi_spec == 1) {
634 u32 j, tmp, change_inx; 634 u32 j, tmp, change_inx = false;
635 635
636 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */ 636 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
637 for (i = 0; i < 4; i++) { 637 for (i = 0; i < 4; i++) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 9c2e7a20c09e..ec0028d4e61a 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
57 u8 cut_ver, fab_ver; 57 u8 cut_ver, fab_ver;
58 58
59 /* Init Value */ 59 /* Init Value */
60 _rtw_memset(dm_odm, 0, sizeof(dm_odm)); 60 _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
61 61
62 dm_odm->Adapter = Adapter; 62 dm_odm->Adapter = Adapter;
63 63
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 2bfe72841921..4787bacdcad8 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -1010,7 +1010,7 @@ enum dm_dig_op {
1010#define DM_false_ALARM_THRESH_LOW 400 1010#define DM_false_ALARM_THRESH_LOW 400
1011#define DM_false_ALARM_THRESH_HIGH 1000 1011#define DM_false_ALARM_THRESH_HIGH 1000
1012 1012
1013#define DM_DIG_MAX_NIC 0x3e 1013#define DM_DIG_MAX_NIC 0x4e
1014#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */ 1014#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
1015 1015
1016#define DM_DIG_MAX_AP 0x32 1016#define DM_DIG_MAX_AP 0x32
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 52b280165a92..555c801d2ded 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -188,7 +188,7 @@ enum ChannelPlan {
188 188
189struct txpowerinfo24g { 189struct txpowerinfo24g {
190 u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; 190 u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
191 u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1]; 191 u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
192 /* If only one tx, only BW20 and OFDM are used. */ 192 /* If only one tx, only BW20 and OFDM are used. */
193 s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT]; 193 s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
194 s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT]; 194 s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index a96b018e5e6a..853ab80a2b86 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -870,6 +870,7 @@ static struct fwevent wlanevents[] = {
870 {0, NULL}, 870 {0, NULL},
871 {0, NULL}, 871 {0, NULL},
872 {0, &rtw_cpwm_event_callback}, 872 {0, &rtw_cpwm_event_callback},
873 {0, NULL},
873}; 874};
874 875
875#endif/* _RTL_MLME_EXT_C_ */ 876#endif/* _RTL_MLME_EXT_C_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index cd4100fb3645..95953ebc0279 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6973 stop = strncmp(extra, "stop", 4); 6973 stop = strncmp(extra, "stop", 4);
6974 sscanf(extra, "count =%d, pkt", &count); 6974 sscanf(extra, "count =%d, pkt", &count);
6975 6975
6976 _rtw_memset(extra, '\0', sizeof(extra)); 6976 _rtw_memset(extra, '\0', sizeof(*extra));
6977 6977
6978 if (stop == 0) { 6978 if (stop == 0) {
6979 bStartTest = 0; /* To set Stop */ 6979 bStartTest = 0; /* To set Stop */
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d3078d200e50..9ca3180ebaa0 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
54 /*=== Customer ID ===*/ 54 /*=== Customer ID ===*/
55 /****** 8188EUS ********/ 55 /****** 8188EUS ********/
56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ 56 {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
57 {} /* Terminating entry */ 58 {} /* Terminating entry */
58}; 59};
59 60
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 5bc361b16d4c..56144014b7c9 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
37 /* Get TCB and local buffer from common pool. 37 /* Get TCB and local buffer from common pool.
38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ 38 (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); 39 skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
40 if (!skb)
41 return RT_STATUS_FAILURE;
40 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); 42 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
41 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); 43 tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
42 tcb_desc->queue_index = TXCMD_QUEUE; 44 tcb_desc->queue_index = TXCMD_QUEUE;
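
The added check above makes SendTxCommandPacket() fail cleanly when dev_alloc_skb() returns NULL instead of writing into a buffer that was never allocated. A generic userspace sketch of the same check-before-use pattern; struct buf and the helper names are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char cb[48]; unsigned char *data; };

static struct buf *alloc_buf(size_t len)
{
    struct buf *b = calloc(1, sizeof(*b));

    if (!b)
        return NULL;
    b->data = malloc(len);
    if (!b->data) {
        free(b);
        return NULL;
    }
    return b;
}

static int send_cmd(const void *payload, size_t len)
{
    struct buf *b = alloc_buf(len + 32);

    if (!b)                      /* bail out instead of dereferencing NULL */
        return -1;
    memcpy(b->data, payload, len);
    /* ... hand the buffer to the transmit path here ... */
    free(b->data);
    free(b);
    return 0;
}

int main(void)
{
    return send_cmd("ping", 4) ? 1 : 0;
}
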
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index dbf11ecb794e..19d3cf451b88 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
172 if (!CARDbIsOFDMinBasicRate(pDevice)) { 172 if (!CARDbIsOFDMinBasicRate(pDevice)) {
173 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO 173 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
174 "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); 174 "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
175 if (wRateIdx > RATE_24M) 175 if (wRateIdx > RATE_24M)
176 wRateIdx = RATE_24M; 176 wRateIdx = RATE_24M;
177 return wRateIdx; 177 return wRateIdx;
178 } 178 }
179 179
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8a20e5..8872e0f84f40 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
1634 if (pMgmt == NULL) 1634 if (pMgmt == NULL)
1635 return -EFAULT; 1635 return -EFAULT;
1636 1636
1637 if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
1638 return -ENODEV;
1639
1637 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); 1640 buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
1638 if (buf == NULL) 1641 if (buf == NULL)
1639 return -ENOMEM; 1642 return -ENOMEM;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 536971786ae8..6f9d28182445 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
1098 memset(pMgmt->abyCurrBSSID, 0, 6); 1098 memset(pMgmt->abyCurrBSSID, 0, 6);
1099 pMgmt->eCurrState = WMAC_STATE_IDLE; 1099 pMgmt->eCurrState = WMAC_STATE_IDLE;
1100 1100
1101 pDevice->flags &= ~DEVICE_FLAGS_OPENED;
1102
1101 device_free_tx_bufs(pDevice); 1103 device_free_tx_bufs(pDevice);
1102 device_free_rx_bufs(pDevice); 1104 device_free_rx_bufs(pDevice);
1103 device_free_int_bufs(pDevice); 1105 device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
1109 usb_free_urb(pDevice->pInterruptURB); 1111 usb_free_urb(pDevice->pInterruptURB);
1110 1112
1111 BSSvClearNodeDBTable(pDevice, 0); 1113 BSSvClearNodeDBTable(pDevice, 0);
1112 pDevice->flags &=(~DEVICE_FLAGS_OPENED);
1113 1114
1114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); 1115 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
1115 1116
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index fb743a8811bb..14f3e852215d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); 148 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
149 149
150 for (ii = 0; ii < pDevice->cbTD; ii++) { 150 for (ii = 0; ii < pDevice->cbTD; ii++) {
151 if (!pDevice->apTD[ii])
152 return NULL;
151 pContext = pDevice->apTD[ii]; 153 pContext = pDevice->apTD[ii];
152 if (pContext->bBoolInUse == false) { 154 if (pContext->bBoolInUse == false) {
153 pContext->bBoolInUse = true; 155 pContext->bBoolInUse = true;
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
index efc56987a60b..7db6f03a0054 100644
--- a/drivers/staging/xillybus/xillybus_core.c
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -2054,7 +2054,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
2054 NULL, 2054 NULL,
2055 MKDEV(major, i), 2055 MKDEV(major, i),
2056 NULL, 2056 NULL,
2057 devname); 2057 "%s", devname);
2058 2058
2059 if (IS_ERR(device)) { 2059 if (IS_ERR(device)) {
2060 pr_warn("xillybus: Failed to create %s " 2060 pr_warn("xillybus: Failed to create %s "
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 91d94b564433..2c4ed52ca849 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -981,4 +981,3 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
981MODULE_LICENSE("Dual BSD/GPL"); 981MODULE_LICENSE("Dual BSD/GPL");
982MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); 982MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
983MODULE_DESCRIPTION("Compressed RAM Block Device"); 983MODULE_DESCRIPTION("Compressed RAM Block Device");
984MODULE_ALIAS("devname:zram");
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c63..38e44b9abf0f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
753 753
754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755{ 755{
756 struct iscsi_cmd *cmd; 756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
757 758
758 conn->exp_statsn = exp_statsn; 759 conn->exp_statsn = exp_statsn;
759 760
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
761 return; 762 return;
762 763
763 spin_lock_bh(&conn->cmd_lock); 764 spin_lock_bh(&conn->cmd_lock);
764 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
765 spin_lock(&cmd->istate_lock); 766 spin_lock(&cmd->istate_lock);
766 if ((cmd->i_state == ISTATE_SENT_STATUS) && 767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
767 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
768 cmd->i_state = ISTATE_REMOVE; 769 cmd->i_state = ISTATE_REMOVE;
769 spin_unlock(&cmd->istate_lock); 770 spin_unlock(&cmd->istate_lock);
770 iscsit_add_cmd_to_immediate_queue(cmd, conn, 771 list_move_tail(&cmd->i_conn_node, &ack_list);
771 cmd->i_state);
772 continue; 772 continue;
773 } 773 }
774 spin_unlock(&cmd->istate_lock); 774 spin_unlock(&cmd->istate_lock);
775 } 775 }
776 spin_unlock_bh(&conn->cmd_lock); 776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
777} 782}
778 783
779static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 784static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
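
The rework above moves acknowledged commands onto a private ack_list while conn->cmd_lock is held and only frees them after the lock has been dropped. The sketch below mimics that shape in userspace with a pthread mutex and a hand-rolled list; it is not the kernel list API, it just follows the same move-under-lock, release-outside-the-lock pattern.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}
static void list_move_tail(struct node *n, struct node *h)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    list_add_tail(n, h);
}

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node conn_cmds;

/* Under the lock, only *move* finished entries to a private list; the
 * potentially slow release happens after the lock is dropped. */
static void ack_up_to(int exp)
{
    struct node ack_list, *n, *tmp;

    list_init(&ack_list);
    pthread_mutex_lock(&cmd_lock);
    for (n = conn_cmds.next; n != &conn_cmds; n = tmp) {
        tmp = n->next;
        if (n->id < exp)
            list_move_tail(n, &ack_list);
    }
    pthread_mutex_unlock(&cmd_lock);

    for (n = ack_list.next; n != &ack_list; n = tmp) {
        tmp = n->next;
        printf("freeing cmd %d outside the lock\n", n->id);
        free(n);
    }
}

int main(void)
{
    int i;

    list_init(&conn_cmds);
    for (i = 0; i < 5; i++) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->id = i;
        list_add_tail(n, &conn_cmds);
    }
    ack_up_to(3);
    return 0;
}
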
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1d..ef6d836a4d09 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
1192 */ 1192 */
1193alloc_tags: 1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS; 1195 tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
1197 1197
1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f2de28e178fd..b0cac0c342e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736 * Fallthrough 736 * Fallthrough
737 */ 737 */
738 case ISCSI_OP_SCSI_TMFUNC: 738 case ISCSI_OP_SCSI_TMFUNC:
739 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 739 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
741 __iscsit_free_cmd(cmd, true, shutdown); 741 __iscsit_free_cmd(cmd, true, shutdown);
742 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 742 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
752 se_cmd = &cmd->se_cmd; 752 se_cmd = &cmd->se_cmd;
753 __iscsit_free_cmd(cmd, true, shutdown); 753 __iscsit_free_cmd(cmd, true, shutdown);
754 754
755 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 755 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, true, shutdown); 757 __iscsit_free_cmd(cmd, true, shutdown);
758 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 758 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..4714c6f8da4b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -349,7 +349,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{ 349{
350 struct se_device *dev = cmd->se_dev; 350 struct se_device *dev = cmd->se_dev;
351 351
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 352 /*
353 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
354 * within target_complete_ok_work() if the command was successfully
355 * sent to the backend driver.
356 */
357 spin_lock_irq(&cmd->t_state_lock);
358 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
359 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
360 spin_unlock_irq(&cmd->t_state_lock);
361
353 /* 362 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 363 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission. 364 * before the original READ I/O submission.
@@ -363,7 +372,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{ 372{
364 struct se_device *dev = cmd->se_dev; 373 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg; 374 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr; 375 unsigned char *buf = NULL, *addr;
367 struct sg_mapping_iter m; 376 struct sg_mapping_iter m;
368 unsigned int offset = 0, len; 377 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb; 378 unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +387,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 */ 387 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 388 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE; 389 return TCM_NO_SENSE;
390 /*
391 * Immediately exit + release dev->caw_sem if command has already
392 * been failed with a non-zero SCSI status.
393 */
394 if (cmd->scsi_status) {
395 pr_err("compare_and_write_callback: non zero scsi_status:"
396 " 0x%02x\n", cmd->scsi_status);
397 goto out;
398 }
381 399
382 buf = kzalloc(cmd->data_length, GFP_KERNEL); 400 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) { 401 if (!buf) {
@@ -508,6 +526,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
508 cmd->transport_complete_callback = NULL; 526 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 527 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 } 528 }
529 /*
530 * Reset cmd->data_length to individual block_size in order to not
531 * confuse backend drivers that depend on this value matching the
532 * size of the I/O being submitted.
533 */
534 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
511 535
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 536 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE); 537 DMA_FROM_DEVICE);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac0..81e945eefbbd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
236{ 236{
237 int rc; 237 int rc;
238 238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
240 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
240 if (!se_sess->sess_cmd_map) { 241 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
242 return -ENOMEM; 243 if (!se_sess->sess_cmd_map) {
244 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
245 return -ENOMEM;
246 }
243 } 247 }
244 248
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) { 250 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool," 251 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num); 252 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map); 253 if (is_vmalloc_addr(se_sess->sess_cmd_map))
254 vfree(se_sess->sess_cmd_map);
255 else
256 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL; 257 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM; 258 return -ENOMEM;
252 } 259 }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
412{ 419{
413 if (se_sess->sess_cmd_map) { 420 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map); 422 if (is_vmalloc_addr(se_sess->sess_cmd_map))
423 vfree(se_sess->sess_cmd_map);
424 else
425 kfree(se_sess->sess_cmd_map);
416 } 426 }
417 kmem_cache_free(se_sess_cache, se_sess); 427 kmem_cache_free(se_sess_cache, se_sess);
418} 428}
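
The allocation change above tries kzalloc() with __GFP_NOWARN first, falls back to vzalloc() for large tag maps, and frees with vfree() or kfree() depending on is_vmalloc_addr(). As a loose userspace analogue, with calloc standing in for kzalloc, anonymous mmap for vzalloc, and a recorded flag instead of is_vmalloc_addr(); struct tag_map and its helpers are made up for the illustration.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

struct tag_map {
    void  *buf;
    size_t len;
    int    mmapped;          /* 1 if the fallback allocator was used */
};

static int tag_map_alloc(struct tag_map *m, size_t len)
{
    m->len = len;
    m->buf = calloc(1, len);             /* primary, zeroing allocator */
    m->mmapped = 0;
    if (!m->buf) {
        m->buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  /* fallback */
        if (m->buf == MAP_FAILED)
            return -1;
        m->mmapped = 1;
    }
    return 0;
}

static void tag_map_free(struct tag_map *m)
{
    if (m->mmapped)
        munmap(m->buf, m->len);          /* free with the matching API */
    else
        free(m->buf);
}

int main(void)
{
    struct tag_map m;

    if (tag_map_alloc(&m, 1 << 20))
        return 1;
    printf("allocated 1 MiB via %s\n", m.mmapped ? "mmap" : "calloc");
    tag_map_free(&m);
    return 0;
}
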
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adca..3da4fd10b9f8 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -298,8 +298,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
298 (unsigned long long)xop->dst_lba); 298 (unsigned long long)xop->dst_lba);
299 299
300 if (dc != 0) { 300 if (dc != 0) {
301 xop->dbl = (desc[29] << 16) & 0xff; 301 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] << 8) & 0xff; 302 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff; 303 xop->dbl |= desc[31] & 0xff;
304 304
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
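
The xcopy fix above corrects the order of shift and mask when assembling the 24-bit big-endian block-length field from the segment descriptor. A tiny demonstration of why the old expression always produced a wrong value:

#include <stdio.h>
#include <stdint.h>

/* Assemble a 24-bit big-endian field from three descriptor bytes. */
static uint32_t get_be24(const uint8_t *desc)
{
    return ((uint32_t)desc[0] << 16) |
           ((uint32_t)desc[1] << 8)  |
            desc[2];
}

int main(void)
{
    const uint8_t desc[3] = { 0x01, 0x02, 0x03 };

    /* Broken order of operations from the old code: shifting first and
     * masking with 0xff afterwards throws the high bits away. */
    uint32_t broken = ((desc[0] << 16) & 0xff) |
                      ((desc[1] << 8)  & 0xff) | desc[2];

    printf("broken: 0x%06x  fixed: 0x%06x\n", broken, get_be24(desc));
    return 0;
}
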
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index e61c36cbb866..c193af6a628f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@ struct console xenboot_console = {
636 .name = "xenboot", 636 .name = "xenboot",
637 .write = xenboot_write_console, 637 .write = xenboot_write_console,
638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, 638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
639 .index = -1,
639}; 640};
640#endif /* CONFIG_EARLY_PRINTK */ 641#endif /* CONFIG_EARLY_PRINTK */
641 642
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9a9ddd1d0bc..7a744b69c3d1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; 1758 canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
1759 if (canon_change) { 1759 if (canon_change) {
1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); 1760 bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1761 ldata->line_start = 0; 1761 ldata->line_start = ldata->canon_head = ldata->read_tail;
1762 ldata->canon_head = ldata->read_tail;
1763 ldata->erasing = 0; 1762 ldata->erasing = 0;
1764 ldata->lnext = 0; 1763 ldata->lnext = 0;
1765 } 1764 }
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2184 2183
2185 if (!input_available_p(tty, 0)) { 2184 if (!input_available_p(tty, 0)) {
2186 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2187 retval = -EIO; 2186 up_read(&tty->termios_rwsem);
2188 break; 2187 tty_flush_to_ldisc(tty);
2189 } 2188 down_read(&tty->termios_rwsem);
2190 if (tty_hung_up_p(file)) 2189 if (!input_available_p(tty, 0)) {
2191 break; 2190 retval = -EIO;
2192 if (!timeout) 2191 break;
2193 break; 2192 }
2194 if (file->f_flags & O_NONBLOCK) { 2193 } else {
2195 retval = -EAGAIN; 2194 if (tty_hung_up_p(file))
2196 break; 2195 break;
2197 } 2196 if (!timeout)
2198 if (signal_pending(current)) { 2197 break;
2199 retval = -ERESTARTSYS; 2198 if (file->f_flags & O_NONBLOCK) {
2200 break; 2199 retval = -EAGAIN;
2201 } 2200 break;
2202 n_tty_set_room(tty); 2201 }
2203 up_read(&tty->termios_rwsem); 2202 if (signal_pending(current)) {
2203 retval = -ERESTARTSYS;
2204 break;
2205 }
2206 n_tty_set_room(tty);
2207 up_read(&tty->termios_rwsem);
2204 2208
2205 timeout = schedule_timeout(timeout); 2209 timeout = schedule_timeout(timeout);
2206 2210
2207 down_read(&tty->termios_rwsem); 2211 down_read(&tty->termios_rwsem);
2208 continue; 2212 continue;
2213 }
2209 } 2214 }
2210 __set_current_state(TASK_RUNNING); 2215 __set_current_state(TASK_RUNNING);
2211 2216
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 52379e56a31e..44077c0b7670 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
667 667
668static int dma_push_rx(struct eg20t_port *priv, int size) 668static int dma_push_rx(struct eg20t_port *priv, int size)
669{ 669{
670 struct tty_struct *tty;
671 int room; 670 int room;
672 struct uart_port *port = &priv->port; 671 struct uart_port *port = &priv->port;
673 struct tty_port *tport = &port->state->port; 672 struct tty_port *tport = &port->state->port;
674 673
675 port = &priv->port;
676 tty = tty_port_tty_get(tport);
677 if (!tty) {
678 dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
679 return 0;
680 }
681
682 room = tty_buffer_request_room(tport, size); 674 room = tty_buffer_request_room(tport, size);
683 675
684 if (room < size) 676 if (room < size)
685 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 677 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
686 size - room); 678 size - room);
687 if (!room) 679 if (!room)
688 return room; 680 return 0;
689 681
690 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); 682 tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
691 683
692 port->icount.rx += room; 684 port->icount.rx += room;
693 tty_kref_put(tty);
694 685
695 return room; 686 return room;
696} 687}
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
1098 if (tty == NULL) { 1089 if (tty == NULL) {
1099 for (i = 0; error_msg[i] != NULL; i++) 1090 for (i = 0; error_msg[i] != NULL; i++)
1100 dev_err(&priv->pdev->dev, error_msg[i]); 1091 dev_err(&priv->pdev->dev, error_msg[i]);
1092 } else {
1093 tty_kref_put(tty);
1101 } 1094 }
1102} 1095}
1103 1096
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index d0d972f7e43e..0489a2bdcdf9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
732static void tegra_uart_stop_rx(struct uart_port *u) 732static void tegra_uart_stop_rx(struct uart_port *u)
733{ 733{
734 struct tegra_uart_port *tup = to_tegra_uport(u); 734 struct tegra_uart_port *tup = to_tegra_uport(u);
735 struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); 735 struct tty_struct *tty;
736 struct tty_port *port = &u->state->port; 736 struct tty_port *port = &u->state->port;
737 struct dma_tx_state state; 737 struct dma_tx_state state;
738 unsigned long ier; 738 unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
744 if (!tup->rx_in_progress) 744 if (!tup->rx_in_progress)
745 return; 745 return;
746 746
747 tty = tty_port_tty_get(&tup->uport.state->port);
748
747 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ 749 tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
748 750
749 ier = tup->ier_shadow; 751 ier = tup->ier_shadow;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a9355ce1c6d5..3a1a01af9a80 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -854,7 +854,8 @@ void disassociate_ctty(int on_exit)
854 struct pid *tty_pgrp = tty_get_pgrp(tty); 854 struct pid *tty_pgrp = tty_get_pgrp(tty);
855 if (tty_pgrp) { 855 if (tty_pgrp) {
856 kill_pgrp(tty_pgrp, SIGHUP, on_exit); 856 kill_pgrp(tty_pgrp, SIGHUP, on_exit);
857 kill_pgrp(tty_pgrp, SIGCONT, on_exit); 857 if (!on_exit)
858 kill_pgrp(tty_pgrp, SIGCONT, on_exit);
858 put_pid(tty_pgrp); 859 put_pid(tty_pgrp);
859 } 860 }
860 } 861 }
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 03ba081c5772..6fd60fece6b4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1201 } 1201 }
1202 return 0; 1202 return 0;
1203 case TCFLSH: 1203 case TCFLSH:
1204 retval = tty_check_change(tty);
1205 if (retval)
1206 return retval;
1204 return __tty_perform_flush(tty, arg); 1207 return __tty_perform_flush(tty, arg);
1205 default: 1208 default:
1206 /* Try the mode commands */ 1209 /* Try the mode commands */
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 4a851e15e58c..77b47d82c9a6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
1config USB_CHIPIDEA 1config USB_CHIPIDEA
2 tristate "ChipIdea Highspeed Dual Role Controller" 2 tristate "ChipIdea Highspeed Dual Role Controller"
3 depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 help 4 help
5 Say Y here if your system has a dual role high speed USB 5 Say Y here if your system has a dual role high speed USB
6 controller based on ChipIdea silicon IP. Currently, only the 6 controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 74d998d9b45b..be822a2c1776 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
131 if (ret) { 131 if (ret) {
132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", 132 dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
133 ret); 133 ret);
134 goto err_clk; 134 goto err_phy;
135 } 135 }
136 } 136 }
137 137
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
143 dev_err(&pdev->dev, 143 dev_err(&pdev->dev,
144 "Can't register ci_hdrc platform device, err=%d\n", 144 "Can't register ci_hdrc platform device, err=%d\n",
145 ret); 145 ret);
146 goto err_clk; 146 goto err_phy;
147 } 147 }
148 148
149 if (data->usbmisc_data) { 149 if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
164 164
165disable_device: 165disable_device:
166 ci_hdrc_remove_device(data->ci_pdev); 166 ci_hdrc_remove_device(data->ci_pdev);
167err_phy:
168 if (data->phy)
169 usb_phy_shutdown(data->phy);
167err_clk: 170err_clk:
168 clk_disable_unprepare(data->clk); 171 clk_disable_unprepare(data->clk);
169 return ret; 172 return ret;
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 042320a6c6c7..d514332ac081 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), 129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata, 130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
131 }, 131 },
132 { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ } 132 {
133 /* Intel Clovertrail */
134 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
135 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
136 },
137 { 0 } /* end: all zeroes */
133}; 138};
134MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); 139MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
135 140
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 94626409559a..23763dcec069 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
605 dbg_remove_files(ci); 605 dbg_remove_files(ci);
606 free_irq(ci->irq, ci); 606 free_irq(ci->irq, ci);
607 ci_role_destroy(ci); 607 ci_role_destroy(ci);
608 kfree(ci->hw_bank.regmap);
608 609
609 return 0; 610 return 0;
610} 611}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6b4c2f2eb946..9333083dd111 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
1600 for (i = 0; i < ci->hw_ep_max; i++) { 1600 for (i = 0; i < ci->hw_ep_max; i++) {
1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; 1601 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1602 1602
1603 if (hwep->pending_td)
1604 free_pending_td(hwep);
1603 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); 1605 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1604 } 1606 }
1605} 1607}
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
1667 if (ci->platdata->notify_event) 1669 if (ci->platdata->notify_event)
1668 ci->platdata->notify_event(ci, 1670 ci->platdata->notify_event(ci,
1669 CI_HDRC_CONTROLLER_STOPPED_EVENT); 1671 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1670 ci->driver = NULL;
1671 spin_unlock_irqrestore(&ci->lock, flags); 1672 spin_unlock_irqrestore(&ci->lock, flags);
1672 _gadget_stop_activity(&ci->gadget); 1673 _gadget_stop_activity(&ci->gadget);
1673 spin_lock_irqsave(&ci->lock, flags); 1674 spin_lock_irqsave(&ci->lock, flags);
1674 pm_runtime_put(&ci->gadget.dev); 1675 pm_runtime_put(&ci->gadget.dev);
1675 } 1676 }
1676 1677
1678 ci->driver = NULL;
1677 spin_unlock_irqrestore(&ci->lock, flags); 1679 spin_unlock_irqrestore(&ci->lock, flags);
1678 1680
1679 return 0; 1681 return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 737e3c19967b..71dc5d768fa5 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
742 if ((index & ~USB_DIR_IN) == 0) 742 if ((index & ~USB_DIR_IN) == 0)
743 return 0; 743 return 0;
744 ret = findintfep(ps->dev, index); 744 ret = findintfep(ps->dev, index);
745 if (ret < 0) {
746 /*
747 * Some not fully compliant Win apps seem to get
748 * index wrong and have the endpoint number here
749 * rather than the endpoint address (with the
750 * correct direction). Win does let this through,
751 * so we'll not reject it here but leave it to
752 * the device to not break KVM. But we warn.
753 */
754 ret = findintfep(ps->dev, index ^ 0x80);
755 if (ret >= 0)
756 dev_info(&ps->dev->dev,
757 "%s: process %i (%s) requesting ep %02x but needs %02x\n",
758 __func__, task_pid_nr(current),
759 current->comm, index, index ^ 0x80);
760 }
745 if (ret >= 0) 761 if (ret >= 0)
746 ret = checkintf(ps, ret); 762 ret = checkintf(ps, ret);
747 break; 763 break;
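
The fallback added above retries the endpoint lookup with index ^ 0x80, that is, with the USB direction bit toggled, before logging the non-compliant request. A short illustration of what that XOR does to an endpoint address; the example values are arbitrary.

#include <stdio.h>
#include <stdint.h>

/* USB endpoint addresses carry the direction in bit 7 (0x80 = IN). */
#define USB_DIR_IN 0x80u

int main(void)
{
    uint8_t requested = 0x02;              /* bare endpoint number, no direction */
    uint8_t flipped   = requested ^ USB_DIR_IN;

    printf("requested ep %02x, retrying as %02x (%s)\n",
           requested, flipped,
           (flipped & USB_DIR_IN) ? "IN" : "OUT");
    return 0;
}
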
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dde4c83516a1..e6b682c6c236 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
3426 unsigned long long u2_pel; 3426 unsigned long long u2_pel;
3427 int ret; 3427 int ret;
3428 3428
3429 if (udev->state != USB_STATE_CONFIGURED)
3430 return 0;
3431
3429 /* Convert SEL and PEL stored in ns to us */ 3432 /* Convert SEL and PEL stored in ns to us */
3430 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 3433 u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
3431 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 3434 u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b870872e020f..70fc43027a5c 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,6 @@
1config USB_DWC3 1config USB_DWC3
2 tristate "DesignWare USB3 DRD Core Support" 2 tristate "DesignWare USB3 DRD Core Support"
3 depends on (USB || USB_GADGET) && HAS_DMA 3 depends on (USB || USB_GADGET) && HAS_DMA
4 depends on EXTCON
5 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD 4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
6 help 5 help
7 Say Y or M here if your system has a Dual Role SuperSpeed 6 Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 9b138129e856..2e252aae51ca 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -28,6 +28,8 @@
28/* FIXME define these in <linux/pci_ids.h> */ 28/* FIXME define these in <linux/pci_ids.h> */
29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3 29#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
32#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
31 33
32struct dwc3_pci { 34struct dwc3_pci {
33 struct device *dev; 35 struct device *dev;
@@ -187,6 +189,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
187 PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 189 PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
188 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), 190 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
189 }, 191 },
192 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
193 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
190 { } /* Terminating Entry */ 194 { } /* Terminating Entry */
191}; 195};
192MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); 196MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f168eaebdef8..5452c0fce360 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2611,15 +2611,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
2611 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2611 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2612 if (ret) { 2612 if (ret) {
2613 dev_err(dwc->dev, "failed to register udc\n"); 2613 dev_err(dwc->dev, "failed to register udc\n");
2614 goto err5; 2614 goto err4;
2615 } 2615 }
2616 2616
2617 return 0; 2617 return 0;
2618 2618
2619err5:
2620 dwc3_gadget_free_endpoints(dwc);
2621
2622err4: 2619err4:
2620 dwc3_gadget_free_endpoints(dwc);
2623 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2621 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2624 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2622 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2625 2623
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 5a5acf22c694..e126b6b248e6 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -113,12 +113,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
113 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; 113 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
114 } 114 }
115 115
116 fi_ecm = usb_get_function_instance("ecm");
117 if (IS_ERR(fi_ecm)) {
118 status = PTR_ERR(fi_ecm);
119 goto err_func_ecm;
120 }
121
122 f_ecm = usb_get_function(fi_ecm); 116 f_ecm = usb_get_function(fi_ecm);
123 if (IS_ERR(f_ecm)) { 117 if (IS_ERR(f_ecm)) {
124 status = PTR_ERR(f_ecm); 118 status = PTR_ERR(f_ecm);
@@ -129,35 +123,24 @@ static int __init cdc_do_config(struct usb_configuration *c)
129 if (status) 123 if (status)
130 goto err_add_ecm; 124 goto err_add_ecm;
131 125
132 fi_serial = usb_get_function_instance("acm");
133 if (IS_ERR(fi_serial)) {
134 status = PTR_ERR(fi_serial);
135 goto err_get_acm;
136 }
137
138 f_acm = usb_get_function(fi_serial); 126 f_acm = usb_get_function(fi_serial);
139 if (IS_ERR(f_acm)) { 127 if (IS_ERR(f_acm)) {
140 status = PTR_ERR(f_acm); 128 status = PTR_ERR(f_acm);
141 goto err_func_acm; 129 goto err_get_acm;
142 } 130 }
143 131
144 status = usb_add_function(c, f_acm); 132 status = usb_add_function(c, f_acm);
145 if (status) 133 if (status)
146 goto err_add_acm; 134 goto err_add_acm;
147
148 return 0; 135 return 0;
149 136
150err_add_acm: 137err_add_acm:
151 usb_put_function(f_acm); 138 usb_put_function(f_acm);
152err_func_acm:
153 usb_put_function_instance(fi_serial);
154err_get_acm: 139err_get_acm:
155 usb_remove_function(c, f_ecm); 140 usb_remove_function(c, f_ecm);
156err_add_ecm: 141err_add_ecm:
157 usb_put_function(f_ecm); 142 usb_put_function(f_ecm);
158err_get_ecm: 143err_get_ecm:
159 usb_put_function_instance(fi_ecm);
160err_func_ecm:
161 return status; 144 return status;
162} 145}
163 146
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 06ecd08fd57a..b8a2376971a4 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
923 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); 923 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
924 struct dummy *dum = dum_hcd->dum; 924 struct dummy *dum = dum_hcd->dum;
925 925
926 dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n", 926 if (driver)
927 driver->driver.name); 927 dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
928 driver->driver.name);
928 929
929 dum->driver = NULL; 930 dum->driver = NULL;
930 931
@@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
1000{ 1001{
1001 struct dummy *dum = platform_get_drvdata(pdev); 1002 struct dummy *dum = platform_get_drvdata(pdev);
1002 1003
1003 usb_del_gadget_udc(&dum->gadget);
1004 device_remove_file(&dum->gadget.dev, &dev_attr_function); 1004 device_remove_file(&dum->gadget.dev, &dev_attr_function);
1005 usb_del_gadget_udc(&dum->gadget);
1005 return 0; 1006 return 0;
1006} 1007}
1007 1008
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index edab45da3741..8d9e6f7e8f1a 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -995,7 +995,7 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
995 usb_ep_free_request(ecm->notify, ecm->notify_req); 995 usb_ep_free_request(ecm->notify, ecm->notify_req);
996} 996}
997 997
998struct usb_function *ecm_alloc(struct usb_function_instance *fi) 998static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
999{ 999{
1000 struct f_ecm *ecm; 1000 struct f_ecm *ecm;
1001 struct f_ecm_opts *opts; 1001 struct f_ecm_opts *opts;
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index d00392d879db..d61c11d765d0 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -624,7 +624,7 @@ static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
624 usb_free_all_descriptors(f); 624 usb_free_all_descriptors(f);
625} 625}
626 626
627struct usb_function *eem_alloc(struct usb_function_instance *fi) 627static struct usb_function *eem_alloc(struct usb_function_instance *fi)
628{ 628{
629 struct f_eem *eem; 629 struct f_eem *eem;
630 struct f_eem_opts *opts; 630 struct f_eem_opts *opts;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1a66c5baa0d1..44cf775a8627 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
1034 struct ffs_file_perms perms; 1034 struct ffs_file_perms perms;
1035 umode_t root_mode; 1035 umode_t root_mode;
1036 const char *dev_name; 1036 const char *dev_name;
1037 union { 1037 struct ffs_data *ffs_data;
1038 /* set by ffs_fs_mount(), read by ffs_sb_fill() */
1039 void *private_data;
1040 /* set by ffs_sb_fill(), read by ffs_fs_mount */
1041 struct ffs_data *ffs_data;
1042 };
1043}; 1038};
1044 1039
1045static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) 1040static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1046{ 1041{
1047 struct ffs_sb_fill_data *data = _data; 1042 struct ffs_sb_fill_data *data = _data;
1048 struct inode *inode; 1043 struct inode *inode;
1049 struct ffs_data *ffs; 1044 struct ffs_data *ffs = data->ffs_data;
1050 1045
1051 ENTER(); 1046 ENTER();
1052 1047
1053 /* Initialise data */
1054 ffs = ffs_data_new();
1055 if (unlikely(!ffs))
1056 goto Enomem;
1057
1058 ffs->sb = sb; 1048 ffs->sb = sb;
1059 ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); 1049 data->ffs_data = NULL;
1060 if (unlikely(!ffs->dev_name))
1061 goto Enomem;
1062 ffs->file_perms = data->perms;
1063 ffs->private_data = data->private_data;
1064
1065 /* used by the caller of this function */
1066 data->ffs_data = ffs;
1067
1068 sb->s_fs_info = ffs; 1050 sb->s_fs_info = ffs;
1069 sb->s_blocksize = PAGE_CACHE_SIZE; 1051 sb->s_blocksize = PAGE_CACHE_SIZE;
1070 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1052 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1080 &data->perms); 1062 &data->perms);
1081 sb->s_root = d_make_root(inode); 1063 sb->s_root = d_make_root(inode);
1082 if (unlikely(!sb->s_root)) 1064 if (unlikely(!sb->s_root))
1083 goto Enomem; 1065 return -ENOMEM;
1084 1066
1085 /* EP0 file */ 1067 /* EP0 file */
1086 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, 1068 if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
1087 &ffs_ep0_operations, NULL))) 1069 &ffs_ep0_operations, NULL)))
1088 goto Enomem; 1070 return -ENOMEM;
1089 1071
1090 return 0; 1072 return 0;
1091
1092Enomem:
1093 return -ENOMEM;
1094} 1073}
1095 1074
1096static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1075static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1193 struct dentry *rv; 1172 struct dentry *rv;
1194 int ret; 1173 int ret;
1195 void *ffs_dev; 1174 void *ffs_dev;
1175 struct ffs_data *ffs;
1196 1176
1197 ENTER(); 1177 ENTER();
1198 1178
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1200 if (unlikely(ret < 0)) 1180 if (unlikely(ret < 0))
1201 return ERR_PTR(ret); 1181 return ERR_PTR(ret);
1202 1182
1183 ffs = ffs_data_new();
1184 if (unlikely(!ffs))
1185 return ERR_PTR(-ENOMEM);
1186 ffs->file_perms = data.perms;
1187
1188 ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
1189 if (unlikely(!ffs->dev_name)) {
1190 ffs_data_put(ffs);
1191 return ERR_PTR(-ENOMEM);
1192 }
1193
1203 ffs_dev = functionfs_acquire_dev_callback(dev_name); 1194 ffs_dev = functionfs_acquire_dev_callback(dev_name);
1204 if (IS_ERR(ffs_dev)) 1195 if (IS_ERR(ffs_dev)) {
1205 return ffs_dev; 1196 ffs_data_put(ffs);
1197 return ERR_CAST(ffs_dev);
1198 }
1199 ffs->private_data = ffs_dev;
1200 data.ffs_data = ffs;
1206 1201
1207 data.dev_name = dev_name;
1208 data.private_data = ffs_dev;
1209 rv = mount_nodev(t, flags, &data, ffs_sb_fill); 1202 rv = mount_nodev(t, flags, &data, ffs_sb_fill);
1210 1203 if (IS_ERR(rv) && data.ffs_data) {
1211 /* data.ffs_data is set by ffs_sb_fill */
1212 if (IS_ERR(rv))
1213 functionfs_release_dev_callback(data.ffs_data); 1204 functionfs_release_dev_callback(data.ffs_data);
1214 1205 ffs_data_put(data.ffs_data);
1206 }
1215 return rv; 1207 return rv;
1216} 1208}
1217 1209
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
2264 data->raw_descs + ret, 2256 data->raw_descs + ret,
2265 (sizeof data->raw_descs) - ret, 2257 (sizeof data->raw_descs) - ret,
2266 __ffs_func_bind_do_descs, func); 2258 __ffs_func_bind_do_descs, func);
2259 if (unlikely(ret < 0))
2260 goto error;
2267 } 2261 }
2268 2262
2269 /* 2263 /*
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 313b835eedfd..a01d7d38c016 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2260,10 +2260,12 @@ reset:
2260 /* Disable the endpoints */ 2260 /* Disable the endpoints */
2261 if (fsg->bulk_in_enabled) { 2261 if (fsg->bulk_in_enabled) {
2262 usb_ep_disable(fsg->bulk_in); 2262 usb_ep_disable(fsg->bulk_in);
2263 fsg->bulk_in->driver_data = NULL;
2263 fsg->bulk_in_enabled = 0; 2264 fsg->bulk_in_enabled = 0;
2264 } 2265 }
2265 if (fsg->bulk_out_enabled) { 2266 if (fsg->bulk_out_enabled) {
2266 usb_ep_disable(fsg->bulk_out); 2267 usb_ep_disable(fsg->bulk_out);
2268 fsg->bulk_out->driver_data = NULL;
2267 fsg->bulk_out_enabled = 0; 2269 fsg->bulk_out_enabled = 0;
2268 } 2270 }
2269 2271
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index 32db2eee2d87..bbbfd1948778 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1214,6 +1214,6 @@ static struct platform_driver fotg210_driver = {
1214 1214
1215module_platform_driver(fotg210_driver); 1215module_platform_driver(fotg210_driver);
1216 1216
1217MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>"); 1217MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
1218MODULE_LICENSE("GPL"); 1218MODULE_LICENSE("GPL");
1219MODULE_DESCRIPTION(DRIVER_DESC); 1219MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index f1dd6daabe21..b278abe52453 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -22,7 +22,7 @@
22 22
23MODULE_DESCRIPTION("FUSB300 USB gadget driver"); 23MODULE_DESCRIPTION("FUSB300 USB gadget driver");
24MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>"); 25MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
26MODULE_ALIAS("platform:fusb300_udc"); 26MODULE_ALIAS("platform:fusb300_udc");
27 27
28#define DRIVER_VERSION "20 October 2010" 28#define DRIVER_VERSION "20 October 2010"
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 2a1ebefd8f9e..23393254a8a3 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -179,7 +179,7 @@ err_conf:
179 return ret; 179 return ret;
180} 180}
181 181
182static int rndis_config_register(struct usb_composite_dev *cdev) 182static __ref int rndis_config_register(struct usb_composite_dev *cdev)
183{ 183{
184 static struct usb_configuration config = { 184 static struct usb_configuration config = {
185 .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM, 185 .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM,
@@ -194,7 +194,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
194 194
195#else 195#else
196 196
197static int rndis_config_register(struct usb_composite_dev *cdev) 197static __ref int rndis_config_register(struct usb_composite_dev *cdev)
198{ 198{
199 return 0; 199 return 0;
200} 200}
@@ -241,7 +241,7 @@ err_conf:
241 return ret; 241 return ret;
242} 242}
243 243
244static int cdc_config_register(struct usb_composite_dev *cdev) 244static __ref int cdc_config_register(struct usb_composite_dev *cdev)
245{ 245{
246 static struct usb_configuration config = { 246 static struct usb_configuration config = {
247 .bConfigurationValue = MULTI_CDC_CONFIG_NUM, 247 .bConfigurationValue = MULTI_CDC_CONFIG_NUM,
@@ -256,7 +256,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
256 256
257#else 257#else
258 258
259static int cdc_config_register(struct usb_composite_dev *cdev) 259static __ref int cdc_config_register(struct usb_composite_dev *cdev)
260{ 260{
261 return 0; 261 return 0;
262} 262}
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index bbb6e98c4384..561b30efb8ee 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -645,6 +645,7 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
645 struct mv_u3d_ep *ep; 645 struct mv_u3d_ep *ep;
646 struct mv_u3d_ep_context *ep_context; 646 struct mv_u3d_ep_context *ep_context;
647 u32 epxcr, direction; 647 u32 epxcr, direction;
648 unsigned long flags;
648 649
649 if (!_ep) 650 if (!_ep)
650 return -EINVAL; 651 return -EINVAL;
@@ -661,7 +662,9 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
661 direction = mv_u3d_ep_dir(ep); 662 direction = mv_u3d_ep_dir(ep);
662 663
663 /* nuke all pending requests (does flush) */ 664 /* nuke all pending requests (does flush) */
665 spin_lock_irqsave(&u3d->lock, flags);
664 mv_u3d_nuke(ep, -ESHUTDOWN); 666 mv_u3d_nuke(ep, -ESHUTDOWN);
667 spin_unlock_irqrestore(&u3d->lock, flags);
665 668
666 /* Disable the endpoint for Rx or Tx and reset the endpoint type */ 669 /* Disable the endpoint for Rx or Tx and reset the endpoint type */
667 if (direction == MV_U3D_EP_DIR_OUT) { 670 if (direction == MV_U3D_EP_DIR_OUT) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index cc9207473dbc..0ac6064aa3b8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
2054/* 2054/*
2055 * probe - binds to the platform device 2055 * probe - binds to the platform device
2056 */ 2056 */
2057static int __init pxa25x_udc_probe(struct platform_device *pdev) 2057static int pxa25x_udc_probe(struct platform_device *pdev)
2058{ 2058{
2059 struct pxa25x_udc *dev = &memory; 2059 struct pxa25x_udc *dev = &memory;
2060 int retval, irq; 2060 int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
2203 pullup_off(); 2203 pullup_off();
2204} 2204}
2205 2205
2206static int __exit pxa25x_udc_remove(struct platform_device *pdev) 2206static int pxa25x_udc_remove(struct platform_device *pdev)
2207{ 2207{
2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev);
2209 2209
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
2294 2294
2295static struct platform_driver udc_driver = { 2295static struct platform_driver udc_driver = {
2296 .shutdown = pxa25x_udc_shutdown, 2296 .shutdown = pxa25x_udc_shutdown,
2297 .remove = __exit_p(pxa25x_udc_remove), 2297 .probe = pxa25x_udc_probe,
2298 .remove = pxa25x_udc_remove,
2298 .suspend = pxa25x_udc_suspend, 2299 .suspend = pxa25x_udc_suspend,
2299 .resume = pxa25x_udc_resume, 2300 .resume = pxa25x_udc_resume,
2300 .driver = { 2301 .driver = {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
2303 }, 2304 },
2304}; 2305};
2305 2306
2306module_platform_driver_probe(udc_driver, pxa25x_udc_probe); 2307module_platform_driver(udc_driver);
2307 2308
2308MODULE_DESCRIPTION(DRIVER_DESC); 2309MODULE_DESCRIPTION(DRIVER_DESC);
2309MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); 2310MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index d69b36a99dbc..a8a99e4748d5 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
543 * FIFO, requests of >512 cause the endpoint to get stuck with a 543 * FIFO, requests of >512 cause the endpoint to get stuck with a
544 * fragment of the end of the transfer in it. 544 * fragment of the end of the transfer in it.
545 */ 545 */
546 if (can_write > 512) 546 if (can_write > 512 && !periodic)
547 can_write = 512; 547 can_write = 512;
548 548
549 /* 549 /*
@@ -2475,8 +2475,6 @@ irq_retry:
2475 if (gintsts & GINTSTS_ErlySusp) { 2475 if (gintsts & GINTSTS_ErlySusp) {
2476 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n"); 2476 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
2477 writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS); 2477 writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
2478
2479 s3c_hsotg_disconnect(hsotg);
2480 } 2478 }
2481 2479
2482 /* 2480 /*
@@ -2962,9 +2960,6 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2962 if (!hsotg) 2960 if (!hsotg)
2963 return -ENODEV; 2961 return -ENODEV;
2964 2962
2965 if (!driver || driver != hsotg->driver || !driver->unbind)
2966 return -EINVAL;
2967
2968 /* all endpoints should be shutdown */ 2963 /* all endpoints should be shutdown */
2969 for (ep = 0; ep < hsotg->num_of_eps; ep++) 2964 for (ep = 0; ep < hsotg->num_of_eps; ep++)
2970 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); 2965 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2972,15 +2967,15 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2972 spin_lock_irqsave(&hsotg->lock, flags); 2967 spin_lock_irqsave(&hsotg->lock, flags);
2973 2968
2974 s3c_hsotg_phy_disable(hsotg); 2969 s3c_hsotg_phy_disable(hsotg);
2975 regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
2976 2970
2977 hsotg->driver = NULL; 2971 if (!driver)
2972 hsotg->driver = NULL;
2973
2978 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 2974 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2979 2975
2980 spin_unlock_irqrestore(&hsotg->lock, flags); 2976 spin_unlock_irqrestore(&hsotg->lock, flags);
2981 2977
2982 dev_info(hsotg->dev, "unregistered gadget driver '%s'\n", 2978 regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
2983 driver->driver.name);
2984 2979
2985 return 0; 2980 return 0;
2986} 2981}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 947b009009f1..f2407b2e8a99 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
130 } 130 }
131 131
132 /* Enable USB controller, 83xx or 8536 */ 132 /* Enable USB controller, 83xx or 8536 */
133 if (pdata->have_sysif_regs) 133 if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); 134 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
135 135
136 /* Don't need to set host mode here. It will be done by tdi_reset() */ 136 /* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
232 case FSL_USB2_PHY_ULPI: 232 case FSL_USB2_PHY_ULPI:
233 if (pdata->have_sysif_regs && pdata->controller_ver) { 233 if (pdata->have_sysif_regs && pdata->controller_ver) {
234 /* controller version 1.6 or above */ 234 /* controller version 1.6 or above */
235 clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
235 setbits32(non_ehci + FSL_SOC_USB_CTRL, 236 setbits32(non_ehci + FSL_SOC_USB_CTRL,
236 ULPI_PHY_CLK_SEL); 237 ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
237 /*
238 * Due to controller issue of PHY_CLK_VALID in ULPI
239 * mode, we set USB_CTRL_USB_EN before checking
240 * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
241 */
242 clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
243 UTMI_PHY_EN, USB_CTRL_USB_EN);
244 } 238 }
245 portsc |= PORT_PTS_ULPI; 239 portsc |= PORT_PTS_ULPI;
246 break; 240 break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
270 if (pdata->have_sysif_regs && pdata->controller_ver && 264 if (pdata->have_sysif_regs && pdata->controller_ver &&
271 (phy_mode == FSL_USB2_PHY_ULPI)) { 265 (phy_mode == FSL_USB2_PHY_ULPI)) {
272 /* check PHY_CLK_VALID to get phy clk valid */ 266 /* check PHY_CLK_VALID to get phy clk valid */
273 if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & 267 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
274 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) { 268 PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
269 in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
275 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); 270 printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
276 return -EINVAL; 271 return -EINVAL;
277 } 272 }
@@ -669,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
669 * generic hardware linkage 664 * generic hardware linkage
670 */ 665 */
671 .irq = ehci_irq, 666 .irq = ehci_irq,
672 .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 667 .flags = HCD_USB2 | HCD_MEMORY,
673 668
674 /* 669 /*
675 * basic lifecycle operations 670 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index b52a66ce92e8..83ab51af250f 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
43 * generic hardware linkage 43 * generic hardware linkage
44 */ 44 */
45 .irq = ehci_irq, 45 .irq = ehci_irq,
46 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 46 .flags = HCD_MEMORY | HCD_USB2,
47 47
48 /* 48 /*
49 * basic lifecycle operations 49 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5d6022f30ebe..86ab9fd9fe9e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1158,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
1158 * generic hardware linkage 1158 * generic hardware linkage
1159 */ 1159 */
1160 .irq = ehci_irq, 1160 .irq = ehci_irq,
1161 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 1161 .flags = HCD_MEMORY | HCD_USB2,
1162 1162
1163 /* 1163 /*
1164 * basic lifecycle operations 1164 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 417c10da9450..35cdbd88bbbe 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
96 * generic hardware linkage 96 * generic hardware linkage
97 */ 97 */
98 .irq = ehci_irq, 98 .irq = ehci_irq,
99 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 99 .flags = HCD_MEMORY | HCD_USB2,
100 100
101 /* 101 /*
102 * basic lifecycle operations 102 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ab0397e4d8f3..45cc00158412 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
51 * generic hardware linkage 51 * generic hardware linkage
52 */ 52 */
53 .irq = ehci_irq, 53 .irq = ehci_irq,
54 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 54 .flags = HCD_MEMORY | HCD_USB2,
55 55
56 /* 56 /*
57 * basic lifecycle operations 57 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 6bd299e61f58..854c2ec7b699 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
361 .remove = usb_hcd_pci_remove, 361 .remove = usb_hcd_pci_remove,
362 .shutdown = usb_hcd_pci_shutdown, 362 .shutdown = usb_hcd_pci_shutdown,
363 363
364#ifdef CONFIG_PM_SLEEP 364#ifdef CONFIG_PM
365 .driver = { 365 .driver = {
366 .pm = &usb_hcd_pci_pm_ops 366 .pm = &usb_hcd_pci_pm_ops
367 }, 367 },
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 893b707f0000..601e208bd782 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
286#else 286#else
287 .irq = ehci_irq, 287 .irq = ehci_irq,
288#endif 288#endif
289 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 289 .flags = HCD_MEMORY | HCD_USB2,
290 290
291 /* 291 /*
292 * basic lifecycle operations 292 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 6cc5567bf9c8..932293fa32de 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
28 * generic hardware linkage 28 * generic hardware linkage
29 */ 29 */
30 .irq = ehci_irq, 30 .irq = ehci_irq,
31 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 31 .flags = HCD_MEMORY | HCD_USB2,
32 32
33 /* 33 /*
34 * basic lifecycle operations 34 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 8188542ba17e..fd983771b025 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
71 .product_desc = "PS3 EHCI Host Controller", 71 .product_desc = "PS3 EHCI Host Controller",
72 .hcd_priv_size = sizeof(struct ehci_hcd), 72 .hcd_priv_size = sizeof(struct ehci_hcd),
73 .irq = ehci_irq, 73 .irq = ehci_irq,
74 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 74 .flags = HCD_MEMORY | HCD_USB2,
75 .reset = ps3_ehci_hc_reset, 75 .reset = ps3_ehci_hc_reset,
76 .start = ehci_run, 76 .start = ehci_run,
77 .stop = ehci_stop, 77 .stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index e321804c3475..a7f776a13eb1 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -247,6 +247,8 @@ static int qtd_copy_status (
247 247
248static void 248static void
249ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) 249ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
250__releases(ehci->lock)
251__acquires(ehci->lock)
250{ 252{
251 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 253 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
252 /* ... update hc-wide periodic stats */ 254 /* ... update hc-wide periodic stats */
@@ -272,8 +274,11 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
272 urb->actual_length, urb->transfer_buffer_length); 274 urb->actual_length, urb->transfer_buffer_length);
273#endif 275#endif
274 276
277 /* complete() can reenter this HCD */
275 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 278 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
279 spin_unlock (&ehci->lock);
276 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); 280 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
281 spin_lock (&ehci->lock);
277} 282}
278 283
279static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); 284static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 8a734498079b..b2de52d39614 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
55 * generic hardware linkage 55 * generic hardware linkage
56 */ 56 */
57 .irq = ehci_irq, 57 .irq = ehci_irq,
58 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 58 .flags = HCD_MEMORY | HCD_USB2,
59 59
60 /* 60 /*
61 * basic lifecycle operations 61 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index dc899eb2b861..93e59a13bc1f 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
36 * generic hardware linkage 36 * generic hardware linkage
37 */ 37 */
38 .irq = ehci_irq, 38 .irq = ehci_irq,
39 .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 39 .flags = HCD_USB2 | HCD_MEMORY,
40 40
41 /* 41 /*
42 * basic lifecycle operations 42 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 67026ffbf9a8..cca4be90a864 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
61 * Generic hardware linkage. 61 * Generic hardware linkage.
62 */ 62 */
63 .irq = ehci_irq, 63 .irq = ehci_irq,
64 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 64 .flags = HCD_MEMORY | HCD_USB2,
65 65
66 /* 66 /*
67 * Basic lifecycle operations. 67 * Basic lifecycle operations.
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 1c370dfbee0d..59e0e24c753f 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
108 * generic hardware linkage 108 * generic hardware linkage
109 */ 109 */
110 .irq = ehci_irq, 110 .irq = ehci_irq,
111 .flags = HCD_USB2|HCD_MEMORY|HCD_BH, 111 .flags = HCD_USB2|HCD_MEMORY,
112 112
113 /* 113 /*
114 * basic lifecycle operations 114 * basic lifecycle operations
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 95979f9f4381..eba962e6ebfb 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
79 * generic hardware linkage 79 * generic hardware linkage
80 */ 80 */
81 .irq = ehci_irq, 81 .irq = ehci_irq,
82 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 82 .flags = HCD_MEMORY | HCD_USB2,
83 83
84 /* 84 /*
85 * basic lifecycle operations 85 * basic lifecycle operations
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9e0020d9e4c8..abd5050a4899 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -24,7 +24,7 @@ struct fsl_usb2_dev_data {
24 enum fsl_usb2_operating_modes op_mode; /* operating mode */ 24 enum fsl_usb2_operating_modes op_mode; /* operating mode */
25}; 25};
26 26
27struct fsl_usb2_dev_data dr_mode_data[] = { 27static struct fsl_usb2_dev_data dr_mode_data[] = {
28 { 28 {
29 .dr_mode = "host", 29 .dr_mode = "host",
30 .drivers = { "fsl-ehci", NULL, NULL, }, 30 .drivers = { "fsl-ehci", NULL, NULL, },
@@ -42,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] = {
42 }, 42 },
43}; 43};
44 44
45struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np) 45static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
46{ 46{
47 const unsigned char *prop; 47 const unsigned char *prop;
48 int i; 48 int i;
@@ -75,7 +75,7 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
75 return FSL_USB2_PHY_NONE; 75 return FSL_USB2_PHY_NONE;
76} 76}
77 77
78struct platform_device *fsl_usb2_device_register( 78static struct platform_device *fsl_usb2_device_register(
79 struct platform_device *ofdev, 79 struct platform_device *ofdev,
80 struct fsl_usb2_platform_data *pdata, 80 struct fsl_usb2_platform_data *pdata,
81 const char *name, int id) 81 const char *name, int id)
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 60a5de505ca1..adb01d950a16 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
824 i = DIV_ROUND_UP(wrap_frame( 824 i = DIV_ROUND_UP(wrap_frame(
825 cur_frame - urb->start_frame), 825 cur_frame - urb->start_frame),
826 urb->interval); 826 urb->interval);
827 if (urb->transfer_flags & URB_ISO_ASAP) { 827
828 /* Treat underruns as if URB_ISO_ASAP was set */
829 if ((urb->transfer_flags & URB_ISO_ASAP) ||
830 i >= urb->number_of_packets) {
828 urb->start_frame = wrap_frame(urb->start_frame 831 urb->start_frame = wrap_frame(urb->start_frame
829 + i * urb->interval); 832 + i * urb->interval);
830 i = 0; 833 i = 0;
831 } else if (i >= urb->number_of_packets) {
832 ret = -EXDEV;
833 goto alloc_dmem_failed;
834 } 834 }
835 } 835 }
836 } 836 }
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8f6b695af6a4..604cad1bcf9c 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
216 frame &= ~(ed->interval - 1); 216 frame &= ~(ed->interval - 1);
217 frame |= ed->branch; 217 frame |= ed->branch;
218 urb->start_frame = frame; 218 urb->start_frame = frame;
219 ed->last_iso = frame + ed->interval * (size - 1);
219 } 220 }
220 } else if (ed->type == PIPE_ISOCHRONOUS) { 221 } else if (ed->type == PIPE_ISOCHRONOUS) {
221 u16 next = ohci_frame_no(ohci) + 1; 222 u16 next = ohci_frame_no(ohci) + 1;
222 u16 frame = ed->last_iso + ed->interval; 223 u16 frame = ed->last_iso + ed->interval;
224 u16 length = ed->interval * (size - 1);
223 225
224 /* Behind the scheduling threshold? */ 226 /* Behind the scheduling threshold? */
225 if (unlikely(tick_before(frame, next))) { 227 if (unlikely(tick_before(frame, next))) {
226 228
227 /* USB_ISO_ASAP: Round up to the first available slot */ 229 /* URB_ISO_ASAP: Round up to the first available slot */
228 if (urb->transfer_flags & URB_ISO_ASAP) { 230 if (urb->transfer_flags & URB_ISO_ASAP) {
229 frame += (next - frame + ed->interval - 1) & 231 frame += (next - frame + ed->interval - 1) &
230 -ed->interval; 232 -ed->interval;
231 233
232 /* 234 /*
233 * Not ASAP: Use the next slot in the stream. If 235 * Not ASAP: Use the next slot in the stream,
234 * the entire URB falls before the threshold, fail. 236 * no matter what.
235 */ 237 */
236 } else { 238 } else {
237 if (tick_before(frame + ed->interval *
238 (urb->number_of_packets - 1), next)) {
239 retval = -EXDEV;
240 usb_hcd_unlink_urb_from_ep(hcd, urb);
241 goto fail;
242 }
243
244 /* 239 /*
245 * Some OHCI hardware doesn't handle late TDs 240 * Some OHCI hardware doesn't handle late TDs
246 * correctly. After retiring them it proceeds 241 * correctly. After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
251 urb_priv->td_cnt = DIV_ROUND_UP( 246 urb_priv->td_cnt = DIV_ROUND_UP(
252 (u16) (next - frame), 247 (u16) (next - frame),
253 ed->interval); 248 ed->interval);
249 if (urb_priv->td_cnt >= urb_priv->length) {
250 ++urb_priv->td_cnt; /* Mark it */
251 ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
252 urb, frame, length,
253 next);
254 }
254 } 255 }
255 } 256 }
256 urb->start_frame = frame; 257 urb->start_frame = frame;
258 ed->last_iso = frame + length;
257 } 259 }
258 260
259 /* fill the TDs and link them to the ed; and 261 /* fill the TDs and link them to the ed; and
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index df4a6707322d..e7f577e63624 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41__releases(ohci->lock) 41__releases(ohci->lock)
42__acquires(ohci->lock) 42__acquires(ohci->lock)
43{ 43{
44 struct device *dev = ohci_to_hcd(ohci)->self.controller; 44 struct device *dev = ohci_to_hcd(ohci)->self.controller;
45 struct usb_host_endpoint *ep = urb->ep;
46 struct urb_priv *urb_priv;
47
45 // ASSERT (urb->hcpriv != 0); 48 // ASSERT (urb->hcpriv != 0);
46 49
50 restart:
47 urb_free_priv (ohci, urb->hcpriv); 51 urb_free_priv (ohci, urb->hcpriv);
48 urb->hcpriv = NULL; 52 urb->hcpriv = NULL;
49 if (likely(status == -EINPROGRESS)) 53 if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
80 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); 84 ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
81 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); 85 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
82 } 86 }
87
88 /*
 89	 * An isochronous URB that is submitted too late won't have any TDs
90 * (marked by the fact that the td_cnt value is larger than the
91 * actual number of TDs). If the next URB on this endpoint is like
92 * that, give it back now.
93 */
94 if (!list_empty(&ep->urb_list)) {
95 urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
96 urb_priv = urb->hcpriv;
97 if (urb_priv->td_cnt > urb_priv->length) {
98 status = 0;
99 goto restart;
100 }
101 }
83} 102}
84 103
85 104
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
546 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); 565 td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
547 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, 566 *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
548 (data & 0x0FFF) | 0xE000); 567 (data & 0x0FFF) | 0xE000);
549 td->ed->last_iso = info & 0xffff;
550 } else { 568 } else {
551 td->hwCBP = cpu_to_hc32 (ohci, data); 569 td->hwCBP = cpu_to_hc32 (ohci, data);
552 } 570 }
@@ -996,7 +1014,7 @@ rescan_this:
996 urb_priv->td_cnt++; 1014 urb_priv->td_cnt++;
997 1015
998 /* if URB is done, clean up */ 1016 /* if URB is done, clean up */
999 if (urb_priv->td_cnt == urb_priv->length) { 1017 if (urb_priv->td_cnt >= urb_priv->length) {
1000 modified = completed = 1; 1018 modified = completed = 1;
1001 finish_urb(ohci, urb, 0); 1019 finish_urb(ohci, urb, 0);
1002 } 1020 }
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1086 urb_priv->td_cnt++; 1104 urb_priv->td_cnt++;
1087 1105
1088 /* If all this urb's TDs are done, call complete() */ 1106 /* If all this urb's TDs are done, call complete() */
1089 if (urb_priv->td_cnt == urb_priv->length) 1107 if (urb_priv->td_cnt >= urb_priv->length)
1090 finish_urb(ohci, urb, status); 1108 finish_urb(ohci, urb, status);
1091 1109
1092 /* clean schedule: unlink EDs that are no longer busy */ 1110 /* clean schedule: unlink EDs that are no longer busy */
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index c300bd2f7d1c..0f228c46eeda 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
293 .remove = usb_hcd_pci_remove, 293 .remove = usb_hcd_pci_remove,
294 .shutdown = uhci_shutdown, 294 .shutdown = uhci_shutdown,
295 295
296#ifdef CONFIG_PM_SLEEP 296#ifdef CONFIG_PM
297 .driver = { 297 .driver = {
298 .pm = &usb_hcd_pci_pm_ops 298 .pm = &usb_hcd_pci_pm_ops
299 }, 299 },
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 041c6ddb695c..da6f56d996ce 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1303 } 1303 }
1304 1304
1305 /* Fell behind? */ 1305 /* Fell behind? */
1306 if (uhci_frame_before_eq(frame, next)) { 1306 if (!uhci_frame_before_eq(next, frame)) {
1307 1307
1308 /* USB_ISO_ASAP: Round up to the first available slot */ 1308 /* USB_ISO_ASAP: Round up to the first available slot */
1309 if (urb->transfer_flags & URB_ISO_ASAP) 1309 if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1311 -qh->period; 1311 -qh->period;
1312 1312
1313 /* 1313 /*
1314 * Not ASAP: Use the next slot in the stream. If 1314 * Not ASAP: Use the next slot in the stream,
1315 * the entire URB falls before the threshold, fail. 1315 * no matter what.
1316 */ 1316 */
1317 else if (!uhci_frame_before_eq(next, 1317 else if (!uhci_frame_before_eq(next,
1318 frame + (urb->number_of_packets - 1) * 1318 frame + (urb->number_of_packets - 1) *
1319 qh->period)) 1319 qh->period))
1320 return -EXDEV; 1320 dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
1321 urb, frame,
1322 (urb->number_of_packets - 1) *
1323 qh->period,
1324 next);
1321 } 1325 }
1322 } 1326 }
1323 1327
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fae697ed0b70..773a6b28c4f1 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) 287 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); 288 xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
289 } 289 }
290 cmd->command_trb = xhci->cmd_ring->enqueue; 290 cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); 291 list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); 292 xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
293 xhci_ring_cmd_db(xhci); 293 xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
552 * - Mark a port as being done with device resume, 552 * - Mark a port as being done with device resume,
553 * and ring the endpoint doorbells. 553 * and ring the endpoint doorbells.
554 * - Stop the Synopsys redriver Compliance Mode polling. 554 * - Stop the Synopsys redriver Compliance Mode polling.
555 * - Drop and reacquire the xHCI lock, in order to wait for port resume.
555 */ 556 */
556static u32 xhci_get_port_status(struct usb_hcd *hcd, 557static u32 xhci_get_port_status(struct usb_hcd *hcd,
557 struct xhci_bus_state *bus_state, 558 struct xhci_bus_state *bus_state,
558 __le32 __iomem **port_array, 559 __le32 __iomem **port_array,
559 u16 wIndex, u32 raw_port_status) 560 u16 wIndex, u32 raw_port_status,
561 unsigned long flags)
562 __releases(&xhci->lock)
563 __acquires(&xhci->lock)
560{ 564{
561 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 565 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
562 u32 status = 0; 566 u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
591 return 0xffffffff; 595 return 0xffffffff;
592 if (time_after_eq(jiffies, 596 if (time_after_eq(jiffies,
593 bus_state->resume_done[wIndex])) { 597 bus_state->resume_done[wIndex])) {
598 int time_left;
599
594 xhci_dbg(xhci, "Resume USB2 port %d\n", 600 xhci_dbg(xhci, "Resume USB2 port %d\n",
595 wIndex + 1); 601 wIndex + 1);
596 bus_state->resume_done[wIndex] = 0; 602 bus_state->resume_done[wIndex] = 0;
597 clear_bit(wIndex, &bus_state->resuming_ports); 603 clear_bit(wIndex, &bus_state->resuming_ports);
604
605 set_bit(wIndex, &bus_state->rexit_ports);
598 xhci_set_link_state(xhci, port_array, wIndex, 606 xhci_set_link_state(xhci, port_array, wIndex,
599 XDEV_U0); 607 XDEV_U0);
600 xhci_dbg(xhci, "set port %d resume\n", 608
601 wIndex + 1); 609 spin_unlock_irqrestore(&xhci->lock, flags);
602 slot_id = xhci_find_slot_id_by_port(hcd, xhci, 610 time_left = wait_for_completion_timeout(
603 wIndex + 1); 611 &bus_state->rexit_done[wIndex],
604 if (!slot_id) { 612 msecs_to_jiffies(
605 xhci_dbg(xhci, "slot_id is zero\n"); 613 XHCI_MAX_REXIT_TIMEOUT));
606 return 0xffffffff; 614 spin_lock_irqsave(&xhci->lock, flags);
615
616 if (time_left) {
617 slot_id = xhci_find_slot_id_by_port(hcd,
618 xhci, wIndex + 1);
619 if (!slot_id) {
620 xhci_dbg(xhci, "slot_id is zero\n");
621 return 0xffffffff;
622 }
623 xhci_ring_device(xhci, slot_id);
624 } else {
625 int port_status = xhci_readl(xhci,
626 port_array[wIndex]);
627 xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
628 XHCI_MAX_REXIT_TIMEOUT,
629 port_status);
630 status |= USB_PORT_STAT_SUSPEND;
631 clear_bit(wIndex, &bus_state->rexit_ports);
607 } 632 }
608 xhci_ring_device(xhci, slot_id); 633
609 bus_state->port_c_suspend |= 1 << wIndex; 634 bus_state->port_c_suspend |= 1 << wIndex;
610 bus_state->suspended_ports &= ~(1 << wIndex); 635 bus_state->suspended_ports &= ~(1 << wIndex);
611 } else { 636 } else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
728 break; 753 break;
729 } 754 }
730 status = xhci_get_port_status(hcd, bus_state, port_array, 755 status = xhci_get_port_status(hcd, bus_state, port_array,
731 wIndex, temp); 756 wIndex, temp, flags);
732 if (status == 0xffffffff) 757 if (status == 0xffffffff)
733 goto error; 758 goto error;
734 759
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 53b972c2a09f..83bcd13622c3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2428 for (i = 0; i < USB_MAXCHILDREN; ++i) { 2428 for (i = 0; i < USB_MAXCHILDREN; ++i) {
2429 xhci->bus_state[0].resume_done[i] = 0; 2429 xhci->bus_state[0].resume_done[i] = 0;
2430 xhci->bus_state[1].resume_done[i] = 0; 2430 xhci->bus_state[1].resume_done[i] = 0;
2431 /* Only the USB 2.0 completions will ever be used. */
2432 init_completion(&xhci->bus_state[1].rexit_done[i]);
2431 } 2433 }
2432 2434
2433 if (scratchpad_alloc(xhci, flags)) 2435 if (scratchpad_alloc(xhci, flags))
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d495057eb5..236c3aabe940 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
351 /* suspend and resume implemented later */ 351 /* suspend and resume implemented later */
352 352
353 .shutdown = usb_hcd_pci_shutdown, 353 .shutdown = usb_hcd_pci_shutdown,
354#ifdef CONFIG_PM_SLEEP 354#ifdef CONFIG_PM
355 .driver = { 355 .driver = {
356 .pm = &usb_hcd_pci_pm_ops 356 .pm = &usb_hcd_pci_pm_ops
357 }, 357 },
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 411da1fc7ae8..6bfbd80ec2b9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
123 return TRB_TYPE_LINK_LE32(link->control); 123 return TRB_TYPE_LINK_LE32(link->control);
124} 124}
125 125
126union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
127{
128 /* Enqueue pointer can be left pointing to the link TRB,
129 * we must handle that
130 */
131 if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
132 return ring->enq_seg->next->trbs;
133 return ring->enqueue;
134}
135
126/* Updates trb to point to the next TRB in the ring, and updates seg if the next 136/* Updates trb to point to the next TRB in the ring, and updates seg if the next
127 * TRB is in a new segment. This does not skip over link TRBs, and it does not 137 * TRB is in a new segment. This does not skip over link TRBs, and it does not
128 * effect the ring dequeue or enqueue pointers. 138 * effect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
859 /* Otherwise ring the doorbell(s) to restart queued transfers */ 869 /* Otherwise ring the doorbell(s) to restart queued transfers */
860 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 870 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
861 } 871 }
862 ep->stopped_td = NULL; 872
863 ep->stopped_trb = NULL; 873 /* Clear stopped_td and stopped_trb if endpoint is not halted */
874 if (!(ep->ep_state & EP_HALTED)) {
875 ep->stopped_td = NULL;
876 ep->stopped_trb = NULL;
877 }
864 878
865 /* 879 /*
866 * Drop the lock and complete the URBs in the cancelled TD list. 880 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1414 inc_deq(xhci, xhci->cmd_ring); 1428 inc_deq(xhci, xhci->cmd_ring);
1415 return; 1429 return;
1416 } 1430 }
1431 /* There is no command to handle if we get a stop event when the
1432 * command ring is empty, event->cmd_trb points to the next
1433 * unset command
1434 */
1435 if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1436 return;
1417 } 1437 }
1418 1438
1419 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) 1439 switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
1743 } 1763 }
1744 } 1764 }
1745 1765
1766 /*
1767 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
 1768	 * RExit to a disconnect state). If so, let the driver know it's
1769 * out of the RExit state.
1770 */
1771 if (!DEV_SUPERSPEED(temp) &&
1772 test_and_clear_bit(faked_port_index,
1773 &bus_state->rexit_ports)) {
1774 complete(&bus_state->rexit_done[faked_port_index]);
1775 bogus_port_status = true;
1776 goto cleanup;
1777 }
1778
1746 if (hcd->speed != HCD_USB3) 1779 if (hcd->speed != HCD_USB3)
1747 xhci_test_and_clear_bit(xhci, port_array, faked_port_index, 1780 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1748 PORT_PLC); 1781 PORT_PLC);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 49b6edb84a79..1e36dbb48366 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2598 if (command) { 2598 if (command) {
2599 cmd_completion = command->completion; 2599 cmd_completion = command->completion;
2600 cmd_status = &command->status; 2600 cmd_status = &command->status;
2601 command->command_trb = xhci->cmd_ring->enqueue; 2601 command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2602
2603 /* Enqueue pointer can be left pointing to the link TRB,
2604 * we must handle that
2605 */
2606 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2607 command->command_trb =
2608 xhci->cmd_ring->enq_seg->next->trbs;
2609
2610 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2602 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2611 } else { 2603 } else {
2612 cmd_completion = &virt_dev->cmd_completion; 2604 cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2614 } 2606 }
2615 init_completion(cmd_completion); 2607 init_completion(cmd_completion);
2616 2608
2617 cmd_trb = xhci->cmd_ring->dequeue; 2609 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
2618 if (!ctx_change) 2610 if (!ctx_change)
2619 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2611 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2620 udev->slot_id, must_succeed); 2612 udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3439 3431
3440 /* Attempt to submit the Reset Device command to the command ring */ 3432 /* Attempt to submit the Reset Device command to the command ring */
3441 spin_lock_irqsave(&xhci->lock, flags); 3433 spin_lock_irqsave(&xhci->lock, flags);
3442 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 3434 reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3443
3444 /* Enqueue pointer can be left pointing to the link TRB,
3445 * we must handle that
3446 */
3447 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3448 reset_device_cmd->command_trb =
3449 xhci->cmd_ring->enq_seg->next->trbs;
3450 3435
3451 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 3436 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3452 ret = xhci_queue_reset_device(xhci, slot_id); 3437 ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3650 union xhci_trb *cmd_trb; 3635 union xhci_trb *cmd_trb;
3651 3636
3652 spin_lock_irqsave(&xhci->lock, flags); 3637 spin_lock_irqsave(&xhci->lock, flags);
3653 cmd_trb = xhci->cmd_ring->dequeue; 3638 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3654 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3639 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3655 if (ret) { 3640 if (ret) {
3656 spin_unlock_irqrestore(&xhci->lock, flags); 3641 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3785 slot_ctx->dev_info >> 27); 3770 slot_ctx->dev_info >> 27);
3786 3771
3787 spin_lock_irqsave(&xhci->lock, flags); 3772 spin_lock_irqsave(&xhci->lock, flags);
3788 cmd_trb = xhci->cmd_ring->dequeue; 3773 cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
3789 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 3774 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3790 udev->slot_id); 3775 udev->slot_id);
3791 if (ret) { 3776 if (ret) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 46aa14894148..289fbfbae746 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
1412 unsigned long resume_done[USB_MAXCHILDREN]; 1412 unsigned long resume_done[USB_MAXCHILDREN];
1413 /* which ports have started to resume */ 1413 /* which ports have started to resume */
1414 unsigned long resuming_ports; 1414 unsigned long resuming_ports;
1415 /* Which ports are waiting on RExit to U0 transition. */
1416 unsigned long rexit_ports;
1417 struct completion rexit_done[USB_MAXCHILDREN];
1415}; 1418};
1416 1419
1420
1421/*
1422 * It can take up to 20 ms to transition from RExit to U0 on the
1423 * Intel Lynx Point LP xHCI host.
1424 */
1425#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
1426
1417static inline unsigned int hcd_index(struct usb_hcd *hcd) 1427static inline unsigned int hcd_index(struct usb_hcd *hcd)
1418{ 1428{
1419 if (hcd->speed == HCD_USB3) 1429 if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
1840 union xhci_trb *cmd_trb); 1850 union xhci_trb *cmd_trb);
1841void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 1851void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1842 unsigned int ep_index, unsigned int stream_id); 1852 unsigned int ep_index, unsigned int stream_id);
1853union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
1843 1854
1844/* xHCI roothub code */ 1855/* xHCI roothub code */
1845void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, 1856void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 4047cbb91bac..bd4138d80a48 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
535 struct dsps_glue *glue; 535 struct dsps_glue *glue;
536 int ret; 536 int ret;
537 537
538 if (!strcmp(pdev->name, "musb-hdrc"))
539 return -ENODEV;
540
538 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); 541 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
539 if (!match) { 542 if (!match) {
540 dev_err(&pdev->dev, "fail to get matching of_match struct\n"); 543 dev_err(&pdev->dev, "fail to get matching of_match struct\n");
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9a08679d204d..b19ed213ab85 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
1790 musb->g.max_speed = USB_SPEED_HIGH; 1790 musb->g.max_speed = USB_SPEED_HIGH;
1791 musb->g.speed = USB_SPEED_UNKNOWN; 1791 musb->g.speed = USB_SPEED_UNKNOWN;
1792 1792
1793 MUSB_DEV_MODE(musb);
1794 musb->xceiv->otg->default_a = 0;
1795 musb->xceiv->state = OTG_STATE_B_IDLE;
1796
1793 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1794 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1795 musb->g.is_otg = 1; 1799 musb->g.is_otg = 1;
@@ -1849,7 +1853,6 @@ static int musb_gadget_start(struct usb_gadget *g,
1849 musb->gadget_driver = driver; 1853 musb->gadget_driver = driver;
1850 1854
1851 spin_lock_irqsave(&musb->lock, flags); 1855 spin_lock_irqsave(&musb->lock, flags);
1852 musb->is_active = 1;
1853 1856
1854 otg_set_peripheral(otg, &musb->g); 1857 otg_set_peripheral(otg, &musb->g);
1855 musb->xceiv->state = OTG_STATE_B_IDLE; 1858 musb->xceiv->state = OTG_STATE_B_IDLE;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index b2f29c9aebbf..02799a5efcd4 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
241 241
242/* platform driver interface */ 242/* platform driver interface */
243 243
244static int __init gpio_vbus_probe(struct platform_device *pdev) 244static int gpio_vbus_probe(struct platform_device *pdev)
245{ 245{
246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
247 struct gpio_vbus_data *gpio_vbus; 247 struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
349 return err; 349 return err;
350} 350}
351 351
352static int __exit gpio_vbus_remove(struct platform_device *pdev) 352static int gpio_vbus_remove(struct platform_device *pdev)
353{ 353{
354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); 354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
398}; 398};
399#endif 399#endif
400 400
401/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
402
403MODULE_ALIAS("platform:gpio-vbus"); 401MODULE_ALIAS("platform:gpio-vbus");
404 402
405static struct platform_driver gpio_vbus_driver = { 403static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
410 .pm = &gpio_vbus_dev_pm_ops, 408 .pm = &gpio_vbus_dev_pm_ops,
411#endif 409#endif
412 }, 410 },
413 .remove = __exit_p(gpio_vbus_remove), 411 .probe = gpio_vbus_probe,
412 .remove = gpio_vbus_remove,
414}; 413};
415 414
416module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); 415module_platform_driver(gpio_vbus_driver);
417 416
418MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); 417MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
419MODULE_AUTHOR("Philipp Zabel"); 418MODULE_AUTHOR("Philipp Zabel");
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index fc15694d3031..4e8a0405f956 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -79,7 +79,7 @@ static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
79 return &dpll_map[i].params; 79 return &dpll_map[i].params;
80 } 80 }
81 81
82 return 0; 82 return NULL;
83} 83}
84 84
85static int omap_usb3_suspend(struct usb_phy *x, int suspend) 85static int omap_usb3_suspend(struct usb_phy *x, int suspend)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c454bfa22a10..ddb9c51f2c99 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -60,7 +60,7 @@ config USB_SERIAL_SIMPLE
60 - Suunto ANT+ USB device. 60 - Suunto ANT+ USB device.
61 - Fundamental Software dongle. 61 - Fundamental Software dongle.
62 - HP4x calculators 62 - HP4x calculators
63 - a number of Motoroloa phones 63 - a number of Motorola phones
64 - Siemens USB/MPI adapter. 64 - Siemens USB/MPI adapter.
65 - ViVOtech ViVOpay USB device. 65 - ViVOtech ViVOpay USB device.
66 - Infineon Modem Flashloader USB interface 66 - Infineon Modem Flashloader USB interface
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1cf6f125f5f0..80a7104d5ddb 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
81 81
82#define HUAWEI_VENDOR_ID 0x12D1 82#define HUAWEI_VENDOR_ID 0x12D1
83#define HUAWEI_PRODUCT_E173 0x140C 83#define HUAWEI_PRODUCT_E173 0x140C
84#define HUAWEI_PRODUCT_E1750 0x1406
84#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
85#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
86#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
@@ -567,6 +568,8 @@ static const struct usb_device_id option_ids[] = {
567 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 569 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
569 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 570 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
572 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
570 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 574 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e7a84f0f5179..bedf8e47713b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -139,6 +139,7 @@ enum pl2303_type {
139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */
140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */ 140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
141 TB, /* TB version */ 141 TB, /* TB version */
142 HX_CLONE, /* Cheap and less functional clone of the HX chip */
142}; 143};
143/* 144/*
144 * NOTE: don't know the difference between type 0 and type 1, 145 * NOTE: don't know the difference between type 0 and type 1,
@@ -206,8 +207,23 @@ static int pl2303_startup(struct usb_serial *serial)
206 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB 207 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
207 */ 208 */
208 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) { 209 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
209 type = HX_TA; 210 /* Check if the device is a clone */
210 type_str = "X/HX/TA"; 211 pl2303_vendor_read(0x9494, 0, serial, buf);
212 /*
213 * NOTE: Not sure if this read is really needed.
214 * The HX returns 0x00, the clone 0x02, but the Windows
215 * driver seems to ignore the value and continues.
216 */
217 pl2303_vendor_write(0x0606, 0xaa, serial);
218 pl2303_vendor_read(0x8686, 0, serial, buf);
219 if (buf[0] != 0xaa) {
220 type = HX_CLONE;
221 type_str = "X/HX clone (limited functionality)";
222 } else {
223 type = HX_TA;
224 type_str = "X/HX/TA";
225 }
226 pl2303_vendor_write(0x0606, 0x00, serial);
211 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) 227 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
212 == 0x400) { 228 == 0x400) {
213 type = HXD_EA_RA_SA; 229 type = HXD_EA_RA_SA;
@@ -305,8 +321,9 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
305{ 321{
306 /* 322 /*
307 * NOTE: Only the values defined in baud_sup are supported ! 323 * NOTE: Only the values defined in baud_sup are supported !
308 * => if unsupported values are set, the PL2303 seems to 324 * => if unsupported values are set, the PL2303 uses 9600 baud instead
309 * use 9600 baud (at least my PL2303X always does) 325 * => HX clones just don't work at unsupported baud rates < 115200 baud,
326 * for baud rates > 115200 they run at 115200 baud
310 */ 327 */
311 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 328 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
312 4800, 7200, 9600, 14400, 19200, 28800, 38400, 329 4800, 7200, 9600, 14400, 19200, 28800, 38400,
@@ -316,14 +333,14 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
316 * NOTE: With the exception of type_0/1 devices, the following 333 * NOTE: With the exception of type_0/1 devices, the following
317 * additional baud rates are supported (tested with HX rev. 3A only): 334 * additional baud rates are supported (tested with HX rev. 3A only):
318 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 335 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
319 * 403200, 806400. (*: not HX) 336 * 403200, 806400. (*: not HX and HX clones)
320 * 337 *
321 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000; 338 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
322 * type_0+1: 1228800; RA: 921600; SA: 115200 339 * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
323 * 340 *
324 * As long as we are not using this encoding method for anything else 341 * As long as we are not using this encoding method for anything else
325 * than the type_0+1 and HX chips, there is no point in complicating 342 * than the type_0+1, HX and HX clone chips, there is no point in
326 * the code to support them. 343 * complicating the code to support them.
327 */ 344 */
328 int i; 345 int i;
329 346
@@ -347,6 +364,8 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
347 baud = min_t(int, baud, 6000000); 364 baud = min_t(int, baud, 6000000);
348 else if (type == type_0 || type == type_1) 365 else if (type == type_0 || type == type_1)
349 baud = min_t(int, baud, 1228800); 366 baud = min_t(int, baud, 1228800);
367 else if (type == HX_CLONE)
368 baud = min_t(int, baud, 115200);
350 /* Direct (standard) baud rate encoding method */ 369 /* Direct (standard) baud rate encoding method */
351 put_unaligned_le32(baud, buf); 370 put_unaligned_le32(baud, buf);
352 371
@@ -359,7 +378,8 @@ static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
359 /* 378 /*
360 * Divisor based baud rate encoding method 379 * Divisor based baud rate encoding method
361 * 380 *
362 * NOTE: it's not clear if the type_0/1 chips support this method 381 * NOTE: HX clones do NOT support this method.
382 * It's not clear if the type_0/1 chips support it.
363 * 383 *
364 * divisor = 12MHz * 32 / baudrate = 2^A * B 384 * divisor = 12MHz * 32 / baudrate = 2^A * B
365 * 385 *
@@ -452,7 +472,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
452 * 1) Direct method: encodes the baud rate value directly 472 * 1) Direct method: encodes the baud rate value directly
453 * => supported by all chip types 473 * => supported by all chip types
454 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32) 474 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
455 * => supported by HX chips (and likely not by type_0/1 chips) 475 * => not supported by HX clones (and likely type_0/1 chips)
456 * 476 *
457 * NOTE: Although the divisor based baud rate encoding method is much 477 * NOTE: Although the divisor based baud rate encoding method is much
458 * more flexible, some of the standard baud rate values can not be 478 * more flexible, some of the standard baud rate values can not be
@@ -460,7 +480,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
460 * the device likely uses the same baud rate generator for both methods 480 * the device likely uses the same baud rate generator for both methods
461 * so that there is likley no difference. 481 * so that there is likley no difference.
462 */ 482 */
463 if (type == type_0 || type == type_1) 483 if (type == type_0 || type == type_1 || type == HX_CLONE)
464 baud = pl2303_baudrate_encode_direct(baud, type, buf); 484 baud = pl2303_baudrate_encode_direct(baud, type, buf);
465 else 485 else
466 baud = pl2303_baudrate_encode_divisor(baud, type, buf); 486 baud = pl2303_baudrate_encode_divisor(baud, type, buf);
@@ -813,6 +833,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
813 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
814 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
815 0, NULL, 0, 100); 835 0, NULL, 0, 100);
836 /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
816 if (result) 837 if (result)
817 dev_err(&port->dev, "error sending break = %d\n", result); 838 dev_err(&port->dev, "error sending break = %d\n", result);
818} 839}
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4b79a1f2f901..ce5221fa393a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -461,7 +461,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
461 u32 i; 461 u32 i;
462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 463 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
464 } 464 }
465 465
466 tcm_vhost_put_inflight(tv_cmd->inflight); 466 tcm_vhost_put_inflight(tv_cmd->inflight);
467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
728 } 728 }
729 se_sess = tv_nexus->tvn_se_sess; 729 se_sess = tv_nexus->tvn_se_sess;
730 730
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
735 }
736
732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
733 sg = cmd->tvc_sgl; 738 sg = cmd->tvc_sgl;
734 pages = cmd->tvc_upages; 739 pages = cmd->tvc_upages;
@@ -1373,21 +1378,30 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1373 return 0; 1378 return 0;
1374} 1379}
1375 1380
1381static void vhost_scsi_free(struct vhost_scsi *vs)
1382{
1383 if (is_vmalloc_addr(vs))
1384 vfree(vs);
1385 else
1386 kfree(vs);
1387}
1388
1376static int vhost_scsi_open(struct inode *inode, struct file *f) 1389static int vhost_scsi_open(struct inode *inode, struct file *f)
1377{ 1390{
1378 struct vhost_scsi *vs; 1391 struct vhost_scsi *vs;
1379 struct vhost_virtqueue **vqs; 1392 struct vhost_virtqueue **vqs;
1380 int r, i; 1393 int r = -ENOMEM, i;
1381 1394
1382 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 1395 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1383 if (!vs) 1396 if (!vs) {
1384 return -ENOMEM; 1397 vs = vzalloc(sizeof(*vs));
1398 if (!vs)
1399 goto err_vs;
1400 }
1385 1401
1386 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); 1402 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1387 if (!vqs) { 1403 if (!vqs)
1388 kfree(vs); 1404 goto err_vqs;
1389 return -ENOMEM;
1390 }
1391 1405
1392 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1406 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1393 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); 1407 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
@@ -1407,14 +1421,18 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1407 1421
1408 tcm_vhost_init_inflight(vs, NULL); 1422 tcm_vhost_init_inflight(vs, NULL);
1409 1423
1410 if (r < 0) { 1424 if (r < 0)
1411 kfree(vqs); 1425 goto err_init;
1412 kfree(vs);
1413 return r;
1414 }
1415 1426
1416 f->private_data = vs; 1427 f->private_data = vs;
1417 return 0; 1428 return 0;
1429
1430err_init:
1431 kfree(vqs);
1432err_vqs:
1433 vhost_scsi_free(vs);
1434err_vs:
1435 return r;
1418} 1436}
1419 1437
1420static int vhost_scsi_release(struct inode *inode, struct file *f) 1438static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1431,7 +1449,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1431 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1449 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1432 vhost_scsi_flush(vs); 1450 vhost_scsi_flush(vs);
1433 kfree(vs->dev.vqs); 1451 kfree(vs->dev.vqs);
1434 kfree(vs); 1452 vhost_scsi_free(vs);
1435 return 0; 1453 return 0;
1436} 1454}
1437 1455
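
Two related changes sit in the vhost/scsi hunks: the command tag is now taken with GFP_ATOMIC and an allocation failure is handled explicitly instead of assumed away, and struct vhost_scsi, which has grown large, is allocated with a kzalloc-then-vzalloc fallback and released through vhost_scsi_free(), which uses is_vmalloc_addr() to pick vfree() or kfree(). A minimal sketch of that allocation pattern, with made-up helper names (later kernels provide kvzalloc()/kvfree() for the same purpose):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *example_zalloc_large(size_t size)
    {
            /* Try a contiguous allocation first, quietly and without
             * aggressive retries ... */
            void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

            /* ... and fall back to vmalloc space if that fails. */
            if (!p)
                    p = vzalloc(size);
            return p;
    }

    static void example_free_large(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }
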
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9a9502a4aa50..69068e0d8f31 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -161,9 +161,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
161 if (list_empty(&work->node)) { 161 if (list_empty(&work->node)) {
162 list_add_tail(&work->node, &dev->work_list); 162 list_add_tail(&work->node, &dev->work_list);
163 work->queue_seq++; 163 work->queue_seq++;
164 spin_unlock_irqrestore(&dev->work_lock, flags);
164 wake_up_process(dev->worker); 165 wake_up_process(dev->worker);
166 } else {
167 spin_unlock_irqrestore(&dev->work_lock, flags);
165 } 168 }
166 spin_unlock_irqrestore(&dev->work_lock, flags);
167} 169}
168EXPORT_SYMBOL_GPL(vhost_work_queue); 170EXPORT_SYMBOL_GPL(vhost_work_queue);
169 171
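
The vhost.c hunk releases work_lock before calling wake_up_process(): the woken worker thread immediately tries to take the same lock, so waking it while still holding the lock only makes it contend. After the patch the function reads roughly as follows (the locking lines outside the hunk are reconstructed from the obvious context):

    void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->work_lock, flags);
            if (list_empty(&work->node)) {
                    list_add_tail(&work->node, &dev->work_list);
                    work->queue_seq++;
                    /* Drop the lock first so the worker can grab it at once. */
                    spin_unlock_irqrestore(&dev->work_lock, flags);
                    wake_up_process(dev->worker);
            } else {
                    spin_unlock_irqrestore(&dev->work_lock, flags);
            }
    }
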
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c
index 75dca19bf214..6ac755270ab4 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/mmp/hw/mmp_ctrl.c
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
514 if (IS_ERR(ctrl->clk)) { 514 if (IS_ERR(ctrl->clk)) {
515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); 515 dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
516 ret = -ENOENT; 516 ret = -ENOENT;
517 goto failed_get_clk; 517 goto failed;
518 } 518 }
519 clk_prepare_enable(ctrl->clk); 519 clk_prepare_enable(ctrl->clk);
520 520
@@ -551,21 +551,8 @@ failed_path_init:
551 path_deinit(path_plat); 551 path_deinit(path_plat);
552 } 552 }
553 553
554 if (ctrl->clk) { 554 clk_disable_unprepare(ctrl->clk);
555 devm_clk_put(ctrl->dev, ctrl->clk);
556 clk_disable_unprepare(ctrl->clk);
557 }
558failed_get_clk:
559 devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
560failed: 555failed:
561 if (ctrl) {
562 if (ctrl->reg_base)
563 devm_iounmap(ctrl->dev, ctrl->reg_base);
564 devm_release_mem_region(ctrl->dev, res->start,
565 resource_size(res));
566 devm_kfree(ctrl->dev, ctrl);
567 }
568
569 dev_err(&pdev->dev, "device init failed\n"); 556 dev_err(&pdev->dev, "device init failed\n");
570 557
571 return ret; 558 return ret;
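
The mmp_ctrl.c hunks shrink the probe error path: everything obtained through the devm_* interfaces (the irq, the mapped registers, the memory region, the ctrl allocation itself) is released automatically by the driver core when probe returns an error, so freeing it by hand was redundant; only the clock, which was explicitly prepared and enabled, still needs unwinding. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/platform_device.h>
    #include <linux/device.h>
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/io.h>

    /* Hypothetical: stands in for the rest of the probe work. */
    static int example_init_hw(void __iomem *base)
    {
            return 0;
    }

    static int example_probe(struct platform_device *pdev)
    {
            void __iomem *base;
            struct clk *clk;
            int ret;

            /* devm-managed: undone automatically if probe fails later. */
            base = devm_ioremap_resource(&pdev->dev,
                            platform_get_resource(pdev, IORESOURCE_MEM, 0));
            if (IS_ERR(base))
                    return PTR_ERR(base);

            clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            ret = clk_prepare_enable(clk);
            if (ret)
                    return ret;

            ret = example_init_hw(base);
            if (ret)
                    /* only state devm does not track needs explicit unwinding */
                    clk_disable_unprepare(clk);
            return ret;
    }
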
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d250ed0f806d..27197a8048c0 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
620 break; 620 break;
621 case 3: 621 case 3:
622 bits_per_pixel = 32; 622 bits_per_pixel = 32;
623 break;
623 case 1: 624 case 1:
624 default: 625 default:
625 return -EINVAL; 626 return -EINVAL;
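
The mxsfb.c hunk adds a missing break after the 32-bpp case: without it, execution fell straight through into the case-1/default branch and the function returned -EINVAL even for a valid 32-bpp mode. Roughly, the fixed switch looks like this (the selector name is illustrative and the other word-length cases are elided):

    switch (word_length) {
    /* ... other word-length cases ... */
    case 3:
            bits_per_pixel = 32;
            break;                  /* added: stop falling into the -EINVAL case */
    case 1:
    default:
            return -EINVAL;
    }
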
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 7ef079c146e7..c172a5281f9e 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, 2075 if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
2076 info->monspecs.modedb, 16)) { 2076 info->monspecs.modedb, 16)) {
2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); 2077 printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
2078 err = -EINVAL;
2078 goto err_map_video; 2079 goto err_map_video;
2079 } 2080 }
2080 2081
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
2097 info->fix.smem_len >> 10, info->var.xres, 2098 info->fix.smem_len >> 10, info->var.xres,
2098 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); 2099 info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
2099 2100
2100 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2101 err = fb_alloc_cmap(&info->cmap, 256, 0);
2102 if (err < 0)
2101 goto err_map_video; 2103 goto err_map_video;
2102 2104
2103 err = register_framebuffer(info); 2105 err = register_framebuffer(info);
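
Both neofb hunks are about returning a real error: previously err was not guaranteed to hold a negative value when fb_find_mode() or fb_alloc_cmap() failed, so probe could jump to the unwind path yet report success. After the patch the failing calls set err explicitly before the goto:

    if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
                      info->monspecs.modedb, 16)) {
            printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
            err = -EINVAL;          /* previously err could still be zero here */
            goto err_map_video;
    }
    /* ... */
    err = fb_alloc_cmap(&info->cmap, 256, 0);
    if (err < 0)
            goto err_map_video;
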
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 171821ddd78d..ba5b40f581f6 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 timing_np = of_find_node_by_name(np, name); 123 timing_np = of_get_child_by_name(np, name);
124 if (!timing_np) { 124 if (!timing_np) {
125 pr_err("%s: could not find node '%s'\n", 125 pr_err("%s: could not find node '%s'\n",
126 of_node_full_name(np), name); 126 of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
143 struct display_timings *disp; 143 struct display_timings *disp;
144 144
145 if (!np) { 145 if (!np) {
146 pr_err("%s: no devicenode given\n", of_node_full_name(np)); 146 pr_err("%s: no device node given\n", of_node_full_name(np));
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 timings_np = of_find_node_by_name(np, "display-timings"); 150 timings_np = of_get_child_by_name(np, "display-timings");
151 if (!timings_np) { 151 if (!timings_np) {
152 pr_err("%s: could not find display-timings node\n", 152 pr_err("%s: could not find display-timings node\n",
153 of_node_full_name(np)); 153 of_node_full_name(np));
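
The of_display_timing.c hunks swap of_find_node_by_name() for of_get_child_by_name(). The former walks the whole device tree starting at the given node (and drops a reference on it), so it can match a like-named node well outside the panel's own subtree; the latter only looks at direct children and leaves the parent's refcount alone, which is what the display-timings binding actually means. A minimal usage sketch, with a hypothetical function name:

    #include <linux/of.h>
    #include <linux/errno.h>

    static int example_parse(struct device_node *np)
    {
            struct device_node *timings;

            /* Only direct children of np are considered, and np's own
             * reference count is left untouched. */
            timings = of_get_child_by_name(np, "display-timings");
            if (!timings)
                    return -ENODATA;

            /* ... walk the per-mode child nodes here ... */

            of_node_put(timings);   /* release the reference we were handed */
            return 0;
    }
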
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig
index 6c90885b0940..10b25e7cd878 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/omap2/displays-new/Kconfig
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
35 35
36config DISPLAY_PANEL_DSI_CM 36config DISPLAY_PANEL_DSI_CM
37 tristate "Generic DSI Command Mode Panel" 37 tristate "Generic DSI Command Mode Panel"
38 depends on BACKLIGHT_CLASS_DEVICE
38 help 39 help
39 Driver for generic DSI command mode panels. 40 Driver for generic DSI command mode panels.
40 41
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 1b60698f141e..ccd9073f706f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
191 in = omap_dss_find_output(pdata->source); 191 in = omap_dss_find_output(pdata->source);
192 if (in == NULL) { 192 if (in == NULL) {
193 dev_err(&pdev->dev, "Failed to find video source\n"); 193 dev_err(&pdev->dev, "Failed to find video source\n");
194 return -ENODEV; 194 return -EPROBE_DEFER;
195 } 195 }
196 196
197 ddata->in = in; 197 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c
index bc5f8ceda371..63d88ee6dfe4 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/omap2/displays-new/connector-dvi.c
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
263 in = omap_dss_find_output(pdata->source); 263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) { 264 if (in == NULL) {
265 dev_err(&pdev->dev, "Failed to find video source\n"); 265 dev_err(&pdev->dev, "Failed to find video source\n");
266 return -ENODEV; 266 return -EPROBE_DEFER;
267 } 267 }
268 268
269 ddata->in = in; 269 ddata->in = in;
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c
index c5826716d6ab..9abe2c039ae9 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/omap2/displays-new/connector-hdmi.c
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
290 in = omap_dss_find_output(pdata->source); 290 in = omap_dss_find_output(pdata->source);
291 if (in == NULL) { 291 if (in == NULL) {
292 dev_err(&pdev->dev, "Failed to find video source\n"); 292 dev_err(&pdev->dev, "Failed to find video source\n");
293 return -ENODEV; 293 return -EPROBE_DEFER;
294 } 294 }
295 295
296 ddata->in = in; 296 ddata->in = in;
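
The three connector-* hunks (analog TV, DVI, HDMI) all change the same failure from -ENODEV to -EPROBE_DEFER: the DSS output the connector attaches to may simply not have been probed yet, and -EPROBE_DEFER asks the driver core to retry this probe later instead of failing the device permanently. A minimal sketch of the pattern, with source_name standing in for pdata->source:

    in = omap_dss_find_output(source_name);
    if (in == NULL) {
            dev_err(&pdev->dev, "Failed to find video source\n");
            /* the provider may not be bound yet; ask to be probed again */
            return -EPROBE_DEFER;
    }
    ddata->in = in;

Logging at error level on every deferral can be noisy; much later kernels added dev_err_probe() for exactly this case, but that helper is not available here.
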
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 02a7340111df..477975009eee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
3691 } 3691 }
3692 3692
3693 pm_runtime_enable(&pdev->dev); 3693 pm_runtime_enable(&pdev->dev);
3694 pm_runtime_irq_safe(&pdev->dev);
3694 3695
3695 r = dispc_runtime_get(); 3696 r = dispc_runtime_get();
3696 if (r) 3697 if (r)
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 47ca86c5c6c0..d838ba829459 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 (info->var.bits_per_pixel * info->var.xres_virtual); 1336 (info->var.bits_per_pixel * info->var.xres_virtual);
1337 if (info->var.yres_virtual < info->var.yres) { 1337 if (info->var.yres_virtual < info->var.yres) {
1338 dev_err(info->device, "virtual vertical size smaller than real\n"); 1338 dev_err(info->device, "virtual vertical size smaller than real\n");
1339 goto err_find_mode; 1339 rc = -EINVAL;
1340 }
1341
1342 /* maximize virtual vertical size for fast scrolling */
1343 info->var.yres_virtual = info->fix.smem_len * 8 /
1344 (info->var.bits_per_pixel * info->var.xres_virtual);
1345 if (info->var.yres_virtual < info->var.yres) {
1346 dev_err(info->device, "virtual vertical size smaller than real\n");
1347 goto err_find_mode; 1340 goto err_find_mode;
1348 } 1341 }
1349 1342
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a50c6e3a7cc4..b232908a6192 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
398 if (nr_pages > ARRAY_SIZE(frame_list)) 398 if (nr_pages > ARRAY_SIZE(frame_list))
399 nr_pages = ARRAY_SIZE(frame_list); 399 nr_pages = ARRAY_SIZE(frame_list);
400 400
401 scratch_page = get_balloon_scratch_page();
402
403 for (i = 0; i < nr_pages; i++) { 401 for (i = 0; i < nr_pages; i++) {
404 page = alloc_page(gfp); 402 page = alloc_page(gfp);
405 if (page == NULL) { 403 if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
413 411
414 scrub_page(page); 412 scrub_page(page);
415 413
414 /*
415 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent.
418 */
419 scratch_page = get_balloon_scratch_page();
416#ifdef CONFIG_XEN_HAVE_PVMMU 420#ifdef CONFIG_XEN_HAVE_PVMMU
417 if (xen_pv_domain() && !PageHighMem(page)) { 421 if (xen_pv_domain() && !PageHighMem(page)) {
418 ret = HYPERVISOR_update_va_mapping( 422 ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
422 BUG_ON(ret); 426 BUG_ON(ret);
423 } 427 }
424#endif 428#endif
425 }
426
427 /* Ensure that ballooned highmem pages don't have kmaps. */
428 kmap_flush_unused();
429 flush_tlb_all();
430
431 /* No more mappings: invalidate P2M and add to balloon. */
432 for (i = 0; i < nr_pages; i++) {
433 pfn = mfn_to_pfn(frame_list[i]);
434 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 429 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
435 unsigned long p; 430 unsigned long p;
436 p = page_to_pfn(scratch_page); 431 p = page_to_pfn(scratch_page);
437 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 432 __set_phys_to_machine(pfn, pfn_to_mfn(p));
438 } 433 }
434 put_balloon_scratch_page();
435
439 balloon_append(pfn_to_page(pfn)); 436 balloon_append(pfn_to_page(pfn));
440 } 437 }
441 438
442 put_balloon_scratch_page(); 439 /* Ensure that ballooned highmem pages don't have kmaps. */
440 kmap_flush_unused();
441 flush_tlb_all();
443 442
444 set_xen_guest_handle(reservation.extent_start, frame_list); 443 set_xen_guest_handle(reservation.extent_start, frame_list);
445 reservation.nr_extents = nr_pages; 444 reservation.nr_extents = nr_pages;