author     Dave Airlie <airlied@redhat.com>  2013-11-08 01:34:39 -0500
committer  Dave Airlie <airlied@redhat.com>  2013-11-08 01:34:39 -0500
commit     91915260ea5ed9d9b19bfb75d53c989c8ada2ab0 (patch)
tree       f7eb16ced65f39ebd0bb32e3b4e5e0f365755536 /drivers
parent     21136946c495b0e1e0f7e25a8de6f170efbdeadf (diff)
parent     07bf139b906013ecef0c5e0441564d1ae10e974a (diff)
Merge tag 'drm-intel-fixes-2013-11-07' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
A bit bigger -fixes pull request in the merge window than usual, due to two
feature-y things:

- Display CRCs are now enabled on all platforms, including the odd DP case
  on gm45/vlv. Since this is a testing-only feature it should never hurt,
  but I figured it'll help with regression-testing -fixes. So I left it in
  and didn't postpone it to 3.14.

- Display power well refactoring from Imre. Would have caused major
  conflict pain with the bdw stage 1 patches if I'd postponed this to
  -next. It's only a relatively small interface rework, so it shouldn't
  cause pain. It's also been in my tree for almost 3 weeks already.

That accounts for about two thirds of the pull, otherwise just bugfixes:

- vlv backlight fix from Jesse/Jani
- vlv vblank timestamp fix from Jesse
- improved eDP detection through VBT from Ville (fixes a vlv issue)
- eDP vdd fix from Paulo
- fixes for DVO LVDS on i830M
- a few smaller things all over

Note: This contains a backmerge of v3.12. Since the -internal branch always
applied on top of -nightly, I need that unified base to merge the bdw
patches. So you'll get a conflict with the radeon connector props when
pulling this (and nouveau/master will also conflict a bit when Ben doesn't
rebase). The backmerge itself only had conflicts in drm/i915. There's also
a tiny conflict between Jani's backlight fix and your sysfs lifetime fix in
drm-next.

* tag 'drm-intel-fixes-2013-11-07' of git://people.freedesktop.org/~danvet/drm-intel: (940 commits)
  drm/i915/vlv: use per-pipe backlight controls v2
  drm/i915: make backlight functions take a connector
  drm/i915: move opregion asle request handling to a work queue
  drm/i915/vlv: use PIPE_START_VBLANK interrupts on VLV
  drm/i915: Make intel_dp_is_edp() less specific
  drm/i915: Give names to the VBT child device type bits
  drm/i915/vlv: enable HDA display audio for Valleyview2
  drm/i915/dvo: call ->mode_set callback only when the port is running
  drm/i915: avoid unclaimed registers when capturing the error state
  drm/i915: Enable DP port CRC for the "auto" source on g4x/vlv
  drm/i915: scramble reset support for DP port CRC on vlv
  drm/i915: scramble reset support for DP port CRC on g4x
  drm/i916: add "auto" pipe CRC source
  ...

Conflicts:
	MAINTAINERS
	drivers/gpu/drm/i915/intel_panel.c
	drivers/gpu/drm/nouveau/core/subdev/mc/base.c
	drivers/gpu/drm/radeon/atombios_encoders.c
	drivers/gpu/drm/radeon/radeon_connectors.c
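For reference, a minimal sketch of how this tag could be pulled into a local
drm-next checkout and the expected conflicts resolved. The remote URL, tag
name, and conflicting files are taken from the message above; the exact
commands and resolution steps are illustrative assumptions, not the
maintainer's actual workflow:

	# Fetch the tag from the drm-intel tree and merge it into drm-next.
	git checkout drm-next
	git fetch git://people.freedesktop.org/~danvet/drm-intel tag drm-intel-fixes-2013-11-07
	git merge drm-intel-fixes-2013-11-07

	# If the conflicts described above appear, resolve them by hand and
	# conclude the merge; intel_panel.c and radeon_connectors.c are two
	# of the files named in the Conflicts list.
	git add drivers/gpu/drm/i915/intel_panel.c drivers/gpu/drm/radeon/radeon_connectors.c
	git commit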
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig8
-rw-r--r--drivers/acpi/device_pm.c56
-rw-r--r--drivers/acpi/power.c104
-rw-r--r--drivers/acpi/scan.c3
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libahci.c10
-rw-r--r--drivers/ata/libata-acpi.c14
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/ata/pata_isapnp.c2
-rw-r--r--drivers/base/memory.c7
-rw-r--r--drivers/bcma/driver_pci.c49
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c12
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/char/tpm/xen-tpmfront.c1
-rw-r--r--drivers/clk/clk-nomadik.c21
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/socfpga/clk.c2
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/connector/cn_proc.c18
-rw-r--r--drivers/connector/connector.c9
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c51
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/imx-dma.c31
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c9
-rw-r--r--drivers/gpio/gpio-lynxpoint.c5
-rw-r--r--drivers/gpio/gpio-omap.c158
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpio/gpiolib.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c11
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c357
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h92
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c109
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c211
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h130
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c29
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h34
-rw-r--r--drivers/gpu/drm/i915/intel_display.c258
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c73
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h16
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c16
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c25
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c58
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c153
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c276
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c52
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/cik.c10
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h4
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c24
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c11
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c13
-rw-r--r--drivers/hid/hid-holtek-mouse.c4
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c13
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c4
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c4
-rw-r--r--drivers/hid/hid-roccat-pyra.c4
-rw-r--r--drivers/hid/hid-wiimote-core.c5
-rw-r--r--drivers/hid/hid-wiimote-modules.c40
-rw-r--r--drivers/hid/hid-wiimote.h4
-rw-r--r--drivers/hid/hidraw.c21
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hwmon/applesmc.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-mxs.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-stu300.c11
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c14
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c4
-rw-r--r--drivers/iio/amplifiers/ad8366.c4
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/industrialio-buffer.c3
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c18
-rw-r--r--drivers/infiniband/Kconfig11
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c16
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c70
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c80
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/input/input.c10
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c11
-rw-r--r--drivers/input/misc/cm109.c14
-rw-r--r--drivers/input/mouse/alps.c1
-rw-r--r--drivers/input/serio/i8042.c23
-rw-r--r--drivers/input/tablet/wacom_sys.c4
-rw-r--r--drivers/input/tablet/wacom_wac.c8
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/arm-smmu.c13
-rw-r--r--drivers/md/bcache/request.c5
-rw-r--r--drivers/md/dm-snap-persistent.c18
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c1
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/dvb-frontends/tda10071.c9
-rw-r--r--drivers/media/i2c/ad9389b.c15
-rw-r--r--drivers/media/i2c/adv7511.c18
-rw-r--r--drivers/media/i2c/adv7842.c30
-rw-r--r--drivers/media/i2c/ths8200.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c5
-rw-r--r--drivers/media/tuners/e4000.c3
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c18
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c87
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c16
-rw-r--r--drivers/mtd/devices/m25p80.c17
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c7
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/dev.c10
-rw-r--r--drivers/net/can/flexcan.c24
-rw-r--r--drivers/net/can/slcan.c139
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c189
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c412
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c52
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c23
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c76
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c38
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c162
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c52
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c87
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c28
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h56
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic.h12
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c6
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/ieee802154/mrf24j40.c31
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c23
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/usbnet.c29
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vxlan.c9
-rw-r--r--drivers/net/wan/farsync.c1
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c26
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c28
-rw-r--r--drivers/net/wireless/cw1200/fwio.c2
-rw-r--r--drivers/net/wireless/cw1200/hwbus.h1
-rw-r--r--drivers/net/wireless/cw1200/hwio.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c42
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/mwifiex/join.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/xenbus.c152
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/of_reserved_mem.c173
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c14
-rw-r--r--drivers/pinctrl/pinconf.c4
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c12
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c5
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c5
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/sony-laptop.c26
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/ti-abb-regulator.c16
-rw-r--r--drivers/regulator/wm831x-ldo.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c98
-rw-r--r--drivers/s390/char/sclp.c4
-rw-r--r--drivers/s390/char/sclp_cmd.c8
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/qdio_main.c10
-rw-r--r--drivers/scsi/BusLogic.c16
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c9
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c176
-rw-r--r--drivers/spi/spi-atmel.c3
-rw-r--r--drivers/spi/spi-clps711x.c3
-rw-r--r--drivers/spi/spi-fsl-dspi.c10
-rw-r--r--drivers/spi/spi-mpc512x-psc.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c11
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/staging/bcm/Bcmchar.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c25
-rw-r--r--drivers/staging/media/msi3101/Kconfig1
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c10
-rw-r--r--drivers/staging/ozwpan/ozcdev.c3
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c9
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_sbc.c33
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/target_core_xcopy.c57
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h7
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c30
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h13
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c1
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c14
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/tty/n_tty.c46
-rw-r--r--drivers/tty/serial/atmel_serial.c9
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/vt8500_serial.c5
-rw-r--r--drivers/uio/uio.c17
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c7
-rw-r--r--drivers/usb/chipidea/host.c6
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c9
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c2
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c26
-rw-r--r--drivers/usb/host/xhci-pci.c25
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/misc/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c46
-rw-r--r--drivers/usb/musb/musb_core.h1
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_virthub.c46
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c11
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c228
-rw-r--r--drivers/usb/serial/pl2303.c274
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c1
-rw-r--r--drivers/usb/storage/scsiglue.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vfio/vfio_iommu_type1.c40
-rw-r--r--drivers/vhost/scsi.c9
-rw-r--r--drivers/video/au1100fb.c26
-rw-r--r--drivers/video/au1200fb.c23
-rw-r--r--drivers/w1/w1.c6
-rw-r--r--drivers/watchdog/hpwdt.c6
-rw-r--r--drivers/watchdog/kempld_wdt.c2
-rw-r--r--drivers/watchdog/sunxi_wdt.c4
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
367 files changed, 4818 insertions, 3138 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 22327e6a7236..6efe2ac6902f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -24,7 +24,7 @@ menuconfig ACPI
24 are configured, ACPI is used. 24 are configured, ACPI is used.
25 25
26 The project home page for the Linux ACPI subsystem is here: 26 The project home page for the Linux ACPI subsystem is here:
27 <http://www.lesswatts.org/projects/acpi/> 27 <https://01.org/linux-acpi>
28 28
29 Linux support for ACPI is based on Intel Corporation's ACPI 29 Linux support for ACPI is based on Intel Corporation's ACPI
30 Component Architecture (ACPI CA). For more information on the 30 Component Architecture (ACPI CA). For more information on the
@@ -123,9 +123,9 @@ config ACPI_BUTTON
123 default y 123 default y
124 help 124 help
125 This driver handles events on the power, sleep, and lid buttons. 125 This driver handles events on the power, sleep, and lid buttons.
126 A daemon reads /proc/acpi/event and perform user-defined actions 126 A daemon reads events from input devices or via netlink and
127 such as shutting down the system. This is necessary for 127 performs user-defined actions such as shutting down the system.
128 software-controlled poweroff. 128 This is necessary for software-controlled poweroff.
129 129
130 To compile this driver as a module, choose M here: 130 To compile this driver as a module, choose M here:
131 the module will be called button. 131 the module will be called button.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 59d3202f6b36..a94383d1f350 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
1025 } 1025 }
1026} 1026}
1027EXPORT_SYMBOL_GPL(acpi_dev_pm_detach); 1027EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
1028
1029/**
1030 * acpi_dev_pm_add_dependent - Add physical device depending for PM.
1031 * @handle: Handle of ACPI device node.
1032 * @depdev: Device depending on that node for PM.
1033 */
1034void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
1035{
1036 struct acpi_device_physical_node *dep;
1037 struct acpi_device *adev;
1038
1039 if (!depdev || acpi_bus_get_device(handle, &adev))
1040 return;
1041
1042 mutex_lock(&adev->physical_node_lock);
1043
1044 list_for_each_entry(dep, &adev->power_dependent, node)
1045 if (dep->dev == depdev)
1046 goto out;
1047
1048 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1049 if (dep) {
1050 dep->dev = depdev;
1051 list_add_tail(&dep->node, &adev->power_dependent);
1052 }
1053
1054 out:
1055 mutex_unlock(&adev->physical_node_lock);
1056}
1057EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
1058
1059/**
1060 * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
1061 * @handle: Handle of ACPI device node.
1062 * @depdev: Device depending on that node for PM.
1063 */
1064void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
1065{
1066 struct acpi_device_physical_node *dep;
1067 struct acpi_device *adev;
1068
1069 if (!depdev || acpi_bus_get_device(handle, &adev))
1070 return;
1071
1072 mutex_lock(&adev->physical_node_lock);
1073
1074 list_for_each_entry(dep, &adev->power_dependent, node)
1075 if (dep->dev == depdev) {
1076 list_del(&dep->node);
1077 kfree(dep);
1078 break;
1079 }
1080
1081 mutex_unlock(&adev->physical_node_lock);
1082}
1083EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
1084#endif /* CONFIG_PM */ 1028#endif /* CONFIG_PM */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0dbe5cdf3396..c2ad391d8041 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
59#define ACPI_POWER_RESOURCE_STATE_ON 0x01 59#define ACPI_POWER_RESOURCE_STATE_ON 0x01
60#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 60#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
61 61
62struct acpi_power_dependent_device {
63 struct list_head node;
64 struct acpi_device *adev;
65 struct work_struct work;
66};
67
68struct acpi_power_resource { 62struct acpi_power_resource {
69 struct acpi_device device; 63 struct acpi_device device;
70 struct list_head list_node; 64 struct list_head list_node;
71 struct list_head dependent;
72 char *name; 65 char *name;
73 u32 system_level; 66 u32 system_level;
74 u32 order; 67 u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
233 return 0; 226 return 0;
234} 227}
235 228
236static void acpi_power_resume_dependent(struct work_struct *work)
237{
238 struct acpi_power_dependent_device *dep;
239 struct acpi_device_physical_node *pn;
240 struct acpi_device *adev;
241 int state;
242
243 dep = container_of(work, struct acpi_power_dependent_device, work);
244 adev = dep->adev;
245 if (acpi_power_get_inferred_state(adev, &state))
246 return;
247
248 if (state > ACPI_STATE_D0)
249 return;
250
251 mutex_lock(&adev->physical_node_lock);
252
253 list_for_each_entry(pn, &adev->physical_node_list, node)
254 pm_request_resume(pn->dev);
255
256 list_for_each_entry(pn, &adev->power_dependent, node)
257 pm_request_resume(pn->dev);
258
259 mutex_unlock(&adev->physical_node_lock);
260}
261
262static int __acpi_power_on(struct acpi_power_resource *resource) 229static int __acpi_power_on(struct acpi_power_resource *resource)
263{ 230{
264 acpi_status status = AE_OK; 231 acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
283 resource->name)); 250 resource->name));
284 } else { 251 } else {
285 result = __acpi_power_on(resource); 252 result = __acpi_power_on(resource);
286 if (result) { 253 if (result)
287 resource->ref_count--; 254 resource->ref_count--;
288 } else {
289 struct acpi_power_dependent_device *dep;
290
291 list_for_each_entry(dep, &resource->dependent, node)
292 schedule_work(&dep->work);
293 }
294 } 255 }
295 return result; 256 return result;
296} 257}
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
390 return result; 351 return result;
391} 352}
392 353
393static void acpi_power_add_dependent(struct acpi_power_resource *resource,
394 struct acpi_device *adev)
395{
396 struct acpi_power_dependent_device *dep;
397
398 mutex_lock(&resource->resource_lock);
399
400 list_for_each_entry(dep, &resource->dependent, node)
401 if (dep->adev == adev)
402 goto out;
403
404 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
405 if (!dep)
406 goto out;
407
408 dep->adev = adev;
409 INIT_WORK(&dep->work, acpi_power_resume_dependent);
410 list_add_tail(&dep->node, &resource->dependent);
411
412 out:
413 mutex_unlock(&resource->resource_lock);
414}
415
416static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
417 struct acpi_device *adev)
418{
419 struct acpi_power_dependent_device *dep;
420 struct work_struct *work = NULL;
421
422 mutex_lock(&resource->resource_lock);
423
424 list_for_each_entry(dep, &resource->dependent, node)
425 if (dep->adev == adev) {
426 list_del(&dep->node);
427 work = &dep->work;
428 break;
429 }
430
431 mutex_unlock(&resource->resource_lock);
432
433 if (work) {
434 cancel_work_sync(work);
435 kfree(dep);
436 }
437}
438
439static struct attribute *attrs[] = { 354static struct attribute *attrs[] = {
440 NULL, 355 NULL,
441}; 356};
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
524 439
525void acpi_power_add_remove_device(struct acpi_device *adev, bool add) 440void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
526{ 441{
527 struct acpi_device_power_state *ps;
528 struct acpi_power_resource_entry *entry;
529 int state; 442 int state;
530 443
531 if (adev->wakeup.flags.valid) 444 if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
535 if (!adev->power.flags.power_resources) 448 if (!adev->power.flags.power_resources)
536 return; 449 return;
537 450
538 ps = &adev->power.states[ACPI_STATE_D0];
539 list_for_each_entry(entry, &ps->resources, node) {
540 struct acpi_power_resource *resource = entry->resource;
541
542 if (add)
543 acpi_power_add_dependent(resource, adev);
544 else
545 acpi_power_remove_dependent(resource, adev);
546 }
547
548 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) 451 for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
549 acpi_power_expose_hide(adev, 452 acpi_power_expose_hide(adev,
550 &adev->power.states[state].resources, 453 &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
882 acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER, 785 acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
883 ACPI_STA_DEFAULT); 786 ACPI_STA_DEFAULT);
884 mutex_init(&resource->resource_lock); 787 mutex_init(&resource->resource_lock);
885 INIT_LIST_HEAD(&resource->dependent);
886 INIT_LIST_HEAD(&resource->list_node); 788 INIT_LIST_HEAD(&resource->list_node);
887 resource->name = device->pnp.bus_id; 789 resource->name = device->pnp.bus_id;
888 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); 790 strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
936 mutex_lock(&resource->resource_lock); 838 mutex_lock(&resource->resource_lock);
937 839
938 result = acpi_power_get_state(resource->device.handle, &state); 840 result = acpi_power_get_state(resource->device.handle, &state);
939 if (result) 841 if (result) {
842 mutex_unlock(&resource->resource_lock);
940 continue; 843 continue;
844 }
941 845
942 if (state == ACPI_POWER_RESOURCE_STATE_OFF 846 if (state == ACPI_POWER_RESOURCE_STATE_OFF
943 && resource->ref_count) { 847 && resource->ref_count) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 611ce9061dc5..fee8a297c7d9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
968 } 968 }
969 return 0; 969 return 0;
970} 970}
971EXPORT_SYMBOL_GPL(acpi_bus_get_device); 971EXPORT_SYMBOL(acpi_bus_get_device);
972 972
973int acpi_device_add(struct acpi_device *device, 973int acpi_device_add(struct acpi_device *device,
974 void (*release)(struct device *)) 974 void (*release)(struct device *))
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
999 INIT_LIST_HEAD(&device->wakeup_list); 999 INIT_LIST_HEAD(&device->wakeup_list);
1000 INIT_LIST_HEAD(&device->physical_node_list); 1000 INIT_LIST_HEAD(&device->physical_node_list);
1001 mutex_init(&device->physical_node_lock); 1001 mutex_init(&device->physical_node_lock);
1002 INIT_LIST_HEAD(&device->power_dependent);
1003 1002
1004 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); 1003 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
1005 if (!new_bus_id) { 1004 if (!new_bus_id) {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9d715ae5ff6b..8e28f923cf7f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1343 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1343 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
1344 host->flags |= ATA_HOST_PARALLEL_SCAN; 1344 host->flags |= ATA_HOST_PARALLEL_SCAN;
1345 else 1345 else
1346 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 1346 dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
1347 1347
1348 if (pi.flags & ATA_FLAG_EM) 1348 if (pi.flags & ATA_FLAG_EM)
1349 ahci_reset_em(host); 1349 ahci_reset_em(host);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 2daaee05cab1..7d3b85385bfc 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
184 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 184 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
185 host->flags |= ATA_HOST_PARALLEL_SCAN; 185 host->flags |= ATA_HOST_PARALLEL_SCAN;
186 else 186 else
187 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 187 dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
188 188
189 if (pi.flags & ATA_FLAG_EM) 189 if (pi.flags & ATA_FLAG_EM)
190 ahci_reset_em(host); 190 ahci_reset_em(host);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index acfd0f711069..aaac4fb0d564 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
778 rc = ap->ops->transmit_led_message(ap, 778 rc = ap->ops->transmit_led_message(ap,
779 emp->led_state, 779 emp->led_state,
780 4); 780 4);
781 /*
782 * If busy, give a breather but do not
783 * release EH ownership by using msleep()
784 * instead of ata_msleep(). EM Transmit
785 * bit is busy for the whole host and
786 * releasing ownership will cause other
787 * ports to fail the same way.
788 */
781 if (rc == -EBUSY) 789 if (rc == -EBUSY)
782 ata_msleep(ap, 1); 790 msleep(1);
783 else 791 else
784 break; 792 break;
785 } 793 }
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 4ba8b0405572..ab714d2ad978 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
1035{ 1035{
1036 ata_acpi_clear_gtf(dev); 1036 ata_acpi_clear_gtf(dev);
1037} 1037}
1038
1039void ata_scsi_acpi_bind(struct ata_device *dev)
1040{
1041 acpi_handle handle = ata_dev_acpi_handle(dev);
1042 if (handle)
1043 acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
1044}
1045
1046void ata_scsi_acpi_unbind(struct ata_device *dev)
1047{
1048 acpi_handle handle = ata_dev_acpi_handle(dev);
1049 if (handle)
1050 acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
1051}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c69fcce505c0..370462fa8e01 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1322 * should be retried. To be used from EH. 1322 * should be retried. To be used from EH.
1323 * 1323 *
1324 * SCSI midlayer limits the number of retries to scmd->allowed. 1324 * SCSI midlayer limits the number of retries to scmd->allowed.
1325 * scmd->retries is decremented for commands which get retried 1325 * scmd->allowed is incremented for commands which get retried
1326 * due to unrelated failures (qc->err_mask is zero). 1326 * due to unrelated failures (qc->err_mask is zero).
1327 */ 1327 */
1328void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1328void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1329{ 1329{
1330 struct scsi_cmnd *scmd = qc->scsicmd; 1330 struct scsi_cmnd *scmd = qc->scsicmd;
1331 if (!qc->err_mask && scmd->retries) 1331 if (!qc->err_mask)
1332 scmd->retries--; 1332 scmd->allowed++;
1333 __ata_eh_qc_complete(qc); 1333 __ata_eh_qc_complete(qc);
1334} 1334}
1335 1335
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 97a0cef12959..db6dfcfa3e2e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
3679 if (!IS_ERR(sdev)) { 3679 if (!IS_ERR(sdev)) {
3680 dev->sdev = sdev; 3680 dev->sdev = sdev;
3681 scsi_device_put(sdev); 3681 scsi_device_put(sdev);
3682 ata_scsi_acpi_bind(dev);
3683 } else { 3682 } else {
3684 dev->sdev = NULL; 3683 dev->sdev = NULL;
3685 } 3684 }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
3767 struct scsi_device *sdev; 3766 struct scsi_device *sdev;
3768 unsigned long flags; 3767 unsigned long flags;
3769 3768
3770 ata_scsi_acpi_unbind(dev);
3771
3772 /* Alas, we need to grab scan_mutex to ensure SCSI device 3769 /* Alas, we need to grab scan_mutex to ensure SCSI device
3773 * state doesn't change underneath us and thus 3770 * state doesn't change underneath us and thus
3774 * scsi_device_get() always succeeds. The mutex locking can 3771 * scsi_device_get() always succeeds. The mutex locking can
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index eeeb77845d48..45b5ab3a95d5 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
121extern void ata_acpi_bind_port(struct ata_port *ap); 121extern void ata_acpi_bind_port(struct ata_port *ap);
122extern void ata_acpi_bind_dev(struct ata_device *dev); 122extern void ata_acpi_bind_dev(struct ata_device *dev);
123extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev); 123extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
124extern void ata_scsi_acpi_bind(struct ata_device *dev);
125extern void ata_scsi_acpi_unbind(struct ata_device *dev);
126#else 124#else
127static inline void ata_acpi_dissociate(struct ata_host *host) { } 125static inline void ata_acpi_dissociate(struct ata_host *host) { }
128static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; } 126static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
133 pm_message_t state) { } 131 pm_message_t state) { }
134static inline void ata_acpi_bind_port(struct ata_port *ap) {} 132static inline void ata_acpi_bind_port(struct ata_port *ap) {}
135static inline void ata_acpi_bind_dev(struct ata_device *dev) {} 133static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
136static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
137static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
138#endif 134#endif
139 135
140/* libata-scsi.c */ 136/* libata-scsi.c */
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 4bceb8803a10..b33d1f99b3a4 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
78 78
79 ap->ioaddr.cmd_addr = cmd_addr; 79 ap->ioaddr.cmd_addr = cmd_addr;
80 80
81 if (pnp_port_valid(idev, 1) == 0) { 81 if (pnp_port_valid(idev, 1)) {
82 ctl_addr = devm_ioport_map(&idev->dev, 82 ctl_addr = devm_ioport_map(&idev->dev,
83 pnp_port_start(idev, 1), 1); 83 pnp_port_start(idev, 1), 1);
84 ap->ioaddr.altstatus_addr = ctl_addr; 84 ap->ioaddr.altstatus_addr = ctl_addr;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e59f6535c44..bece691cb5d9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
333 online_type = ONLINE_KEEP; 333 online_type = ONLINE_KEEP;
334 else if (!strncmp(buf, "offline", min_t(int, count, 7))) 334 else if (!strncmp(buf, "offline", min_t(int, count, 7)))
335 online_type = -1; 335 online_type = -1;
336 else 336 else {
337 return -EINVAL; 337 ret = -EINVAL;
338 goto err;
339 }
338 340
339 switch (online_type) { 341 switch (online_type) {
340 case ONLINE_KERNEL: 342 case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
357 ret = -EINVAL; /* should never happen */ 359 ret = -EINVAL; /* should never happen */
358 } 360 }
359 361
362err:
360 unlock_device_hotplug(); 363 unlock_device_hotplug();
361 364
362 if (ret) 365 if (ret)
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index c9fd6943ce45..50329d1057ed 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
210 } 210 }
211} 211}
212 212
213static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
214{
215 u16 data;
216
217 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
218 data = up ? 0x74 : 0x7C;
219 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
220 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
221 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
222 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
223 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
224 data = up ? 0x75 : 0x7D;
225 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
226 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
227 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
228 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
229 }
230}
231
232/************************************************** 213/**************************************************
233 * Init. 214 * Init.
234 **************************************************/ 215 **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
255 bcma_core_pci_clientmode_init(pc); 236 bcma_core_pci_clientmode_init(pc);
256} 237}
257 238
239void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
240{
241 struct bcma_drv_pci *pc;
242 u16 data;
243
244 if (bus->hosttype != BCMA_HOSTTYPE_PCI)
245 return;
246
247 pc = &bus->drv_pci[0];
248
249 if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
250 data = up ? 0x74 : 0x7C;
251 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
252 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
253 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
254 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
255 } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
256 data = up ? 0x75 : 0x7D;
257 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
258 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
259 bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
260 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
261 }
262}
263EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
264
258int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 265int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
259 bool enable) 266 bool enable)
260{ 267{
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
310 317
311 pc = &bus->drv_pci[0]; 318 pc = &bus->drv_pci[0];
312 319
313 bcma_core_pci_power_save(pc, true);
314
315 bcma_core_pci_extend_L1timer(pc, true); 320 bcma_core_pci_extend_L1timer(pc, true);
316} 321}
317EXPORT_SYMBOL_GPL(bcma_core_pci_up); 322EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
326 pc = &bus->drv_pci[0]; 331 pc = &bus->drv_pci[0];
327 332
328 bcma_core_pci_extend_L1timer(pc, false); 333 bcma_core_pci_extend_L1timer(pc, false);
329
330 bcma_core_pci_power_save(pc, false);
331} 334}
332EXPORT_SYMBOL_GPL(bcma_core_pci_down); 335EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a12b923bbaca..0a327f4154a2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
85 { USB_DEVICE(0x04CA, 0x3008) }, 85 { USB_DEVICE(0x04CA, 0x3008) },
86 { USB_DEVICE(0x13d3, 0x3362) }, 86 { USB_DEVICE(0x13d3, 0x3362) },
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) },
88 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
89 { USB_DEVICE(0x0489, 0xe057) }, 90 { USB_DEVICE(0x0489, 0xe057) },
90 { USB_DEVICE(0x13d3, 0x3393) }, 91 { USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
126 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 127 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
127 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 128 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
128 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
129 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8e16f0af6358..f3dfc0a88fdc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
102 102
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0b05, 0x17b5) }, 104 { USB_DEVICE(0x0b05, 0x17b5) },
105 { USB_DEVICE(0x0b05, 0x17cb) },
105 { USB_DEVICE(0x04ca, 0x2003) }, 106 { USB_DEVICE(0x04ca, 0x2003) },
106 { USB_DEVICE(0x0489, 0xe042) }, 107 { USB_DEVICE(0x0489, 0xe042) },
107 { USB_DEVICE(0x413c, 0x8197) }, 108 { USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
112 /*Broadcom devices with vendor specific id */ 113 /*Broadcom devices with vendor specific id */
113 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, 114 { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
114 115
116 /* Belkin F8065bf - Broadcom based */
117 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
118
115 { } /* Terminating entry */ 119 { } /* Terminating entry */
116}; 120};
117 121
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
148 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
149 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 153 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
150 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
151 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
152 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 19ab6ff53d59..2394e9753ef5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
700 phys_addr_t sdramwins_phys_base, 700 phys_addr_t sdramwins_phys_base,
701 size_t sdramwins_size) 701 size_t sdramwins_size)
702{ 702{
703 struct device_node *np;
703 int win; 704 int win;
704 705
705 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); 706 mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
712 return -ENOMEM; 713 return -ENOMEM;
713 } 714 }
714 715
715 if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric")) 716 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
717 if (np) {
716 mbus->hw_io_coherency = 1; 718 mbus->hw_io_coherency = 1;
719 of_node_put(np);
720 }
717 721
718 for (win = 0; win < mbus->soc->num_wins; win++) 722 for (win = 0; win < mbus->soc->num_wins; win++)
719 mvebu_mbus_disable_window(mbus, win); 723 mvebu_mbus_disable_window(mbus, win);
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
861 int ret; 865 int ret;
862 866
863 /* 867 /*
864 * These are optional, so we clear them and they'll 868 * These are optional, so we make sure that resource_size(x) will
865 * be zero if they are missing from the DT. 869 * return 0.
866 */ 870 */
867 memset(mem, 0, sizeof(struct resource)); 871 memset(mem, 0, sizeof(struct resource));
872 mem->end = -1;
868 memset(io, 0, sizeof(struct resource)); 873 memset(io, 0, sizeof(struct resource));
874 io->end = -1;
869 875
870 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); 876 ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
871 if (!ret) { 877 if (!ret) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7737b5bd26af..7a744d391756 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -640,7 +640,7 @@ struct timer_rand_state {
640 */ 640 */
641void add_device_randomness(const void *buf, unsigned int size) 641void add_device_randomness(const void *buf, unsigned int size)
642{ 642{
643 unsigned long time = get_cycles() ^ jiffies; 643 unsigned long time = random_get_entropy() ^ jiffies;
644 644
645 mix_pool_bytes(&input_pool, buf, size, NULL); 645 mix_pool_bytes(&input_pool, buf, size, NULL);
646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); 646 mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
677 goto out; 677 goto out;
678 678
679 sample.jiffies = jiffies; 679 sample.jiffies = jiffies;
680 sample.cycles = get_cycles(); 680 sample.cycles = random_get_entropy();
681 sample.num = num; 681 sample.num = num;
682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); 682 mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
683 683
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); 744 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
745 struct pt_regs *regs = get_irq_regs(); 745 struct pt_regs *regs = get_irq_regs();
746 unsigned long now = jiffies; 746 unsigned long now = jiffies;
747 __u32 input[4], cycles = get_cycles(); 747 __u32 input[4], cycles = random_get_entropy();
748 748
749 input[0] = cycles ^ jiffies; 749 input[0] = cycles ^ jiffies;
750 input[1] = irq; 750 input[1] = irq;
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
1459 1459
1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; 1460static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1461 1461
1462static int __init random_int_secret_init(void) 1462int random_int_secret_init(void)
1463{ 1463{
1464 get_random_bytes(random_int_secret, sizeof(random_int_secret)); 1464 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1465 return 0; 1465 return 0;
1466} 1466}
1467late_initcall(random_int_secret_init);
1468 1467
1469/* 1468/*
1470 * Get a random word for internal kernel use only. Similar to urandom but 1469 * Get a random word for internal kernel use only. Similar to urandom but
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
1483 1482
1484 hash = get_cpu_var(get_random_int_hash); 1483 hash = get_cpu_var(get_random_int_hash);
1485 1484
1486 hash[0] += current->pid + jiffies + get_cycles(); 1485 hash[0] += current->pid + jiffies + random_get_entropy();
1487 md5_transform(hash, random_int_secret); 1486 md5_transform(hash, random_int_secret);
1488 ret = hash[0]; 1487 ret = hash[0];
1489 put_cpu_var(get_random_int_hash); 1488 put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 06189e55b4e5..94c280d36e8b 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <xen/xen.h>
13#include <xen/events.h> 14#include <xen/events.h>
14#include <xen/interface/io/tpmif.h> 15#include <xen/interface/io/tpmif.h>
15#include <xen/grant_table.h> 16#include <xen/grant_table.h>
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 51410c2ac2cb..4d978a3c88f7 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -27,6 +27,14 @@
27 */ 27 */
28 28
29#define SRC_CR 0x00U 29#define SRC_CR 0x00U
30#define SRC_CR_T0_ENSEL BIT(15)
31#define SRC_CR_T1_ENSEL BIT(17)
32#define SRC_CR_T2_ENSEL BIT(19)
33#define SRC_CR_T3_ENSEL BIT(21)
34#define SRC_CR_T4_ENSEL BIT(23)
35#define SRC_CR_T5_ENSEL BIT(25)
36#define SRC_CR_T6_ENSEL BIT(27)
37#define SRC_CR_T7_ENSEL BIT(29)
30#define SRC_XTALCR 0x0CU 38#define SRC_XTALCR 0x0CU
31#define SRC_XTALCR_XTALTIMEN BIT(20) 39#define SRC_XTALCR_XTALTIMEN BIT(20)
32#define SRC_XTALCR_SXTALDIS BIT(19) 40#define SRC_XTALCR_SXTALDIS BIT(19)
@@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
543 __func__, np->name); 551 __func__, np->name);
544 return; 552 return;
545 } 553 }
554
555 /* Set all timers to use the 2.4 MHz TIMCLK */
556 val = readl(src_base + SRC_CR);
557 val |= SRC_CR_T0_ENSEL;
558 val |= SRC_CR_T1_ENSEL;
559 val |= SRC_CR_T2_ENSEL;
560 val |= SRC_CR_T3_ENSEL;
561 val |= SRC_CR_T4_ENSEL;
562 val |= SRC_CR_T5_ENSEL;
563 val |= SRC_CR_T6_ENSEL;
564 val |= SRC_CR_T7_ENSEL;
565 writel(val, src_base + SRC_CR);
566
546 val = readl(src_base + SRC_XTALCR); 567 val = readl(src_base + SRC_XTALCR);
547 pr_info("SXTALO is %s\n", 568 pr_info("SXTALO is %s\n",
548 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled"); 569 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index fc777bdc1886..81a202d12a7a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
39}; 39};
40 40
41static const u32 a370_tclk_freqs[] __initconst = { 41static const u32 a370_tclk_freqs[] __initconst = {
42 16600000, 42 166000000,
43 20000000, 43 200000000,
44}; 44};
45 45
46static u32 __init a370_get_tclk_freq(void __iomem *sar) 46static u32 __init a370_get_tclk_freq(void __iomem *sar)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5bb848cac6ec..81dd31a686df 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -49,7 +49,7 @@
49#define SOCFPGA_L4_SP_CLK "l4_sp_clk" 49#define SOCFPGA_L4_SP_CLK "l4_sp_clk"
50#define SOCFPGA_NAND_CLK "nand_clk" 50#define SOCFPGA_NAND_CLK "nand_clk"
51#define SOCFPGA_NAND_X_CLK "nand_x_clk" 51#define SOCFPGA_NAND_X_CLK "nand_x_clk"
52#define SOCFPGA_MMC_CLK "mmc_clk" 52#define SOCFPGA_MMC_CLK "sdmmc_clk"
53#define SOCFPGA_DB_CLK "gpio_db_clk" 53#define SOCFPGA_DB_CLK "gpio_db_clk"
54 54
55#define div_mask(width) ((1 << (width)) - 1) 55#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4aa7277..f5e4c21b301f 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
107 107
108 vco = icst_hz_to_vco(icst->params, rate); 108 vco = icst_hz_to_vco(icst->params, rate);
109 icst->rate = icst_hz(icst->params, vco); 109 icst->rate = icst_hz(icst->params, vco);
110 vco_set(icst->vcoreg, icst->lockreg, vco); 110 vco_set(icst->lockreg, icst->vcoreg, vco);
111 return 0; 111 return 0;
112} 112}
113 113
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128cce9b..c73fc2b74de2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
65 65
66 msg = (struct cn_msg *)buffer; 66 msg = (struct cn_msg *)buffer;
67 ev = (struct proc_event *)msg->data; 67 ev = (struct proc_event *)msg->data;
68 memset(&ev->event_data, 0, sizeof(ev->event_data));
68 get_seq(&msg->seq, &ev->cpu); 69 get_seq(&msg->seq, &ev->cpu);
69 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 70 ktime_get_ts(&ts); /* get high res monotonic timestamp */
70 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 71 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
80 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 81 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
81 msg->ack = 0; /* not used */ 82 msg->ack = 0; /* not used */
82 msg->len = sizeof(*ev); 83 msg->len = sizeof(*ev);
84 msg->flags = 0; /* not used */
83 /* If cn_netlink_send() failed, the data is not sent */ 85 /* If cn_netlink_send() failed, the data is not sent */
84 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 86 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
85} 87}
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
96 98
97 msg = (struct cn_msg *)buffer; 99 msg = (struct cn_msg *)buffer;
98 ev = (struct proc_event *)msg->data; 100 ev = (struct proc_event *)msg->data;
101 memset(&ev->event_data, 0, sizeof(ev->event_data));
99 get_seq(&msg->seq, &ev->cpu); 102 get_seq(&msg->seq, &ev->cpu);
100 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 103 ktime_get_ts(&ts); /* get high res monotonic timestamp */
101 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 104 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
106 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 109 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
107 msg->ack = 0; /* not used */ 110 msg->ack = 0; /* not used */
108 msg->len = sizeof(*ev); 111 msg->len = sizeof(*ev);
112 msg->flags = 0; /* not used */
109 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 113 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
110} 114}
111 115
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
122 126
123 msg = (struct cn_msg *)buffer; 127 msg = (struct cn_msg *)buffer;
124 ev = (struct proc_event *)msg->data; 128 ev = (struct proc_event *)msg->data;
129 memset(&ev->event_data, 0, sizeof(ev->event_data));
125 ev->what = which_id; 130 ev->what = which_id;
126 ev->event_data.id.process_pid = task->pid; 131 ev->event_data.id.process_pid = task->pid;
127 ev->event_data.id.process_tgid = task->tgid; 132 ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
145 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 150 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
146 msg->ack = 0; /* not used */ 151 msg->ack = 0; /* not used */
147 msg->len = sizeof(*ev); 152 msg->len = sizeof(*ev);
153 msg->flags = 0; /* not used */
148 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 154 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
149} 155}
150 156
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
160 166
161 msg = (struct cn_msg *)buffer; 167 msg = (struct cn_msg *)buffer;
162 ev = (struct proc_event *)msg->data; 168 ev = (struct proc_event *)msg->data;
169 memset(&ev->event_data, 0, sizeof(ev->event_data));
163 get_seq(&msg->seq, &ev->cpu); 170 get_seq(&msg->seq, &ev->cpu);
164 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 171 ktime_get_ts(&ts); /* get high res monotonic timestamp */
165 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 172 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
170 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 177 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
171 msg->ack = 0; /* not used */ 178 msg->ack = 0; /* not used */
172 msg->len = sizeof(*ev); 179 msg->len = sizeof(*ev);
180 msg->flags = 0; /* not used */
173 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 181 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
174} 182}
175 183
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
185 193
186 msg = (struct cn_msg *)buffer; 194 msg = (struct cn_msg *)buffer;
187 ev = (struct proc_event *)msg->data; 195 ev = (struct proc_event *)msg->data;
196 memset(&ev->event_data, 0, sizeof(ev->event_data));
188 get_seq(&msg->seq, &ev->cpu); 197 get_seq(&msg->seq, &ev->cpu);
189 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 198 ktime_get_ts(&ts); /* get high res monotonic timestamp */
190 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 199 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
203 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 212 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
204 msg->ack = 0; /* not used */ 213 msg->ack = 0; /* not used */
205 msg->len = sizeof(*ev); 214 msg->len = sizeof(*ev);
215 msg->flags = 0; /* not used */
206 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 216 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
207} 217}
208 218
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
218 228
219 msg = (struct cn_msg *)buffer; 229 msg = (struct cn_msg *)buffer;
220 ev = (struct proc_event *)msg->data; 230 ev = (struct proc_event *)msg->data;
231 memset(&ev->event_data, 0, sizeof(ev->event_data));
221 get_seq(&msg->seq, &ev->cpu); 232 get_seq(&msg->seq, &ev->cpu);
222 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 233 ktime_get_ts(&ts); /* get high res monotonic timestamp */
223 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 234 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
229 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 240 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
230 msg->ack = 0; /* not used */ 241 msg->ack = 0; /* not used */
231 msg->len = sizeof(*ev); 242 msg->len = sizeof(*ev);
243 msg->flags = 0; /* not used */
232 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 244 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
233} 245}
234 246
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
244 256
245 msg = (struct cn_msg *)buffer; 257 msg = (struct cn_msg *)buffer;
246 ev = (struct proc_event *)msg->data; 258 ev = (struct proc_event *)msg->data;
259 memset(&ev->event_data, 0, sizeof(ev->event_data));
247 get_seq(&msg->seq, &ev->cpu); 260 get_seq(&msg->seq, &ev->cpu);
248 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 261 ktime_get_ts(&ts); /* get high res monotonic timestamp */
249 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 262 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
254 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 267 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
255 msg->ack = 0; /* not used */ 268 msg->ack = 0; /* not used */
256 msg->len = sizeof(*ev); 269 msg->len = sizeof(*ev);
270 msg->flags = 0; /* not used */
257 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 271 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
258} 272}
259 273
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
269 283
270 msg = (struct cn_msg *)buffer; 284 msg = (struct cn_msg *)buffer;
271 ev = (struct proc_event *)msg->data; 285 ev = (struct proc_event *)msg->data;
286 memset(&ev->event_data, 0, sizeof(ev->event_data));
272 get_seq(&msg->seq, &ev->cpu); 287 get_seq(&msg->seq, &ev->cpu);
273 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 288 ktime_get_ts(&ts); /* get high res monotonic timestamp */
274 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 289 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
281 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 296 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
282 msg->ack = 0; /* not used */ 297 msg->ack = 0; /* not used */
283 msg->len = sizeof(*ev); 298 msg->len = sizeof(*ev);
299 msg->flags = 0; /* not used */
284 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 300 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
285} 301}
286 302
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
304 320
305 msg = (struct cn_msg *)buffer; 321 msg = (struct cn_msg *)buffer;
306 ev = (struct proc_event *)msg->data; 322 ev = (struct proc_event *)msg->data;
323 memset(&ev->event_data, 0, sizeof(ev->event_data));
307 msg->seq = rcvd_seq; 324 msg->seq = rcvd_seq;
308 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 325 ktime_get_ts(&ts); /* get high res monotonic timestamp */
309 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 326 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
313 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 330 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
314 msg->ack = rcvd_ack + 1; 331 msg->ack = rcvd_ack + 1;
315 msg->len = sizeof(*ev); 332 msg->len = sizeof(*ev);
333 msg->flags = 0; /* not used */
316 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 334 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
317} 335}
318 336
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa758942c..a36749f1e44a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
109 109
110 data = nlmsg_data(nlh); 110 data = nlmsg_data(nlh);
111 111
112 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, size);
113 113
114 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
115 115
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
157static void cn_rx_skb(struct sk_buff *__skb) 157static void cn_rx_skb(struct sk_buff *__skb)
158{ 158{
159 struct nlmsghdr *nlh; 159 struct nlmsghdr *nlh;
160 int err;
161 struct sk_buff *skb; 160 struct sk_buff *skb;
161 int len, err;
162 162
163 skb = skb_get(__skb); 163 skb = skb_get(__skb);
164 164
165 if (skb->len >= NLMSG_HDRLEN) { 165 if (skb->len >= NLMSG_HDRLEN) {
166 nlh = nlmsg_hdr(skb); 166 nlh = nlmsg_hdr(skb);
167 len = nlmsg_len(nlh);
167 168
168 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 169 if (len < (int)sizeof(struct cn_msg) ||
169 skb->len < nlh->nlmsg_len || 170 skb->len < nlh->nlmsg_len ||
170 nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { 171 len > CONNECTOR_MAX_MSG_SIZE) {
171 kfree_skb(skb); 172 kfree_skb(skb);
172 return; 173 return;
173 } 174 }
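The connector.c hunks above tighten message validation: the payload length is derived once via nlmsg_len() and checked against the fixed header size, the bytes actually received, and the protocol limit before anything in the message is trusted, and the send path copies only the validated size. A sketch of that pattern with a hypothetical header layout (not struct cn_msg or nlmsghdr):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSG_MAX_SIZE 16384u

struct msg_hdr {
    uint32_t id;
    uint16_t len;      /* declared payload length (sender-controlled) */
    uint16_t flags;
};

static bool msg_is_valid(const struct msg_hdr *hdr, size_t recv_len)
{
    if (recv_len < sizeof(*hdr))
        return false;                               /* truncated header     */
    if (hdr->len > MSG_MAX_SIZE)
        return false;                               /* over the hard cap    */
    if (sizeof(*hdr) + (size_t)hdr->len > recv_len)
        return false;                               /* claims more than sent */
    return true;
}

int main(void)
{
    struct msg_hdr h = { .id = 1, .len = 4096, .flags = 0 };

    printf("valid: %d\n", msg_is_valid(&h, sizeof(h) + 16));    /* 0: lies  */
    printf("valid: %d\n", msg_is_valid(&h, sizeof(h) + 4096));  /* 1: fits  */
    return 0;
}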
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d2c3253e015e..506fd23c7550 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 if (acpi_disabled)
990 return -ENODEV;
991
989 /* don't keep reloading if cpufreq_driver exists */ 992 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver()) 993 if (cpufreq_get_current_driver())
991 return 0; 994 return -EEXIST;
992
993 if (acpi_disabled)
994 return 0;
995 995
996 pr_debug("acpi_cpufreq_init\n"); 996 pr_debug("acpi_cpufreq_init\n");
997 997
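The acpi-cpufreq hunk above reorders the init-time checks so that "ACPI is off" is tested first and reported as -ENODEV, while "another cpufreq driver already registered" now returns -EEXIST instead of silently claiming success. A trivial userspace sketch of the ordering, with stand-in flags rather than the cpufreq API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool acpi_disabled;            /* assumed flag, set elsewhere */
static bool other_driver_registered;  /* assumed flag, set elsewhere */

static int driver_init(void)
{
    if (acpi_disabled)
        return -ENODEV;               /* nothing to drive            */
    if (other_driver_registered)
        return -EEXIST;               /* don't keep reloading        */
    /* ... real registration would go here ... */
    return 0;
}

int main(void)
{
    acpi_disabled = true;
    printf("init -> %d\n", driver_init());   /* prints a negative errno */
    return 0;
}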
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 78c49d8e0f4a..c522a95c0e16 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -229,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
229 if (of_property_read_u32(np, "clock-latency", &transition_latency)) 229 if (of_property_read_u32(np, "clock-latency", &transition_latency))
230 transition_latency = CPUFREQ_ETERNAL; 230 transition_latency = CPUFREQ_ETERNAL;
231 231
232 if (cpu_reg) { 232 if (!IS_ERR(cpu_reg)) {
233 struct opp *opp; 233 struct opp *opp;
234 unsigned long min_uV, max_uV; 234 unsigned long min_uV, max_uV;
235 int i; 235 int i;
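The one-liner above fixes a classic error-pointer check: a failed regulator lookup returns an encoded error pointer, which is never NULL, so "if (cpu_reg)" was always true. A simplified userspace re-statement of the ERR_PTR/IS_ERR convention (the kernel's versions live in <linux/err.h>):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)    { return (void *)(intptr_t)error; }
static inline long  PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int   IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *get_regulator(int available)      /* pretend lookup */
{
    static int dummy;
    return available ? (void *)&dummy : ERR_PTR(-ENODEV);
}

int main(void)
{
    void *reg = get_regulator(0);

    if (reg)              /* always true: ERR_PTR(-ENODEV) is not NULL */
        printf("naive check thinks we have a regulator\n");
    if (IS_ERR(reg))      /* the correct test */
        printf("IS_ERR: error %ld\n", PTR_ERR(reg));
    return 0;
}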
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9733f29ed148..eb3fdc755000 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 int core_pct_busy; 51 int32_t core_pct_busy;
52 u64 aperf; 52 u64 aperf;
53 u64 mperf; 53 u64 mperf;
54 int freq; 54 int freq;
@@ -68,7 +68,7 @@ struct _pid {
68 int32_t i_gain; 68 int32_t i_gain;
69 int32_t d_gain; 69 int32_t d_gain;
70 int deadband; 70 int deadband;
71 int last_err; 71 int32_t last_err;
72}; 72};
73 73
74struct cpudata { 74struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
154} 154}
155 155
156static signed int pid_calc(struct _pid *pid, int busy) 156static signed int pid_calc(struct _pid *pid, int32_t busy)
157{ 157{
158 signed int err, result; 158 signed int result;
159 int32_t pterm, dterm, fp_error; 159 int32_t pterm, dterm, fp_error;
160 int32_t integral_limit; 160 int32_t integral_limit;
161 161
162 err = pid->setpoint - busy; 162 fp_error = int_tofp(pid->setpoint) - busy;
163 fp_error = int_tofp(err);
164 163
165 if (abs(err) <= pid->deadband) 164 if (abs(fp_error) <= int_tofp(pid->deadband))
166 return 0; 165 return 0;
167 166
168 pterm = mul_fp(pid->p_gain, fp_error); 167 pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
176 if (pid->integral < -integral_limit) 175 if (pid->integral < -integral_limit)
177 pid->integral = -integral_limit; 176 pid->integral = -integral_limit;
178 177
179 dterm = mul_fp(pid->d_gain, (err - pid->last_err)); 178 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
180 pid->last_err = err; 179 pid->last_err = fp_error;
181 180
182 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 181 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
183 182
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
367static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 366static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
368{ 367{
369 int max_perf = cpu->pstate.turbo_pstate; 368 int max_perf = cpu->pstate.turbo_pstate;
369 int max_perf_adj;
370 int min_perf; 370 int min_perf;
371 if (limits.no_turbo) 371 if (limits.no_turbo)
372 max_perf = cpu->pstate.max_pstate; 372 max_perf = cpu->pstate.max_pstate;
373 373
374 max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 374 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
375 *max = clamp_t(int, max_perf, 375 *max = clamp_t(int, max_perf_adj,
376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
377 377
378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); 378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
384{ 384{
385 int max_perf, min_perf; 385 int max_perf, min_perf;
386 u64 val;
386 387
387 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 388 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
388 389
@@ -394,8 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 395 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 396
396 cpu->pstate.current_pstate = pstate; 397 cpu->pstate.current_pstate = pstate;
397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 398 val = pstate << 8;
399 if (limits.no_turbo)
400 val |= (u64)1 << 32;
398 401
402 wrmsrl(MSR_IA32_PERF_CTL, val);
399} 403}
400 404
401static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) 405static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -432,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
432 struct sample *sample) 436 struct sample *sample)
433{ 437{
434 u64 core_pct; 438 u64 core_pct;
435 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 439 core_pct = div64_u64(int_tofp(sample->aperf * 100),
436 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 440 sample->mperf);
441 sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
437 442
438 sample->core_pct_busy = core_pct; 443 sample->core_pct_busy = core_pct;
439} 444}
@@ -465,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
465 mod_timer_pinned(&cpu->timer, jiffies + delay); 470 mod_timer_pinned(&cpu->timer, jiffies + delay);
466} 471}
467 472
468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 473static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
469{ 474{
470 int32_t busy_scaled;
471 int32_t core_busy, max_pstate, current_pstate; 475 int32_t core_busy, max_pstate, current_pstate;
472 476
473 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 477 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
474 max_pstate = int_tofp(cpu->pstate.max_pstate); 478 max_pstate = int_tofp(cpu->pstate.max_pstate);
475 current_pstate = int_tofp(cpu->pstate.current_pstate); 479 current_pstate = int_tofp(cpu->pstate.current_pstate);
476 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 480 return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
477
478 return fp_toint(busy_scaled);
479} 481}
480 482
481static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 483static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
482{ 484{
483 int busy_scaled; 485 int32_t busy_scaled;
484 struct _pid *pid; 486 struct _pid *pid;
485 signed int ctl = 0; 487 signed int ctl = 0;
486 int steps; 488 int steps;
@@ -634,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
634 636
635static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 637static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
636{ 638{
637 int rc, min_pstate, max_pstate;
638 struct cpudata *cpu; 639 struct cpudata *cpu;
640 int rc;
639 641
640 rc = intel_pstate_init_cpu(policy->cpu); 642 rc = intel_pstate_init_cpu(policy->cpu);
641 if (rc) 643 if (rc)
@@ -649,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
649 else 651 else
650 policy->policy = CPUFREQ_POLICY_POWERSAVE; 652 policy->policy = CPUFREQ_POLICY_POWERSAVE;
651 653
652 intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); 654 policy->min = cpu->pstate.min_pstate * 100000;
653 policy->min = min_pstate * 100000; 655 policy->max = cpu->pstate.turbo_pstate * 100000;
654 policy->max = max_pstate * 100000;
655 656
656 /* cpuinfo and default policy values */ 657 /* cpuinfo and default policy values */
657 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; 658 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
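The intel_pstate changes above keep core_pct_busy and the PID error term in 32-bit fixed point instead of rounding to an integer early, so fractional busyness survives until the final conversion. A userspace re-statement of the Q8 helpers the driver defines for itself (intel_pstate uses FRAC_BITS == 8; the names below mirror its int_tofp/fp_toint/mul_fp/div_fp but are re-implemented here):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8

static inline int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static inline int32_t fp_toint(int32_t x) { return x >> FRAC_BITS; }

static inline int32_t mul_fp(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x * y) >> FRAC_BITS);
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
    /* 87% busy at pstate 16 with a max pstate of 24: scale by max/current. */
    int32_t core_busy = int_tofp(87);
    int32_t scaled    = mul_fp(core_busy, div_fp(int_tofp(24), int_tofp(16)));

    printf("scaled busy = %d.%02d%%\n",
           fp_toint(scaled), (scaled & 0xff) * 100 / 256);   /* 130.50% */
    return 0;
}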
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 8a72b0c555f8..15631f92ab7d 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
166 if (freq->frequency == CPUFREQ_ENTRY_INVALID) 166 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
167 continue; 167 continue;
168 168
169 dvfs = &s3c64xx_dvfs_table[freq->index]; 169 dvfs = &s3c64xx_dvfs_table[freq->driver_data];
170 found = 0; 170 found = 0;
171 171
172 for (i = 0; i < count; i++) { 172 for (i = 0; i < count; i++) {
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 19e364fa5955..3f418166ce02 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
113 unsigned int target_freq, unsigned int relation) 113 unsigned int target_freq, unsigned int relation)
114{ 114{
115 struct cpufreq_freqs freqs; 115 struct cpufreq_freqs freqs;
116 unsigned long newfreq; 116 long newfreq;
117 struct clk *srcclk; 117 struct clk *srcclk;
118 int index, ret, mult = 1; 118 int index, ret, mult = 1;
119 119
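The spear-cpufreq change above is a signedness fix: clk_round_rate() can return a negative errno, and storing that in an unsigned long makes the later error check unreachable. A small sketch of the pitfall with a stand-in for the clock API:

#include <errno.h>
#include <stdio.h>

static long round_rate(long requested)      /* stand-in for clk_round_rate() */
{
    if (requested <= 0)
        return -EINVAL;                     /* negative errno on failure     */
    return requested - (requested % 1000);  /* pretend hardware granularity  */
}

int main(void)
{
    unsigned long bad  = round_rate(-1);    /* error wraps to a huge value   */
    long          good = round_rate(-1);

    printf("unsigned: %lu (looks like a valid rate)\n", bad);
    printf("signed:   %ld (%s)\n", good, good < 0 ? "error detected" : "ok");
    return 0;
}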
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 526ec77c7ba0..f238cfd33847 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -198,6 +198,7 @@ config TI_EDMA
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
201 select TI_PRIV_EDMA
201 default n 202 default n
202 help 203 help
203 Enable support for the TI EDMA controller. This DMA 204 Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ff50ff4c6a57..10b577fcf48d 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -305,7 +305,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
305 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 305 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
306 EDMA_SLOT_ANY); 306 EDMA_SLOT_ANY);
307 if (echan->slot[i] < 0) { 307 if (echan->slot[i] < 0) {
308 kfree(edesc);
308 dev_err(dev, "Failed to allocate slot\n"); 309 dev_err(dev, "Failed to allocate slot\n");
310 kfree(edesc);
309 return NULL; 311 return NULL;
310 } 312 }
311 } 313 }
@@ -345,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
345 ccnt = sg_dma_len(sg) / (acnt * bcnt); 347 ccnt = sg_dma_len(sg) / (acnt * bcnt);
346 if (ccnt > (SZ_64K - 1)) { 348 if (ccnt > (SZ_64K - 1)) {
347 dev_err(dev, "Exceeded max SG segment size\n"); 349 dev_err(dev, "Exceeded max SG segment size\n");
350 kfree(edesc);
348 return NULL; 351 return NULL;
349 } 352 }
350 cidx = acnt * bcnt; 353 cidx = acnt * bcnt;
@@ -749,6 +752,6 @@ static void __exit edma_exit(void)
749} 752}
750module_exit(edma_exit); 753module_exit(edma_exit);
751 754
752MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 755MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
753MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 756MODULE_DESCRIPTION("TI EDMA DMA engine driver");
754MODULE_LICENSE("GPL v2"); 757MODULE_LICENSE("GPL v2");
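The two edma.c hunks above add the missing kfree(edesc) on the early-return paths of the slave_sg prepare routine, so a slot-allocation failure or an oversized segment no longer leaks the partially built descriptor. A sketch of the cleanup-on-error pattern with hypothetical names, not the edma API:

#include <stdio.h>
#include <stdlib.h>

struct desc {
    int nr_slots;
    int slots[8];
};

static int alloc_slot(int i)               /* pretend slot allocator; may fail */
{
    return (i < 6) ? i : -1;
}

static struct desc *prep_desc(int nr_slots)
{
    struct desc *d = calloc(1, sizeof(*d));
    if (!d)
        return NULL;

    d->nr_slots = nr_slots;
    for (int i = 0; i < nr_slots && i < 8; i++) {
        d->slots[i] = alloc_slot(i);
        if (d->slots[i] < 0) {
            free(d);                        /* the step the original code missed */
            return NULL;
        }
    }
    return d;
}

int main(void)
{
    struct desc *d = prep_desc(7);          /* fails at slot 6, but does not leak */
    printf("desc: %p\n", (void *)d);
    free(d);                                /* free(NULL) is a no-op */
    return 0;
}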
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 78f8ca5fccee..55852c026791 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags;
440 441
441 spin_lock(&imxdma->lock); 442 spin_lock_irqsave(&imxdma->lock, flags);
442 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
443 spin_unlock(&imxdma->lock); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
444 goto out; 445 goto out;
445 } 446 }
446 447
447 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
448 struct imxdma_desc, 449 struct imxdma_desc,
449 node); 450 node);
450 spin_unlock(&imxdma->lock); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
451 452
452 if (desc->sg) { 453 if (desc->sg) {
453 u32 tmp; 454 u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
519{ 520{
520 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
521 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 unsigned long flags;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 spin_lock_irqsave(&imxdma->lock, flags);
531 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
532 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
533 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
537 slot = i; 536 slot = i;
538 break; 537 break;
539 } 538 }
540 if (slot < 0) { 539 if (slot < 0)
541 spin_unlock_irqrestore(&imxdma->lock, flags);
542 return -EBUSY; 540 return -EBUSY;
543 }
544 541
545 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
546 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
549 546
550 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
551 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
552 spin_unlock_irqrestore(&imxdma->lock, flags);
553 549
554 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
555 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
625 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
626 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
627 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags;
628 625
629 spin_lock(&imxdma->lock); 626 spin_lock_irqsave(&imxdma->lock, flags);
630 627
631 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
632 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
633 goto out; 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return;
634 } 632 }
635 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
636 634
637 if (desc->desc.callback)
638 desc->desc.callback(desc->desc.callback_param);
639
640 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
641 * and dont mark the descriptor as complete. 636 * and dont mark the descriptor as complete.
642 * Only in non-cyclic cases it would be marked as complete 637 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
663 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
664 } 659 }
665out: 660out:
666 spin_unlock(&imxdma->lock); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662
663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param);
665
667} 666}
668 667
669static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
883 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
884 883
885 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
886 sizeof(struct scatterlist), GFP_KERNEL); 885 sizeof(struct scatterlist), GFP_ATOMIC);
887 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
888 return NULL; 887 return NULL;
889 888
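The imx-dma changes above do two things: the channel lock is taken with spin_lock_irqsave() because it is also used from interrupt context, and the completion callback is now invoked only after the lock has been dropped, so a callback that resubmits work cannot deadlock on the same lock. A pthread-based userspace analogue of the second point (not the kernel locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 1;                          /* one queued descriptor  */

static void completion_callback(void)
{
    /* A real client callback might submit more work, i.e. take 'lock'
     * again; that is only safe because it is called with the lock released. */
    pthread_mutex_lock(&lock);
    pending++;
    pthread_mutex_unlock(&lock);
}

static void tasklet(void)
{
    int have_work;

    pthread_mutex_lock(&lock);
    have_work = pending > 0;
    if (have_work)
        pending--;                               /* dequeue under the lock */
    pthread_mutex_unlock(&lock);                 /* drop it first ...      */

    if (have_work)
        completion_callback();                   /* ... then call back     */
}

int main(void)
{
    tasklet();
    printf("pending after tasklet: %d\n", pending);
    return 0;
}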
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 45a520281ce1..ebad84591a6e 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
93 void __iomem *base; 93 void __iomem *base;
94 const struct hpb_dmae_slave_config *cfg; 94 const struct hpb_dmae_slave_config *cfg;
95 char dev_id[16]; /* unique name per DMAC of channel */ 95 char dev_id[16]; /* unique name per DMAC of channel */
96 dma_addr_t slave_addr;
96}; 97};
97 98
98struct hpb_dmae_device { 99struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
432 hpb_chan->xfer_mode = XFER_DOUBLE; 433 hpb_chan->xfer_mode = XFER_DOUBLE;
433 } else { 434 } else {
434 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); 435 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
435 shdma_free_irq(&hpb_chan->shdma_chan);
436 return -EINVAL; 436 return -EINVAL;
437 } 437 }
438 438
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
446 return 0; 446 return 0;
447} 447}
448 448
449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) 449static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
450 dma_addr_t slave_addr, bool try)
450{ 451{
451 struct hpb_dmae_chan *chan = to_chan(schan); 452 struct hpb_dmae_chan *chan = to_chan(schan);
452 const struct hpb_dmae_slave_config *sc = 453 const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
457 if (try) 458 if (try)
458 return 0; 459 return 0;
459 chan->cfg = sc; 460 chan->cfg = sc;
461 chan->slave_addr = slave_addr ? : sc->addr;
460 return hpb_dmae_alloc_chan_resources(chan, sc); 462 return hpb_dmae_alloc_chan_resources(chan, sc);
461} 463}
462 464
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
468{ 470{
469 struct hpb_dmae_chan *chan = to_chan(schan); 471 struct hpb_dmae_chan *chan = to_chan(schan);
470 472
471 return chan->cfg->addr; 473 return chan->slave_addr;
472} 474}
473 475
474static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) 476static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
614 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { 616 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
615 BUG_ON(!schan); 617 BUG_ON(!schan);
616 618
617 shdma_free_irq(schan);
618 shdma_chan_remove(schan); 619 shdma_chan_remove(schan);
619 } 620 }
620 dma_dev->chancnt = 0; 621 dma_dev->chancnt = 0;
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 2d9ca6055e5e..41b5913ddabe 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data); 248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
249 struct irq_chip *chip = irq_data_get_irq_chip(data); 249 struct irq_chip *chip = irq_data_get_irq_chip(data);
250 u32 base, pin, mask; 250 u32 base, pin, mask;
251 unsigned long reg, pending; 251 unsigned long reg, ena, pending;
252 unsigned virq; 252 unsigned virq;
253 253
254 /* check from GPIO controller which pin triggered the interrupt */ 254 /* check from GPIO controller which pin triggered the interrupt */
255 for (base = 0; base < lg->chip.ngpio; base += 32) { 255 for (base = 0; base < lg->chip.ngpio; base += 32) {
256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); 256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
257 ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
257 258
258 while ((pending = inl(reg))) { 259 while ((pending = (inl(reg) & inl(ena)))) {
259 pin = __ffs(pending); 260 pin = __ffs(pending);
260 mask = BIT(pin); 261 mask = BIT(pin);
261 /* Clear before handling so we don't lose an edge */ 262 /* Clear before handling so we don't lose an edge */
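The Lynxpoint hunk above masks the raw interrupt status with the enable register before dispatching, so a pin that latched a status bit while its interrupt was disabled never reaches the handler loop. A plain-C sketch of the dispatch loop, without the port I/O:

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
    unsigned int status = 0x25;   /* pins 0, 2 and 5 latched              */
    unsigned int enable = 0x21;   /* only pins 0 and 5 have IRQs enabled  */
    unsigned int pending;

    while ((pending = status & enable)) {
        int pin = ffs(pending) - 1;          /* lowest pending, enabled pin */
        status &= ~(1u << pin);              /* "ack" it before handling    */
        printf("dispatch pin %d\n", pin);    /* pin 2 is never dispatched   */
    }
    return 0;
}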
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ff43552d472..89675f862308 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -63,6 +63,7 @@ struct gpio_bank {
63 struct gpio_chip chip; 63 struct gpio_chip chip;
64 struct clk *dbck; 64 struct clk *dbck;
65 u32 mod_usage; 65 u32 mod_usage;
66 u32 irq_usage;
66 u32 dbck_enable_mask; 67 u32 dbck_enable_mask;
67 bool dbck_enabled; 68 bool dbck_enabled;
68 struct device *dev; 69 struct device *dev;
@@ -86,6 +87,9 @@ struct gpio_bank {
86#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 87#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
87#define GPIO_MOD_CTRL_BIT BIT(0) 88#define GPIO_MOD_CTRL_BIT BIT(0)
88 89
90#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
91#define LINE_USED(line, offset) (line & (1 << offset))
92
89static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
90{ 94{
91 return bank->chip.base + gpio_irq; 95 return bank->chip.base + gpio_irq;
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
420 return 0; 424 return 0;
421} 425}
422 426
427static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
428{
429 if (bank->regs->pinctrl) {
430 void __iomem *reg = bank->base + bank->regs->pinctrl;
431
432 /* Claim the pin for MPU */
433 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
434 }
435
436 if (bank->regs->ctrl && !BANK_USED(bank)) {
437 void __iomem *reg = bank->base + bank->regs->ctrl;
438 u32 ctrl;
439
440 ctrl = __raw_readl(reg);
441 /* Module is enabled, clocks are not gated */
442 ctrl &= ~GPIO_MOD_CTRL_BIT;
443 __raw_writel(ctrl, reg);
444 bank->context.ctrl = ctrl;
445 }
446}
447
448static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
449{
450 void __iomem *base = bank->base;
451
452 if (bank->regs->wkup_en &&
453 !LINE_USED(bank->mod_usage, offset) &&
454 !LINE_USED(bank->irq_usage, offset)) {
455 /* Disable wake-up during idle for dynamic tick */
456 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
457 bank->context.wake_en =
458 __raw_readl(bank->base + bank->regs->wkup_en);
459 }
460
461 if (bank->regs->ctrl && !BANK_USED(bank)) {
462 void __iomem *reg = bank->base + bank->regs->ctrl;
463 u32 ctrl;
464
465 ctrl = __raw_readl(reg);
466 /* Module is disabled, clocks are gated */
467 ctrl |= GPIO_MOD_CTRL_BIT;
468 __raw_writel(ctrl, reg);
469 bank->context.ctrl = ctrl;
470 }
471}
472
473static int gpio_is_input(struct gpio_bank *bank, int mask)
474{
475 void __iomem *reg = bank->base + bank->regs->direction;
476
477 return __raw_readl(reg) & mask;
478}
479
423static int gpio_irq_type(struct irq_data *d, unsigned type) 480static int gpio_irq_type(struct irq_data *d, unsigned type)
424{ 481{
425 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 482 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
426 unsigned gpio = 0; 483 unsigned gpio = 0;
427 int retval; 484 int retval;
428 unsigned long flags; 485 unsigned long flags;
486 unsigned offset;
429 487
430 if (WARN_ON(!bank->mod_usage)) 488 if (!BANK_USED(bank))
431 return -EINVAL; 489 pm_runtime_get_sync(bank->dev);
432 490
433#ifdef CONFIG_ARCH_OMAP1 491#ifdef CONFIG_ARCH_OMAP1
434 if (d->irq > IH_MPUIO_BASE) 492 if (d->irq > IH_MPUIO_BASE)
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
446 return -EINVAL; 504 return -EINVAL;
447 505
448 spin_lock_irqsave(&bank->lock, flags); 506 spin_lock_irqsave(&bank->lock, flags);
449 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 507 offset = GPIO_INDEX(bank, gpio);
508 retval = _set_gpio_triggering(bank, offset, type);
509 if (!LINE_USED(bank->mod_usage, offset)) {
510 _enable_gpio_module(bank, offset);
511 _set_gpio_direction(bank, offset, 1);
512 } else if (!gpio_is_input(bank, 1 << offset)) {
513 spin_unlock_irqrestore(&bank->lock, flags);
514 return -EINVAL;
515 }
516
517 bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio);
450 spin_unlock_irqrestore(&bank->lock, flags); 518 spin_unlock_irqrestore(&bank->lock, flags);
451 519
452 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 520 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
603 * If this is the first gpio_request for the bank, 671 * If this is the first gpio_request for the bank,
604 * enable the bank module. 672 * enable the bank module.
605 */ 673 */
606 if (!bank->mod_usage) 674 if (!BANK_USED(bank))
607 pm_runtime_get_sync(bank->dev); 675 pm_runtime_get_sync(bank->dev);
608 676
609 spin_lock_irqsave(&bank->lock, flags); 677 spin_lock_irqsave(&bank->lock, flags);
610 /* Set trigger to none. You need to enable the desired trigger with 678 /* Set trigger to none. You need to enable the desired trigger with
611 * request_irq() or set_irq_type(). 679 * request_irq() or set_irq_type(). Only do this if the IRQ line has
680 * not already been requested.
612 */ 681 */
613 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 682 if (!LINE_USED(bank->irq_usage, offset)) {
614 683 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
615 if (bank->regs->pinctrl) { 684 _enable_gpio_module(bank, offset);
616 void __iomem *reg = bank->base + bank->regs->pinctrl;
617
618 /* Claim the pin for MPU */
619 __raw_writel(__raw_readl(reg) | (1 << offset), reg);
620 }
621
622 if (bank->regs->ctrl && !bank->mod_usage) {
623 void __iomem *reg = bank->base + bank->regs->ctrl;
624 u32 ctrl;
625
626 ctrl = __raw_readl(reg);
627 /* Module is enabled, clocks are not gated */
628 ctrl &= ~GPIO_MOD_CTRL_BIT;
629 __raw_writel(ctrl, reg);
630 bank->context.ctrl = ctrl;
631 } 685 }
632
633 bank->mod_usage |= 1 << offset; 686 bank->mod_usage |= 1 << offset;
634
635 spin_unlock_irqrestore(&bank->lock, flags); 687 spin_unlock_irqrestore(&bank->lock, flags);
636 688
637 return 0; 689 return 0;
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
640static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 692static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
641{ 693{
642 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 694 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
643 void __iomem *base = bank->base;
644 unsigned long flags; 695 unsigned long flags;
645 696
646 spin_lock_irqsave(&bank->lock, flags); 697 spin_lock_irqsave(&bank->lock, flags);
647
648 if (bank->regs->wkup_en) {
649 /* Disable wake-up during idle for dynamic tick */
650 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
651 bank->context.wake_en =
652 __raw_readl(bank->base + bank->regs->wkup_en);
653 }
654
655 bank->mod_usage &= ~(1 << offset); 698 bank->mod_usage &= ~(1 << offset);
656 699 _disable_gpio_module(bank, offset);
657 if (bank->regs->ctrl && !bank->mod_usage) {
658 void __iomem *reg = bank->base + bank->regs->ctrl;
659 u32 ctrl;
660
661 ctrl = __raw_readl(reg);
662 /* Module is disabled, clocks are gated */
663 ctrl |= GPIO_MOD_CTRL_BIT;
664 __raw_writel(ctrl, reg);
665 bank->context.ctrl = ctrl;
666 }
667
668 _reset_gpio(bank, bank->chip.base + offset); 700 _reset_gpio(bank, bank->chip.base + offset);
669 spin_unlock_irqrestore(&bank->lock, flags); 701 spin_unlock_irqrestore(&bank->lock, flags);
670 702
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
672 * If this is the last gpio to be freed in the bank, 704 * If this is the last gpio to be freed in the bank,
673 * disable the bank module. 705 * disable the bank module.
674 */ 706 */
675 if (!bank->mod_usage) 707 if (!BANK_USED(bank))
676 pm_runtime_put(bank->dev); 708 pm_runtime_put(bank->dev);
677} 709}
678 710
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d)
762 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 794 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
763 unsigned int gpio = irq_to_gpio(bank, d->hwirq); 795 unsigned int gpio = irq_to_gpio(bank, d->hwirq);
764 unsigned long flags; 796 unsigned long flags;
797 unsigned offset = GPIO_INDEX(bank, gpio);
765 798
766 spin_lock_irqsave(&bank->lock, flags); 799 spin_lock_irqsave(&bank->lock, flags);
800 bank->irq_usage &= ~(1 << offset);
801 _disable_gpio_module(bank, offset);
767 _reset_gpio(bank, gpio); 802 _reset_gpio(bank, gpio);
768 spin_unlock_irqrestore(&bank->lock, flags); 803 spin_unlock_irqrestore(&bank->lock, flags);
804
805 /*
806 * If this is the last IRQ to be freed in the bank,
807 * disable the bank module.
808 */
809 if (!BANK_USED(bank))
810 pm_runtime_put(bank->dev);
769} 811}
770 812
771static void gpio_ack_irq(struct irq_data *d) 813static void gpio_ack_irq(struct irq_data *d)
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
897 return 0; 939 return 0;
898} 940}
899 941
900static int gpio_is_input(struct gpio_bank *bank, int mask)
901{
902 void __iomem *reg = bank->base + bank->regs->direction;
903
904 return __raw_readl(reg) & mask;
905}
906
907static int gpio_get(struct gpio_chip *chip, unsigned offset) 942static int gpio_get(struct gpio_chip *chip, unsigned offset)
908{ 943{
909 struct gpio_bank *bank; 944 struct gpio_bank *bank;
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
922{ 957{
923 struct gpio_bank *bank; 958 struct gpio_bank *bank;
924 unsigned long flags; 959 unsigned long flags;
960 int retval = 0;
925 961
926 bank = container_of(chip, struct gpio_bank, chip); 962 bank = container_of(chip, struct gpio_bank, chip);
927 spin_lock_irqsave(&bank->lock, flags); 963 spin_lock_irqsave(&bank->lock, flags);
964
965 if (LINE_USED(bank->irq_usage, offset)) {
966 retval = -EINVAL;
967 goto exit;
968 }
969
928 bank->set_dataout(bank, offset, value); 970 bank->set_dataout(bank, offset, value);
929 _set_gpio_direction(bank, offset, 0); 971 _set_gpio_direction(bank, offset, 0);
972
973exit:
930 spin_unlock_irqrestore(&bank->lock, flags); 974 spin_unlock_irqrestore(&bank->lock, flags);
931 return 0; 975 return retval;
932} 976}
933 977
934static int gpio_debounce(struct gpio_chip *chip, unsigned offset, 978static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
1400 struct gpio_bank *bank; 1444 struct gpio_bank *bank;
1401 1445
1402 list_for_each_entry(bank, &omap_gpio_list, node) { 1446 list_for_each_entry(bank, &omap_gpio_list, node) {
1403 if (!bank->mod_usage || !bank->loses_context) 1447 if (!BANK_USED(bank) || !bank->loses_context)
1404 continue; 1448 continue;
1405 1449
1406 bank->power_mode = pwr_mode; 1450 bank->power_mode = pwr_mode;
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void)
1414 struct gpio_bank *bank; 1458 struct gpio_bank *bank;
1415 1459
1416 list_for_each_entry(bank, &omap_gpio_list, node) { 1460 list_for_each_entry(bank, &omap_gpio_list, node) {
1417 if (!bank->mod_usage || !bank->loses_context) 1461 if (!BANK_USED(bank) || !bank->loses_context)
1418 continue; 1462 continue;
1419 1463
1420 pm_runtime_get_sync(bank->dev); 1464 pm_runtime_get_sync(bank->dev);
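The gpio-omap rework above tracks GPIO requests and IRQ requests in separate per-line bitmasks and keeps the bank's module/runtime PM active while either mask is non-empty, which is what the new BANK_USED()/LINE_USED() macros encode. A plain-C sketch of that bookkeeping (stand-in types, not the OMAP driver):

#include <stdio.h>

struct bank {
    unsigned int mod_usage;    /* lines requested as GPIOs             */
    unsigned int irq_usage;    /* lines requested as IRQs              */
    int pm_refcount;           /* stand-in for pm_runtime_get/put      */
};

#define BANK_USED(b)        ((b)->mod_usage || (b)->irq_usage)
#define LINE_USED(mask, i)  ((mask) & (1u << (i)))

static void line_request(struct bank *b, unsigned int *mask, int offset)
{
    if (!BANK_USED(b))
        b->pm_refcount++;          /* first user powers the bank up    */
    *mask |= 1u << offset;
}

static void line_release(struct bank *b, unsigned int *mask, int offset)
{
    *mask &= ~(1u << offset);
    if (!BANK_USED(b))
        b->pm_refcount--;          /* last user lets it idle again     */
}

int main(void)
{
    struct bank b = { 0 };

    line_request(&b, &b.mod_usage, 3);   /* GPIO 3 requested            */
    line_request(&b, &b.irq_usage, 3);   /* same line also used as IRQ  */
    line_release(&b, &b.mod_usage, 3);   /* GPIO freed, IRQ keeps PM    */
    printf("pm refcount: %d\n", b.pm_refcount);   /* still 1            */
    line_release(&b, &b.irq_usage, 3);
    printf("pm refcount: %d\n", b.pm_refcount);   /* back to 0          */
    return 0;
}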
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e3745eb07570..6038966ab045 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p)
293 if (pdata) { 293 if (pdata) {
294 p->config = *pdata; 294 p->config = *pdata;
295 } else if (IS_ENABLED(CONFIG_OF) && np) { 295 } else if (IS_ENABLED(CONFIG_OF) && np) {
296 ret = of_parse_phandle_with_args(np, "gpio-ranges", 296 ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0,
297 "#gpio-range-cells", 0, &args); 297 &args);
298 p->config.number_of_pins = ret == 0 && args.args_count == 3 298 p->config.number_of_pins = ret == 0 ? args.args[2]
299 ? args.args[2]
300 : RCAR_MAX_GPIO_PER_BANK; 299 : RCAR_MAX_GPIO_PER_BANK;
301 p->config.gpio_base = -1; 300 p->config.gpio_base = -1;
302 } 301 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86ef3461ec06..0dee0e0c247a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
136 */ 136 */
137static int desc_to_gpio(const struct gpio_desc *desc) 137static int desc_to_gpio(const struct gpio_desc *desc)
138{ 138{
139 return desc->chip->base + gpio_chip_hwgpio(desc); 139 return desc - &gpio_desc[0];
140} 140}
141 141
142 142
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1398 int status = -EPROBE_DEFER; 1398 int status = -EPROBE_DEFER;
1399 unsigned long flags; 1399 unsigned long flags;
1400 1400
1401 if (!desc || !desc->chip) { 1401 if (!desc) {
1402 pr_warn("%s: invalid GPIO\n", __func__); 1402 pr_warn("%s: invalid GPIO\n", __func__);
1403 return -EINVAL; 1403 return -EINVAL;
1404 } 1404 }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1406 spin_lock_irqsave(&gpio_lock, flags); 1406 spin_lock_irqsave(&gpio_lock, flags);
1407 1407
1408 chip = desc->chip; 1408 chip = desc->chip;
1409 if (chip == NULL)
1410 goto done;
1409 1411
1410 if (!try_module_get(chip->owner)) 1412 if (!try_module_get(chip->owner))
1411 goto done; 1413 goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 138de14134f0..d9137e49c4e8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), 64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -332,9 +332,16 @@ long drm_ioctl(struct file *filp,
332 cmd = ioctl->cmd_drv; 332 cmd = ioctl->cmd_drv;
333 } 333 }
334 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { 334 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
335 u32 drv_size;
336
335 ioctl = &drm_ioctls[nr]; 337 ioctl = &drm_ioctls[nr];
336 cmd = ioctl->cmd; 338
339 drv_size = _IOC_SIZE(ioctl->cmd);
337 usize = asize = _IOC_SIZE(cmd); 340 usize = asize = _IOC_SIZE(cmd);
341 if (drv_size > asize)
342 asize = drv_size;
343
344 cmd = ioctl->cmd;
338 } else 345 } else
339 goto err_i1; 346 goto err_i1;
340 347
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 720352345452..0a19401aff80 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -412,14 +412,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
412 return; 412 return;
413 413
414 /* 414 /*
415 * fbdev->blank can be called from irq context in case of a panic.
416 * Since we already have our own special panic handler which will
417 * restore the fbdev console mode completely, just bail out early.
418 */
419 if (oops_in_progress)
420 return;
421
422 /*
423 * For each CRTC in this fb, turn the connectors on/off. 415 * For each CRTC in this fb, turn the connectors on/off.
424 */ 416 */
425 drm_modeset_lock_all(dev); 417 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 92babac362ec..2db731f00930 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
204 if (IS_ERR(pages)) 204 if (IS_ERR(pages))
205 return PTR_ERR(pages); 205 return PTR_ERR(pages);
206 206
207 gt->npage = gt->gem.size / PAGE_SIZE;
207 gt->pages = pages; 208 gt->pages = pages;
208 209
209 return 0; 210 return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 454e186f7368..43866221cd4c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1772,13 +1772,18 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
1772 struct drm_i915_private *dev_priv = info->dev->dev_private; 1772 struct drm_i915_private *dev_priv = info->dev->dev_private;
1773 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 1773 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1774 1774
1775 if (!atomic_dec_and_test(&pipe_crc->available)) { 1775 spin_lock_irq(&pipe_crc->lock);
1776 atomic_inc(&pipe_crc->available); 1776
1777 if (pipe_crc->opened) {
1778 spin_unlock_irq(&pipe_crc->lock);
1777 return -EBUSY; /* already open */ 1779 return -EBUSY; /* already open */
1778 } 1780 }
1779 1781
1782 pipe_crc->opened = true;
1780 filep->private_data = inode->i_private; 1783 filep->private_data = inode->i_private;
1781 1784
1785 spin_unlock_irq(&pipe_crc->lock);
1786
1782 return 0; 1787 return 0;
1783} 1788}
1784 1789
@@ -1788,7 +1793,9 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
1788 struct drm_i915_private *dev_priv = info->dev->dev_private; 1793 struct drm_i915_private *dev_priv = info->dev->dev_private;
1789 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 1794 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1790 1795
1791 atomic_inc(&pipe_crc->available); /* release the device */ 1796 spin_lock_irq(&pipe_crc->lock);
1797 pipe_crc->opened = false;
1798 spin_unlock_irq(&pipe_crc->lock);
1792 1799
1793 return 0; 1800 return 0;
1794} 1801}
@@ -1800,12 +1807,9 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
1800 1807
1801static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 1808static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
1802{ 1809{
1803 int head, tail; 1810 assert_spin_locked(&pipe_crc->lock);
1804 1811 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
1805 head = atomic_read(&pipe_crc->head); 1812 INTEL_PIPE_CRC_ENTRIES_NR);
1806 tail = atomic_read(&pipe_crc->tail);
1807
1808 return CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR);
1809} 1813}
1810 1814
1811static ssize_t 1815static ssize_t
@@ -1831,20 +1835,30 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
1831 return 0; 1835 return 0;
1832 1836
1833 /* nothing to read */ 1837 /* nothing to read */
1838 spin_lock_irq(&pipe_crc->lock);
1834 while (pipe_crc_data_count(pipe_crc) == 0) { 1839 while (pipe_crc_data_count(pipe_crc) == 0) {
1835 if (filep->f_flags & O_NONBLOCK) 1840 int ret;
1841
1842 if (filep->f_flags & O_NONBLOCK) {
1843 spin_unlock_irq(&pipe_crc->lock);
1836 return -EAGAIN; 1844 return -EAGAIN;
1845 }
1837 1846
1838 if (wait_event_interruptible(pipe_crc->wq, 1847 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
1839 pipe_crc_data_count(pipe_crc))) 1848 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
1840 return -ERESTARTSYS; 1849 if (ret) {
1850 spin_unlock_irq(&pipe_crc->lock);
1851 return ret;
1852 }
1841 } 1853 }
1842 1854
1843 /* We now have one or more entries to read */ 1855 /* We now have one or more entries to read */
1844 head = atomic_read(&pipe_crc->head); 1856 head = pipe_crc->head;
1845 tail = atomic_read(&pipe_crc->tail); 1857 tail = pipe_crc->tail;
1846 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 1858 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
1847 count / PIPE_CRC_LINE_LEN); 1859 count / PIPE_CRC_LINE_LEN);
1860 spin_unlock_irq(&pipe_crc->lock);
1861
1848 bytes_read = 0; 1862 bytes_read = 0;
1849 n = 0; 1863 n = 0;
1850 do { 1864 do {
@@ -1864,10 +1878,13 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
1864 1878
1865 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 1879 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
1866 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1880 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1867 atomic_set(&pipe_crc->tail, tail);
1868 n++; 1881 n++;
1869 } while (--n_entries); 1882 } while (--n_entries);
1870 1883
1884 spin_lock_irq(&pipe_crc->lock);
1885 pipe_crc->tail = tail;
1886 spin_unlock_irq(&pipe_crc->lock);
1887
1871 return bytes_read; 1888 return bytes_read;
1872} 1889}
1873 1890
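The CRC read path above replaces the two atomic head/tail counters with a single spinlock-protected circular buffer, so the entry count and the entries themselves are observed consistently; the arithmetic relies on CIRC_CNT() from <linux/circ_buf.h> and a power-of-two ring size. A userspace demo of that accounting, with CIRC_CNT restated locally:

#include <stdio.h>

#define ENTRIES_NR 16                               /* must be a power of 2 */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

int main(void)
{
    int ring[ENTRIES_NR];
    unsigned int head = 0, tail = 0;

    for (int i = 0; i < 5; i++) {                   /* producer: 5 entries  */
        ring[head] = i * 100;
        head = (head + 1) & (ENTRIES_NR - 1);
    }

    while (CIRC_CNT(head, tail, ENTRIES_NR)) {      /* consumer drains them */
        printf("entry %d\n", ring[tail]);
        tail = (tail + 1) & (ENTRIES_NR - 1);
    }
    return 0;
}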
@@ -1915,6 +1932,11 @@ static const char * const pipe_crc_sources[] = {
1915 "plane2", 1932 "plane2",
1916 "pf", 1933 "pf",
1917 "pipe", 1934 "pipe",
1935 "TV",
1936 "DP-B",
1937 "DP-C",
1938 "DP-D",
1939 "auto",
1918}; 1940};
1919 1941
1920static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 1942static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
@@ -1943,33 +1965,279 @@ static int display_crc_ctl_open(struct inode *inode, struct file *file)
1943 return single_open(file, display_crc_ctl_show, dev); 1965 return single_open(file, display_crc_ctl_show, dev);
1944} 1966}
1945 1967
1946static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, 1968static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
1969 uint32_t *val)
1970{
1971 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
1972 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
1973
1974 switch (*source) {
1975 case INTEL_PIPE_CRC_SOURCE_PIPE:
1976 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
1977 break;
1978 case INTEL_PIPE_CRC_SOURCE_NONE:
1979 *val = 0;
1980 break;
1981 default:
1982 return -EINVAL;
1983 }
1984
1985 return 0;
1986}
1987
1988static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
1989 enum intel_pipe_crc_source *source)
1990{
1991 struct intel_encoder *encoder;
1992 struct intel_crtc *crtc;
1993 struct intel_digital_port *dig_port;
1994 int ret = 0;
1995
1996 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
1997
1998 mutex_lock(&dev->mode_config.mutex);
1999 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2000 base.head) {
2001 if (!encoder->base.crtc)
2002 continue;
2003
2004 crtc = to_intel_crtc(encoder->base.crtc);
2005
2006 if (crtc->pipe != pipe)
2007 continue;
2008
2009 switch (encoder->type) {
2010 case INTEL_OUTPUT_TVOUT:
2011 *source = INTEL_PIPE_CRC_SOURCE_TV;
2012 break;
2013 case INTEL_OUTPUT_DISPLAYPORT:
2014 case INTEL_OUTPUT_EDP:
2015 dig_port = enc_to_dig_port(&encoder->base);
2016 switch (dig_port->port) {
2017 case PORT_B:
2018 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2019 break;
2020 case PORT_C:
2021 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2022 break;
2023 case PORT_D:
2024 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2025 break;
2026 default:
2027 WARN(1, "nonexisting DP port %c\n",
2028 port_name(dig_port->port));
2029 break;
2030 }
2031 break;
2032 }
2033 }
2034 mutex_unlock(&dev->mode_config.mutex);
2035
2036 return ret;
2037}
2038
2039static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2040 enum pipe pipe,
2041 enum intel_pipe_crc_source *source,
1947 uint32_t *val) 2042 uint32_t *val)
1948{ 2043{
1949 switch (source) { 2044 struct drm_i915_private *dev_priv = dev->dev_private;
2045 bool need_stable_symbols = false;
2046
2047 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2048 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2049 if (ret)
2050 return ret;
2051 }
2052
2053 switch (*source) {
2054 case INTEL_PIPE_CRC_SOURCE_PIPE:
2055 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2056 break;
2057 case INTEL_PIPE_CRC_SOURCE_DP_B:
2058 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2059 need_stable_symbols = true;
2060 break;
2061 case INTEL_PIPE_CRC_SOURCE_DP_C:
2062 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2063 need_stable_symbols = true;
2064 break;
2065 case INTEL_PIPE_CRC_SOURCE_NONE:
2066 *val = 0;
2067 break;
2068 default:
2069 return -EINVAL;
2070 }
2071
2072 /*
2073 * When the pipe CRC tap point is after the transcoders we need
2074 * to tweak symbol-level features to produce a deterministic series of
2075 * symbols for a given frame. We need to reset those features only once
2076 * a frame (instead of every nth symbol):
2077 * - DC-balance: used to ensure a better clock recovery from the data
2078 * link (SDVO)
2079 * - DisplayPort scrambling: used for EMI reduction
2080 */
2081 if (need_stable_symbols) {
2082 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2083
2084 WARN_ON(!IS_G4X(dev));
2085
2086 tmp |= DC_BALANCE_RESET_VLV;
2087 if (pipe == PIPE_A)
2088 tmp |= PIPE_A_SCRAMBLE_RESET;
2089 else
2090 tmp |= PIPE_B_SCRAMBLE_RESET;
2091
2092 I915_WRITE(PORT_DFT2_G4X, tmp);
2093 }
2094
2095 return 0;
2096}
2097
2098static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2099 enum pipe pipe,
2100 enum intel_pipe_crc_source *source,
2101 uint32_t *val)
2102{
2103 struct drm_i915_private *dev_priv = dev->dev_private;
2104 bool need_stable_symbols = false;
2105
2106 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2107 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2108 if (ret)
2109 return ret;
2110 }
2111
2112 switch (*source) {
2113 case INTEL_PIPE_CRC_SOURCE_PIPE:
2114 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2115 break;
2116 case INTEL_PIPE_CRC_SOURCE_TV:
2117 if (!SUPPORTS_TV(dev))
2118 return -EINVAL;
2119 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2120 break;
2121 case INTEL_PIPE_CRC_SOURCE_DP_B:
2122 if (!IS_G4X(dev))
2123 return -EINVAL;
2124 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2125 need_stable_symbols = true;
2126 break;
2127 case INTEL_PIPE_CRC_SOURCE_DP_C:
2128 if (!IS_G4X(dev))
2129 return -EINVAL;
2130 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2131 need_stable_symbols = true;
2132 break;
2133 case INTEL_PIPE_CRC_SOURCE_DP_D:
2134 if (!IS_G4X(dev))
2135 return -EINVAL;
2136 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2137 need_stable_symbols = true;
2138 break;
2139 case INTEL_PIPE_CRC_SOURCE_NONE:
2140 *val = 0;
2141 break;
2142 default:
2143 return -EINVAL;
2144 }
2145
2146 /*
2147 * When the pipe CRC tap point is after the transcoders we need
2148 * to tweak symbol-level features to produce a deterministic series of
2149 * symbols for a given frame. We need to reset those features only once
2150 * a frame (instead of every nth symbol):
2151 * - DC-balance: used to ensure a better clock recovery from the data
2152 * link (SDVO)
2153 * - DisplayPort scrambling: used for EMI reduction
2154 */
2155 if (need_stable_symbols) {
2156 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2157
2158 WARN_ON(!IS_G4X(dev));
2159
2160 I915_WRITE(PORT_DFT_I9XX,
2161 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2162
2163 if (pipe == PIPE_A)
2164 tmp |= PIPE_A_SCRAMBLE_RESET;
2165 else
2166 tmp |= PIPE_B_SCRAMBLE_RESET;
2167
2168 I915_WRITE(PORT_DFT2_G4X, tmp);
2169 }
2170
2171 return 0;
2172}
2173
2174static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2175 enum pipe pipe)
2176{
2177 struct drm_i915_private *dev_priv = dev->dev_private;
2178 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2179
2180 if (pipe == PIPE_A)
2181 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2182 else
2183 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2184 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2185 tmp &= ~DC_BALANCE_RESET_VLV;
2186 I915_WRITE(PORT_DFT2_G4X, tmp);
2187
2188}
2189
2190static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2191 enum pipe pipe)
2192{
2193 struct drm_i915_private *dev_priv = dev->dev_private;
2194 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2195
2196 if (pipe == PIPE_A)
2197 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2198 else
2199 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2200 I915_WRITE(PORT_DFT2_G4X, tmp);
2201
2202 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2203 I915_WRITE(PORT_DFT_I9XX,
2204 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2205 }
2206}
2207
2208static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2209 uint32_t *val)
2210{
2211 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2212 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2213
2214 switch (*source) {
1950 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2215 case INTEL_PIPE_CRC_SOURCE_PLANE1:
1951 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 2216 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
1952 break; 2217 break;
1953 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2218 case INTEL_PIPE_CRC_SOURCE_PLANE2:
1954 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 2219 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
1955 break; 2220 break;
1956 case INTEL_PIPE_CRC_SOURCE_PF:
1957 return -EINVAL;
1958 case INTEL_PIPE_CRC_SOURCE_PIPE: 2221 case INTEL_PIPE_CRC_SOURCE_PIPE:
1959 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 2222 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
1960 break; 2223 break;
1961 default: 2224 case INTEL_PIPE_CRC_SOURCE_NONE:
1962 *val = 0; 2225 *val = 0;
1963 break; 2226 break;
2227 default:
2228 return -EINVAL;
1964 } 2229 }
1965 2230
1966 return 0; 2231 return 0;
1967} 2232}
1968 2233
1969static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, 2234static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
1970 uint32_t *val) 2235 uint32_t *val)
1971{ 2236{
1972 switch (source) { 2237 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2238 *source = INTEL_PIPE_CRC_SOURCE_PF;
2239
2240 switch (*source) {
1973 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2241 case INTEL_PIPE_CRC_SOURCE_PLANE1:
1974 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 2242 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
1975 break; 2243 break;
@@ -1979,11 +2247,11 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
1979 case INTEL_PIPE_CRC_SOURCE_PF: 2247 case INTEL_PIPE_CRC_SOURCE_PF:
1980 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 2248 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
1981 break; 2249 break;
1982 case INTEL_PIPE_CRC_SOURCE_PIPE: 2250 case INTEL_PIPE_CRC_SOURCE_NONE:
1983 return -EINVAL;
1984 default:
1985 *val = 0; 2251 *val = 0;
1986 break; 2252 break;
2253 default:
2254 return -EINVAL;
1987 } 2255 }
1988 2256
1989 return 0; 2257 return 0;
@@ -1997,9 +2265,6 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
1997 u32 val; 2265 u32 val;
1998 int ret; 2266 int ret;
1999 2267
2000 if (!(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)))
2001 return -ENODEV;
2002
2003 if (pipe_crc->source == source) 2268 if (pipe_crc->source == source)
2004 return 0; 2269 return 0;
2005 2270
@@ -2007,10 +2272,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2007 if (pipe_crc->source && source) 2272 if (pipe_crc->source && source)
2008 return -EINVAL; 2273 return -EINVAL;
2009 2274
2010 if (IS_GEN5(dev) || IS_GEN6(dev)) 2275 if (IS_GEN2(dev))
2011 ret = ilk_pipe_crc_ctl_reg(source, &val); 2276 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
2277 else if (INTEL_INFO(dev)->gen < 5)
2278 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2279 else if (IS_VALLEYVIEW(dev))
2280 		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2281 else if (IS_GEN5(dev) || IS_GEN6(dev))
2282 ret = ilk_pipe_crc_ctl_reg(&source, &val);
2012 else 2283 else
2013 ret = ivb_pipe_crc_ctl_reg(source, &val); 2284 ret = ivb_pipe_crc_ctl_reg(&source, &val);
2014 2285
2015 if (ret != 0) 2286 if (ret != 0)
2016 return ret; 2287 return ret;
@@ -2026,8 +2297,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2026 if (!pipe_crc->entries) 2297 if (!pipe_crc->entries)
2027 return -ENOMEM; 2298 return -ENOMEM;
2028 2299
2029 atomic_set(&pipe_crc->head, 0); 2300 spin_lock_irq(&pipe_crc->lock);
2030 atomic_set(&pipe_crc->tail, 0); 2301 pipe_crc->head = 0;
2302 pipe_crc->tail = 0;
2303 spin_unlock_irq(&pipe_crc->lock);
2031 } 2304 }
2032 2305
2033 pipe_crc->source = source; 2306 pipe_crc->source = source;
@@ -2037,13 +2310,24 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2037 2310
2038 /* real source -> none transition */ 2311 /* real source -> none transition */
2039 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 2312 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
2313 struct intel_pipe_crc_entry *entries;
2314
2040 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 2315 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
2041 pipe_name(pipe)); 2316 pipe_name(pipe));
2042 2317
2043 intel_wait_for_vblank(dev, pipe); 2318 intel_wait_for_vblank(dev, pipe);
2044 2319
2045 kfree(pipe_crc->entries); 2320 spin_lock_irq(&pipe_crc->lock);
2321 entries = pipe_crc->entries;
2046 pipe_crc->entries = NULL; 2322 pipe_crc->entries = NULL;
2323 spin_unlock_irq(&pipe_crc->lock);
2324
2325 kfree(entries);
2326
2327 if (IS_G4X(dev))
2328 g4x_undo_pipe_scramble_reset(dev, pipe);
2329 else if (IS_VALLEYVIEW(dev))
2330 vlv_undo_pipe_scramble_reset(dev, pipe);
2047 } 2331 }
2048 2332
2049 return 0; 2333 return 0;
@@ -2738,7 +3022,8 @@ void intel_display_crc_init(struct drm_device *dev)
2738 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 3022 for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
2739 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i]; 3023 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
2740 3024
2741 atomic_set(&pipe_crc->available, 1); 3025 pipe_crc->opened = false;
3026 spin_lock_init(&pipe_crc->lock);
2742 init_waitqueue_head(&pipe_crc->wq); 3027 init_waitqueue_head(&pipe_crc->wq);
2743 } 3028 }
2744} 3029}
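
The source -> none transition above detaches the CRC entry buffer while holding the new pipe_crc->lock and only calls kfree() after the lock is dropped, so the interrupt handler can never dereference a freed buffer. A minimal sketch of that detach-under-lock pattern, with purely illustrative names (crc_buf, crc_buf_stop) rather than the driver's structures:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct crc_buf {
	spinlock_t lock;	/* shared with the interrupt handler */
	void *entries;		/* NULL while capture is stopped */
};

static void crc_buf_stop(struct crc_buf *buf)
{
	void *entries;

	spin_lock_irq(&buf->lock);
	entries = buf->entries;		/* IRQ handler now only sees NULL */
	buf->entries = NULL;
	spin_unlock_irq(&buf->lock);

	kfree(entries);			/* safe: nothing can reach it anymore */
}

The same ordering shows up in the interrupt handler further down, which checks entries under the lock before writing into it.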
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 437886641d90..0cab2d045135 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
1290 * then we do not take part in VGA arbitration and the 1290 * then we do not take part in VGA arbitration and the
1291 * vga_client_register() fails with -ENODEV. 1291 * vga_client_register() fails with -ENODEV.
1292 */ 1292 */
1293 if (!HAS_PCH_SPLIT(dev)) { 1293 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1294 ret = vga_client_register(dev->pdev, dev, NULL, 1294 if (ret && ret != -ENODEV)
1295 i915_vga_set_decode); 1295 goto out;
1296 if (ret && ret != -ENODEV)
1297 goto out;
1298 }
1299 1296
1300 intel_register_dsm_handler(); 1297 intel_register_dsm_handler();
1301 1298
@@ -1314,10 +1311,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1314 if (ret) 1311 if (ret)
1315 goto cleanup_gem_stolen; 1312 goto cleanup_gem_stolen;
1316 1313
1317 intel_init_power_well(dev); 1314 intel_power_domains_init_hw(dev);
1318
1319 /* Keep VGA alive until i915_disable_vga_mem() */
1320 intel_display_power_get(dev, POWER_DOMAIN_VGA);
1321 1315
1322 /* Important: The output setup functions called by modeset_init need 1316 /* Important: The output setup functions called by modeset_init need
1323 * working irqs for e.g. gmbus and dp aux transfers. */ 1317 * working irqs for e.g. gmbus and dp aux transfers. */
@@ -1358,13 +1352,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1358 */ 1352 */
1359 intel_fbdev_initial_config(dev); 1353 intel_fbdev_initial_config(dev);
1360 1354
1361 /*
1362 * Must do this after fbcon init so that
1363 * vgacon_save_screen() works during the handover.
1364 */
1365 i915_disable_vga_mem(dev);
1366 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1367
1368 /* Only enable hotplug handling once the fbdev is fully set up. */ 1355 /* Only enable hotplug handling once the fbdev is fully set up. */
1369 dev_priv->enable_hotplug_processing = true; 1356 dev_priv->enable_hotplug_processing = true;
1370 1357
@@ -1653,7 +1640,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1653 } 1640 }
1654 1641
1655 if (HAS_POWER_WELL(dev)) 1642 if (HAS_POWER_WELL(dev))
1656 i915_init_power_well(dev); 1643 intel_power_domains_init(dev);
1657 1644
1658 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1645 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1659 ret = i915_load_modeset_init(dev); 1646 ret = i915_load_modeset_init(dev);
@@ -1681,7 +1668,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1681 1668
1682out_power_well: 1669out_power_well:
1683 if (HAS_POWER_WELL(dev)) 1670 if (HAS_POWER_WELL(dev))
1684 i915_remove_power_well(dev); 1671 intel_power_domains_remove(dev);
1685 drm_vblank_cleanup(dev); 1672 drm_vblank_cleanup(dev);
1686out_gem_unload: 1673out_gem_unload:
1687 if (dev_priv->mm.inactive_shrinker.scan_objects) 1674 if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1723,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
1723 /* The i915.ko module is still not prepared to be loaded when 1710 /* The i915.ko module is still not prepared to be loaded when
1724 * the power well is not enabled, so just enable it in case 1711 * the power well is not enabled, so just enable it in case
1725 * we're going to unload/reload. */ 1712 * we're going to unload/reload. */
1726 intel_set_power_well(dev, true); 1713 intel_display_set_init_power(dev, true);
1727 i915_remove_power_well(dev); 1714 intel_power_domains_remove(dev);
1728 } 1715 }
1729 1716
1730 i915_teardown_sysfs(dev); 1717 i915_teardown_sysfs(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1060a96d2184..a0804fa1e306 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -477,7 +477,7 @@ static int i915_drm_freeze(struct drm_device *dev)
477 /* We do a lot of poking in a lot of registers, make sure they work 477 /* We do a lot of poking in a lot of registers, make sure they work
478 * properly. */ 478 * properly. */
479 hsw_disable_package_c8(dev_priv); 479 hsw_disable_package_c8(dev_priv);
480 intel_set_power_well(dev, true); 480 intel_display_set_init_power(dev, true);
481 481
482 drm_kms_helper_poll_disable(dev); 482 drm_kms_helper_poll_disable(dev);
483 483
@@ -508,6 +508,8 @@ static int i915_drm_freeze(struct drm_device *dev)
508 intel_modeset_suspend_hw(dev); 508 intel_modeset_suspend_hw(dev);
509 } 509 }
510 510
511 i915_gem_suspend_gtt_mappings(dev);
512
511 i915_save_state(dev); 513 i915_save_state(dev);
512 514
513 intel_opregion_fini(dev); 515 intel_opregion_fini(dev);
@@ -595,7 +597,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
595 mutex_unlock(&dev->struct_mutex); 597 mutex_unlock(&dev->struct_mutex);
596 } 598 }
597 599
598 intel_init_power_well(dev); 600 intel_power_domains_init_hw(dev);
599 601
600 i915_restore_state(dev); 602 i915_restore_state(dev);
601 intel_opregion_setup(dev); 603 intel_opregion_setup(dev);
@@ -656,6 +658,9 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
656 658
657static int i915_drm_thaw(struct drm_device *dev) 659static int i915_drm_thaw(struct drm_device *dev)
658{ 660{
661 if (drm_core_check_feature(dev, DRIVER_MODESET))
662 i915_check_and_clear_faults(dev);
663
659 return __i915_drm_thaw(dev, true); 664 return __i915_drm_thaw(dev, true);
660} 665}
661 666
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2ea33eebf01c..b0dd4ea8133f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
54#define DRIVER_DATE "20080730" 54#define DRIVER_DATE "20080730"
55 55
56enum pipe { 56enum pipe {
57 INVALID_PIPE = -1,
57 PIPE_A = 0, 58 PIPE_A = 0,
58 PIPE_B, 59 PIPE_B,
59 PIPE_C, 60 PIPE_C,
@@ -98,14 +99,25 @@ enum intel_display_power_domain {
98 POWER_DOMAIN_TRANSCODER_A, 99 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B, 100 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C, 101 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, 102 POWER_DOMAIN_TRANSCODER_EDP,
102 POWER_DOMAIN_VGA, 103 POWER_DOMAIN_VGA,
104 POWER_DOMAIN_INIT,
105
106 POWER_DOMAIN_NUM,
103}; 107};
104 108
109#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
110
105#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 111#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
106#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 112#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
107 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 113 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
108#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) 114#define POWER_DOMAIN_TRANSCODER(tran) \
115 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
116 (tran) + POWER_DOMAIN_TRANSCODER_A)
117
118#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
119 BIT(POWER_DOMAIN_PIPE_A) | \
120 BIT(POWER_DOMAIN_TRANSCODER_EDP))
109 121
110enum hpd_pin { 122enum hpd_pin {
111 HPD_NONE = 0, 123 HPD_NONE = 0,
@@ -231,6 +243,7 @@ struct intel_opregion {
231 struct opregion_asle __iomem *asle; 243 struct opregion_asle __iomem *asle;
232 void __iomem *vbt; 244 void __iomem *vbt;
233 u32 __iomem *lid_state; 245 u32 __iomem *lid_state;
246 struct work_struct asle_work;
234}; 247};
235#define OPREGION_SIZE (8*1024) 248#define OPREGION_SIZE (8*1024)
236 249
@@ -288,6 +301,7 @@ struct drm_i915_error_state {
288 u32 cpu_ring_tail[I915_NUM_RINGS]; 301 u32 cpu_ring_tail[I915_NUM_RINGS];
289 u32 error; /* gen6+ */ 302 u32 error; /* gen6+ */
290 u32 err_int; /* gen7 */ 303 u32 err_int; /* gen7 */
304 u32 bbstate[I915_NUM_RINGS];
291 u32 instpm[I915_NUM_RINGS]; 305 u32 instpm[I915_NUM_RINGS];
292 u32 instps[I915_NUM_RINGS]; 306 u32 instps[I915_NUM_RINGS];
293 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 307 u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -516,10 +530,12 @@ struct i915_address_space {
516 530
517 /* FIXME: Need a more generic return type */ 531 /* FIXME: Need a more generic return type */
518 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 532 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
519 enum i915_cache_level level); 533 enum i915_cache_level level,
534 bool valid); /* Create a valid PTE */
520 void (*clear_range)(struct i915_address_space *vm, 535 void (*clear_range)(struct i915_address_space *vm,
521 unsigned int first_entry, 536 unsigned int first_entry,
522 unsigned int num_entries); 537 unsigned int num_entries,
538 bool use_scratch);
523 void (*insert_entries)(struct i915_address_space *vm, 539 void (*insert_entries)(struct i915_address_space *vm,
524 struct sg_table *st, 540 struct sg_table *st,
525 unsigned int first_entry, 541 unsigned int first_entry,
@@ -729,6 +745,9 @@ struct i915_suspend_saved_registers {
729 u32 saveBLC_HIST_CTL; 745 u32 saveBLC_HIST_CTL;
730 u32 saveBLC_PWM_CTL; 746 u32 saveBLC_PWM_CTL;
731 u32 saveBLC_PWM_CTL2; 747 u32 saveBLC_PWM_CTL2;
748 u32 saveBLC_HIST_CTL_B;
749 u32 saveBLC_PWM_CTL_B;
750 u32 saveBLC_PWM_CTL2_B;
732 u32 saveBLC_CPU_PWM_CTL; 751 u32 saveBLC_CPU_PWM_CTL;
733 u32 saveBLC_CPU_PWM_CTL2; 752 u32 saveBLC_CPU_PWM_CTL2;
734 u32 saveFPB0; 753 u32 saveFPB0;
@@ -898,11 +917,21 @@ struct intel_ilk_power_mgmt {
898 917
899/* Power well structure for haswell */ 918/* Power well structure for haswell */
900struct i915_power_well { 919struct i915_power_well {
901 struct drm_device *device;
902 spinlock_t lock;
903 /* power well enable/disable usage count */ 920 /* power well enable/disable usage count */
904 int count; 921 int count;
905 int i915_request; 922};
923
924#define I915_MAX_POWER_WELLS 1
925
926struct i915_power_domains {
927 /*
928 * Power wells needed for initialization at driver init and suspend
929 * time are on. They are kept on until after the first modeset.
930 */
931 bool init_power_on;
932
933 struct mutex lock;
934 struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
906}; 935};
907 936
908struct i915_dri1_state { 937struct i915_dri1_state {
@@ -1224,6 +1253,12 @@ enum intel_pipe_crc_source {
1224 INTEL_PIPE_CRC_SOURCE_PLANE2, 1253 INTEL_PIPE_CRC_SOURCE_PLANE2,
1225 INTEL_PIPE_CRC_SOURCE_PF, 1254 INTEL_PIPE_CRC_SOURCE_PF,
1226 INTEL_PIPE_CRC_SOURCE_PIPE, 1255 INTEL_PIPE_CRC_SOURCE_PIPE,
1256 /* TV/DP on pre-gen5/vlv can't use the pipe source. */
1257 INTEL_PIPE_CRC_SOURCE_TV,
1258 INTEL_PIPE_CRC_SOURCE_DP_B,
1259 INTEL_PIPE_CRC_SOURCE_DP_C,
1260 INTEL_PIPE_CRC_SOURCE_DP_D,
1261 INTEL_PIPE_CRC_SOURCE_AUTO,
1227 INTEL_PIPE_CRC_SOURCE_MAX, 1262 INTEL_PIPE_CRC_SOURCE_MAX,
1228}; 1263};
1229 1264
@@ -1234,10 +1269,11 @@ struct intel_pipe_crc_entry {
1234 1269
1235#define INTEL_PIPE_CRC_ENTRIES_NR 128 1270#define INTEL_PIPE_CRC_ENTRIES_NR 128
1236struct intel_pipe_crc { 1271struct intel_pipe_crc {
1237 atomic_t available; /* exclusive access to the device */ 1272 spinlock_t lock;
1273 bool opened; /* exclusive access to the result file */
1238 struct intel_pipe_crc_entry *entries; 1274 struct intel_pipe_crc_entry *entries;
1239 enum intel_pipe_crc_source source; 1275 enum intel_pipe_crc_source source;
1240 atomic_t head, tail; 1276 int head, tail;
1241 wait_queue_head_t wq; 1277 wait_queue_head_t wq;
1242}; 1278};
1243 1279
@@ -1365,6 +1401,10 @@ typedef struct drm_i915_private {
1365 struct drm_crtc *pipe_to_crtc_mapping[3]; 1401 struct drm_crtc *pipe_to_crtc_mapping[3];
1366 wait_queue_head_t pending_flip_queue; 1402 wait_queue_head_t pending_flip_queue;
1367 1403
1404#ifdef CONFIG_DEBUG_FS
1405 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1406#endif
1407
1368 int num_shared_dpll; 1408 int num_shared_dpll;
1369 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1409 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1370 struct intel_ddi_plls ddi_plls; 1410 struct intel_ddi_plls ddi_plls;
@@ -1390,8 +1430,7 @@ typedef struct drm_i915_private {
1390 * mchdev_lock in intel_pm.c */ 1430 * mchdev_lock in intel_pm.c */
1391 struct intel_ilk_power_mgmt ips; 1431 struct intel_ilk_power_mgmt ips;
1392 1432
1393 /* Haswell power well */ 1433 struct i915_power_domains power_domains;
1394 struct i915_power_well power_well;
1395 1434
1396 struct i915_psr psr; 1435 struct i915_psr psr;
1397 1436
@@ -1445,10 +1484,6 @@ typedef struct drm_i915_private {
1445 struct i915_dri1_state dri1; 1484 struct i915_dri1_state dri1;
1446 /* Old ums support infrastructure, same warning applies. */ 1485 /* Old ums support infrastructure, same warning applies. */
1447 struct i915_ums_state ums; 1486 struct i915_ums_state ums;
1448
1449#ifdef CONFIG_DEBUG_FS
1450 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
1451#endif
1452} drm_i915_private_t; 1487} drm_i915_private_t;
1453 1488
1454static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1489static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -1784,27 +1819,6 @@ struct drm_i915_file_private {
1784 1819
1785#include "i915_trace.h" 1820#include "i915_trace.h"
1786 1821
1787/**
1788 * RC6 is a special power stage which allows the GPU to enter a very
1789 * low-voltage mode when idle, using down to 0V while at this stage. This
1790 * stage is entered automatically when the GPU is idle when RC6 support is
1791 * enabled, and as soon as new workload arises GPU wakes up automatically as well.
1792 *
1793 * There are different RC6 modes available in Intel GPU, which differentiate
1794 * among each other with the latency required to enter and leave RC6 and
1795 * voltage consumed by the GPU in different states.
1796 *
1797 * The combination of the following flags define which states GPU is allowed
1798 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
1799 * RC6pp is deepest RC6. Their support by hardware varies according to the
1800 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1801 * which brings the most power savings; deeper states save more power, but
1802 * require higher latency to switch to and wake up.
1803 */
1804#define INTEL_RC6_ENABLE (1<<0)
1805#define INTEL_RC6p_ENABLE (1<<1)
1806#define INTEL_RC6pp_ENABLE (1<<2)
1807
1808extern const struct drm_ioctl_desc i915_ioctls[]; 1822extern const struct drm_ioctl_desc i915_ioctls[];
1809extern int i915_max_ioctl; 1823extern int i915_max_ioctl;
1810extern unsigned int i915_fbpercrtc __always_unused; 1824extern unsigned int i915_fbpercrtc __always_unused;
@@ -1878,10 +1892,10 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
1878extern void intel_uncore_fini(struct drm_device *dev); 1892extern void intel_uncore_fini(struct drm_device *dev);
1879 1893
1880void 1894void
1881i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1895i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
1882 1896
1883void 1897void
1884i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1898i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
1885 1899
1886/* i915_gem.c */ 1900/* i915_gem.c */
1887int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1901int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -2175,6 +2189,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
2175void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2189void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
2176 struct drm_i915_gem_object *obj); 2190 struct drm_i915_gem_object *obj);
2177 2191
2192void i915_check_and_clear_faults(struct drm_device *dev);
2193void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2178void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2194void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2179int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2195int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2180void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 2196void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
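
With POWER_DOMAIN_NUM now terminating the enum, BIT(POWER_DOMAIN_NUM) - 1 yields a mask covering every domain, and sets like HSW_ALWAYS_ON_POWER_DOMAINS become simple ORs of BIT() values. A compilable illustration of that pattern, using made-up DOM_* names instead of the driver's enumerators:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))

enum power_domain { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_TRANSCODER_EDP, DOM_VGA, DOM_NUM };

#define DOMAIN_MASK		(BIT(DOM_NUM) - 1)	/* all defined domains */
#define ALWAYS_ON_DOMAINS	(BIT(DOM_PIPE_A) | BIT(DOM_TRANSCODER_EDP))

static bool domain_is_always_on(enum power_domain d)
{
	return (BIT(d) & ALWAYS_ON_DOMAINS & DOMAIN_MASK) != 0;
}

int main(void)
{
	printf("%d %d\n", domain_is_always_on(DOM_TRANSCODER_EDP),
	       domain_is_always_on(DOM_VGA));	/* prints "1 0" */
	return 0;
}

Making the eDP transcoder an ordinary enumerator, instead of the old POWER_DOMAIN_TRANSCODER_A + 0xF offset, is what keeps such masks dense enough for this kind of BIT() arithmetic.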
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 34df59b660f8..e7b39d731db6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -261,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
261 struct drm_mode_create_dumb *args) 261 struct drm_mode_create_dumb *args)
262{ 262{
263 /* have to work out size/pitch and return them */ 263 /* have to work out size/pitch and return them */
264 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); 264 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
265 args->size = args->pitch * args->height; 265 args->size = args->pitch * args->height;
266 return i915_gem_create(file, dev, 266 return i915_gem_create(file, dev,
267 args->size, &args->handle); 267 args->size, &args->handle);
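
The dumb-buffer hunk above only swaps the open-coded (bpp + 7) / 8 for DIV_ROUND_UP(bpp, 8); the computed pitch and size are unchanged. A rough userspace re-creation of the calculation, with ALIGN and DIV_ROUND_UP re-implemented here for illustration (in the kernel they are existing macros):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, height = 768, bpp = 32;

	/* bytes per pixel rounded up, then the row padded to 64 bytes */
	unsigned int pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);
	unsigned long long size = (unsigned long long)pitch * height;

	printf("pitch=%u size=%llu\n", pitch, size);	/* pitch=5504 size=4227072 */
	return 0;
}

For an odd bpp such as 15, DIV_ROUND_UP(15, 8) still rounds up to 2 bytes per pixel, which is exactly what the old expression encoded, only less readably.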
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e999496532c6..c4c42e7cbd7b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59 59
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level) 61 enum i915_cache_level level,
62 bool valid)
62{ 63{
63 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 64 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
64 pte |= GEN6_PTE_ADDR_ENCODE(addr); 65 pte |= GEN6_PTE_ADDR_ENCODE(addr);
65 66
66 switch (level) { 67 switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
79} 80}
80 81
81static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 82static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
82 enum i915_cache_level level) 83 enum i915_cache_level level,
84 bool valid)
83{ 85{
84 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 86 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
85 pte |= GEN6_PTE_ADDR_ENCODE(addr); 87 pte |= GEN6_PTE_ADDR_ENCODE(addr);
86 88
87 switch (level) { 89 switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
105#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 107#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
106 108
107static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 109static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
108 enum i915_cache_level level) 110 enum i915_cache_level level,
111 bool valid)
109{ 112{
110 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 113 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
111 pte |= GEN6_PTE_ADDR_ENCODE(addr); 114 pte |= GEN6_PTE_ADDR_ENCODE(addr);
112 115
113 /* Mark the page as writeable. Other platforms don't have a 116 /* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
122} 125}
123 126
124static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 127static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
125 enum i915_cache_level level) 128 enum i915_cache_level level,
129 bool valid)
126{ 130{
127 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 131 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
128 pte |= HSW_PTE_ADDR_ENCODE(addr); 132 pte |= HSW_PTE_ADDR_ENCODE(addr);
129 133
130 if (level != I915_CACHE_NONE) 134 if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
134} 138}
135 139
136static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 140static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
137 enum i915_cache_level level) 141 enum i915_cache_level level,
142 bool valid)
138{ 143{
139 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 144 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
140 pte |= HSW_PTE_ADDR_ENCODE(addr); 145 pte |= HSW_PTE_ADDR_ENCODE(addr);
141 146
142 switch (level) { 147 switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
236/* PPGTT support for Sandybridge/Gen6 and later */ 241/* PPGTT support for Sandybridge/Gen6 and later */
237static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 242static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
238 unsigned first_entry, 243 unsigned first_entry,
239 unsigned num_entries) 244 unsigned num_entries,
245 bool use_scratch)
240{ 246{
241 struct i915_hw_ppgtt *ppgtt = 247 struct i915_hw_ppgtt *ppgtt =
242 container_of(vm, struct i915_hw_ppgtt, base); 248 container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 251 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
246 unsigned last_pte, i; 252 unsigned last_pte, i;
247 253
248 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 254 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
249 255
250 while (num_entries) { 256 while (num_entries) {
251 last_pte = first_pte + num_entries; 257 last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
282 dma_addr_t page_addr; 288 dma_addr_t page_addr;
283 289
284 page_addr = sg_page_iter_dma_address(&sg_iter); 290 page_addr = sg_page_iter_dma_address(&sg_iter);
285 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); 291 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
286 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 292 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
287 kunmap_atomic(pt_vaddr); 293 kunmap_atomic(pt_vaddr);
288 act_pt++; 294 act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
367 } 373 }
368 374
369 ppgtt->base.clear_range(&ppgtt->base, 0, 375 ppgtt->base.clear_range(&ppgtt->base, 0,
370 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); 376 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
371 377
372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 378 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
373 379
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
444{ 450{
445 ppgtt->base.clear_range(&ppgtt->base, 451 ppgtt->base.clear_range(&ppgtt->base,
446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 452 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
447 obj->base.size >> PAGE_SHIFT); 453 obj->base.size >> PAGE_SHIFT,
454 true);
448} 455}
449 456
450extern int intel_iommu_gfx_mapped; 457extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
485 dev_priv->mm.interruptible = interruptible; 492 dev_priv->mm.interruptible = interruptible;
486} 493}
487 494
495void i915_check_and_clear_faults(struct drm_device *dev)
496{
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 struct intel_ring_buffer *ring;
499 int i;
500
501 if (INTEL_INFO(dev)->gen < 6)
502 return;
503
504 for_each_ring(ring, dev_priv, i) {
505 u32 fault_reg;
506 fault_reg = I915_READ(RING_FAULT_REG(ring));
507 if (fault_reg & RING_FAULT_VALID) {
508 DRM_DEBUG_DRIVER("Unexpected fault\n"
509 					 "\tAddr: 0x%08lx\n"
510 "\tAddress space: %s\n"
511 "\tSource ID: %d\n"
512 "\tType: %d\n",
513 fault_reg & PAGE_MASK,
514 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
515 RING_FAULT_SRCID(fault_reg),
516 RING_FAULT_FAULT_TYPE(fault_reg));
517 I915_WRITE(RING_FAULT_REG(ring),
518 fault_reg & ~RING_FAULT_VALID);
519 }
520 }
521 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
522}
523
524void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
525{
526 struct drm_i915_private *dev_priv = dev->dev_private;
527
528 /* Don't bother messing with faults pre GEN6 as we have little
529 * documentation supporting that it's a good idea.
530 */
531 if (INTEL_INFO(dev)->gen < 6)
532 return;
533
534 i915_check_and_clear_faults(dev);
535
536 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
537 dev_priv->gtt.base.start / PAGE_SIZE,
538 dev_priv->gtt.base.total / PAGE_SIZE,
539 false);
540}
541
488void i915_gem_restore_gtt_mappings(struct drm_device *dev) 542void i915_gem_restore_gtt_mappings(struct drm_device *dev)
489{ 543{
490 struct drm_i915_private *dev_priv = dev->dev_private; 544 struct drm_i915_private *dev_priv = dev->dev_private;
491 struct drm_i915_gem_object *obj; 545 struct drm_i915_gem_object *obj;
492 546
547 i915_check_and_clear_faults(dev);
548
493 /* First fill our portion of the GTT with scratch pages */ 549 /* First fill our portion of the GTT with scratch pages */
494 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 550 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
495 dev_priv->gtt.base.start / PAGE_SIZE, 551 dev_priv->gtt.base.start / PAGE_SIZE,
496 dev_priv->gtt.base.total / PAGE_SIZE); 552 dev_priv->gtt.base.total / PAGE_SIZE,
553 true);
497 554
498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 555 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
499 i915_gem_clflush_object(obj, obj->pin_display); 556 i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
536 593
537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 594 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
538 addr = sg_page_iter_dma_address(&sg_iter); 595 addr = sg_page_iter_dma_address(&sg_iter);
539 iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); 596 iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
540 i++; 597 i++;
541 } 598 }
542 599
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
548 */ 605 */
549 if (i != 0) 606 if (i != 0)
550 WARN_ON(readl(&gtt_entries[i-1]) != 607 WARN_ON(readl(&gtt_entries[i-1]) !=
551 vm->pte_encode(addr, level)); 608 vm->pte_encode(addr, level, true));
552 609
553 /* This next bit makes the above posting read even more important. We 610 /* This next bit makes the above posting read even more important. We
554 * want to flush the TLBs only after we're certain all the PTE updates 611 * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
560 617
561static void gen6_ggtt_clear_range(struct i915_address_space *vm, 618static void gen6_ggtt_clear_range(struct i915_address_space *vm,
562 unsigned int first_entry, 619 unsigned int first_entry,
563 unsigned int num_entries) 620 unsigned int num_entries,
621 bool use_scratch)
564{ 622{
565 struct drm_i915_private *dev_priv = vm->dev->dev_private; 623 struct drm_i915_private *dev_priv = vm->dev->dev_private;
566 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 624 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
573 first_entry, num_entries, max_entries)) 631 first_entry, num_entries, max_entries))
574 num_entries = max_entries; 632 num_entries = max_entries;
575 633
576 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 634 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
635
577 for (i = 0; i < num_entries; i++) 636 for (i = 0; i < num_entries; i++)
578 iowrite32(scratch_pte, &gtt_base[i]); 637 iowrite32(scratch_pte, &gtt_base[i]);
579 readl(gtt_base); 638 readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
594 653
595static void i915_ggtt_clear_range(struct i915_address_space *vm, 654static void i915_ggtt_clear_range(struct i915_address_space *vm,
596 unsigned int first_entry, 655 unsigned int first_entry,
597 unsigned int num_entries) 656 unsigned int num_entries,
657 bool unused)
598{ 658{
599 intel_gtt_clear_range(first_entry, num_entries); 659 intel_gtt_clear_range(first_entry, num_entries);
600} 660}
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
622 682
623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 683 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
624 entry, 684 entry,
625 obj->base.size >> PAGE_SHIFT); 685 obj->base.size >> PAGE_SHIFT,
686 true);
626 687
627 obj->has_global_gtt_mapping = 0; 688 obj->has_global_gtt_mapping = 0;
628} 689}
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; 770 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 771 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
711 hole_start, hole_end); 772 hole_start, hole_end);
712 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); 773 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
713 } 774 }
714 775
715 /* And finally clear the reserved guard page */ 776 /* And finally clear the reserved guard page */
716 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); 777 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
717} 778}
718 779
719static bool 780static bool
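
The recurring change in this file is the valid flag threaded through pte_encode() and the use_scratch argument to clear_range(): normal clears still point every entry at the scratch page, while i915_gem_suspend_gtt_mappings() clears with use_scratch = false so a stale access after suspend produces a visible fault instead of silently landing on scratch. A stripped-down sketch of that encode/clear split (the bit layout, names and types here are illustrative, not the hardware's):

#include <stddef.h>
#include <stdint.h>

#define PTE_VALID		(1u << 0)	/* made-up bit position */
#define PTE_ADDR_ENCODE(a)	((uint32_t)((a) & ~0xfffull))	/* real PTEs place high bits elsewhere */

typedef uint32_t gtt_pte_t;

static gtt_pte_t pte_encode(uint64_t addr, int valid)
{
	gtt_pte_t pte = valid ? PTE_VALID : 0;

	pte |= PTE_ADDR_ENCODE(addr);
	return pte;
}

/* Point [first, first + count) either at the scratch page (use_scratch)
 * or at invalid entries that will fault if touched. */
static void clear_range(gtt_pte_t *gtt, size_t first, size_t count,
			uint64_t scratch_addr, int use_scratch)
{
	gtt_pte_t scratch_pte = pte_encode(scratch_addr, use_scratch);
	size_t i;

	for (i = 0; i < count; i++)
		gtt[first + i] = scratch_pte;
}

i915_gem_restore_gtt_mappings() does the inverse on resume: clear the whole range back to scratch with use_scratch = true, then rebind every object.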
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5dde81026471..a8bb213da79f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -249,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
249 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 249 err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
250 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 250 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
251 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 251 err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
252 252 if (INTEL_INFO(dev)->gen >= 4)
253 err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]);
253 if (INTEL_INFO(dev)->gen >= 4) 254 if (INTEL_INFO(dev)->gen >= 4)
254 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 255 err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
255 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 256 err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -725,6 +726,7 @@ static void i915_record_ring_state(struct drm_device *dev,
725 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 726 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
726 if (ring->id == RCS) 727 if (ring->id == RCS)
727 error->bbaddr = I915_READ64(BB_ADDR); 728 error->bbaddr = I915_READ64(BB_ADDR);
729 error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
728 } else { 730 } else {
729 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 731 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
730 error->ipeir[ring->id] = I915_READ(IPEIR); 732 error->ipeir[ring->id] = I915_READ(IPEIR);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a2274c713273..d26f65212472 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -442,7 +442,7 @@ done:
442 442
443 443
444void 444void
445i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 445i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
446{ 446{
447 u32 reg = PIPESTAT(pipe); 447 u32 reg = PIPESTAT(pipe);
448 u32 pipestat = I915_READ(reg) & 0x7fff0000; 448 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -459,7 +459,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
459} 459}
460 460
461void 461void
462i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 462i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
463{ 463{
464 u32 reg = PIPESTAT(pipe); 464 u32 reg = PIPESTAT(pipe);
465 u32 pipestat = I915_READ(reg) & 0x7fff0000; 465 u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -487,9 +487,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
487 487
488 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 488 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
489 489
490 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 490 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
491 if (INTEL_INFO(dev)->gen >= 4) 491 if (INTEL_INFO(dev)->gen >= 4)
492 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 492 i915_enable_pipestat(dev_priv, PIPE_A,
493 PIPE_LEGACY_BLC_EVENT_ENABLE);
493 494
494 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 495 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
495} 496}
@@ -1222,25 +1223,29 @@ static void dp_aux_irq_handler(struct drm_device *dev)
1222} 1223}
1223 1224
1224#if defined(CONFIG_DEBUG_FS) 1225#if defined(CONFIG_DEBUG_FS)
1225static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe, 1226static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1226 uint32_t crc0, uint32_t crc1, 1227 uint32_t crc0, uint32_t crc1,
1227 uint32_t crc2, uint32_t crc3, 1228 uint32_t crc2, uint32_t crc3,
1228 uint32_t crc4) 1229 uint32_t crc4)
1229{ 1230{
1230 struct drm_i915_private *dev_priv = dev->dev_private; 1231 struct drm_i915_private *dev_priv = dev->dev_private;
1231 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1232 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1232 struct intel_pipe_crc_entry *entry; 1233 struct intel_pipe_crc_entry *entry;
1233 int head, tail; 1234 int head, tail;
1234 1235
1236 spin_lock(&pipe_crc->lock);
1237
1235 if (!pipe_crc->entries) { 1238 if (!pipe_crc->entries) {
1239 spin_unlock(&pipe_crc->lock);
1236 DRM_ERROR("spurious interrupt\n"); 1240 DRM_ERROR("spurious interrupt\n");
1237 return; 1241 return;
1238 } 1242 }
1239 1243
1240 head = atomic_read(&pipe_crc->head); 1244 head = pipe_crc->head;
1241 tail = atomic_read(&pipe_crc->tail); 1245 tail = pipe_crc->tail;
1242 1246
1243 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1247 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1248 spin_unlock(&pipe_crc->lock);
1244 DRM_ERROR("CRC buffer overflowing\n"); 1249 DRM_ERROR("CRC buffer overflowing\n");
1245 return; 1250 return;
1246 } 1251 }
@@ -1255,48 +1260,63 @@ static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe,
1255 entry->crc[4] = crc4; 1260 entry->crc[4] = crc4;
1256 1261
1257 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1262 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1258 atomic_set(&pipe_crc->head, head); 1263 pipe_crc->head = head;
1264
1265 spin_unlock(&pipe_crc->lock);
1259 1266
1260 wake_up_interruptible(&pipe_crc->wq); 1267 wake_up_interruptible(&pipe_crc->wq);
1261} 1268}
1269#else
1270static inline void
1271display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1272 uint32_t crc0, uint32_t crc1,
1273 uint32_t crc2, uint32_t crc3,
1274 uint32_t crc4) {}
1275#endif
1276
1262 1277
1263static void hsw_pipe_crc_update(struct drm_device *dev, enum pipe pipe) 1278static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1264{ 1279{
1265 struct drm_i915_private *dev_priv = dev->dev_private; 1280 struct drm_i915_private *dev_priv = dev->dev_private;
1266 1281
1267 display_pipe_crc_update(dev, pipe, 1282 display_pipe_crc_irq_handler(dev, pipe,
1268 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1283 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1269 0, 0, 0, 0); 1284 0, 0, 0, 0);
1270} 1285}
1271 1286
1272static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe) 1287static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1273{ 1288{
1274 struct drm_i915_private *dev_priv = dev->dev_private; 1289 struct drm_i915_private *dev_priv = dev->dev_private;
1275 1290
1276 display_pipe_crc_update(dev, pipe, 1291 display_pipe_crc_irq_handler(dev, pipe,
1277 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1292 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1278 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1293 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1279 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1294 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1280 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1295 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1281 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1296 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1282} 1297}
1283 1298
1284static void ilk_pipe_crc_update(struct drm_device *dev, enum pipe pipe) 1299static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1285{ 1300{
1286 struct drm_i915_private *dev_priv = dev->dev_private; 1301 struct drm_i915_private *dev_priv = dev->dev_private;
1302 uint32_t res1, res2;
1287 1303
1288 display_pipe_crc_update(dev, pipe, 1304 if (INTEL_INFO(dev)->gen >= 3)
1289 I915_READ(PIPE_CRC_RES_RED_ILK(pipe)), 1305 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1290 I915_READ(PIPE_CRC_RES_GREEN_ILK(pipe)), 1306 else
1291 I915_READ(PIPE_CRC_RES_BLUE_ILK(pipe)), 1307 res1 = 0;
1292 I915_READ(PIPE_CRC_RES_RES1_ILK(pipe)), 1308
1293 I915_READ(PIPE_CRC_RES_RES2_ILK(pipe))); 1309 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1310 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1311 else
1312 res2 = 0;
1313
1314 display_pipe_crc_irq_handler(dev, pipe,
1315 I915_READ(PIPE_CRC_RES_RED(pipe)),
1316 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1317 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1318 res1, res2);
1294} 1319}
1295#else
1296static inline void hsw_pipe_crc_update(struct drm_device *dev, int pipe) {}
1297static inline void ivb_pipe_crc_update(struct drm_device *dev, int pipe) {}
1298static inline void ilk_pipe_crc_update(struct drm_device *dev, int pipe) {}
1299#endif
1300 1320
1301/* The RPS events need forcewake, so we add them to a work queue and mask their 1321/* The RPS events need forcewake, so we add them to a work queue and mask their
1302 * IMR bits until the work is done. Other interrupts can be processed without 1322 * IMR bits until the work is done. Other interrupts can be processed without
@@ -1365,13 +1385,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1365 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1385 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1366 1386
1367 for_each_pipe(pipe) { 1387 for_each_pipe(pipe) {
1368 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1388 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1369 drm_handle_vblank(dev, pipe); 1389 drm_handle_vblank(dev, pipe);
1370 1390
1371 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1391 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1372 intel_prepare_page_flip(dev, pipe); 1392 intel_prepare_page_flip(dev, pipe);
1373 intel_finish_page_flip(dev, pipe); 1393 intel_finish_page_flip(dev, pipe);
1374 } 1394 }
1395
1396 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1397 i9xx_pipe_crc_irq_handler(dev, pipe);
1375 } 1398 }
1376 1399
1377 /* Consume port. Then clear IIR or we'll miss events */ 1400 /* Consume port. Then clear IIR or we'll miss events */
@@ -1475,9 +1498,9 @@ static void ivb_err_int_handler(struct drm_device *dev)
1475 1498
1476 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1499 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1477 if (IS_IVYBRIDGE(dev)) 1500 if (IS_IVYBRIDGE(dev))
1478 ivb_pipe_crc_update(dev, pipe); 1501 ivb_pipe_crc_irq_handler(dev, pipe);
1479 else 1502 else
1480 hsw_pipe_crc_update(dev, pipe); 1503 hsw_pipe_crc_irq_handler(dev, pipe);
1481 } 1504 }
1482 } 1505 }
1483 1506
@@ -1550,6 +1573,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1550static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1573static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1551{ 1574{
1552 struct drm_i915_private *dev_priv = dev->dev_private; 1575 struct drm_i915_private *dev_priv = dev->dev_private;
1576 enum pipe pipe;
1553 1577
1554 if (de_iir & DE_AUX_CHANNEL_A) 1578 if (de_iir & DE_AUX_CHANNEL_A)
1555 dp_aux_irq_handler(dev); 1579 dp_aux_irq_handler(dev);
@@ -1557,37 +1581,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1557 if (de_iir & DE_GSE) 1581 if (de_iir & DE_GSE)
1558 intel_opregion_asle_intr(dev); 1582 intel_opregion_asle_intr(dev);
1559 1583
1560 if (de_iir & DE_PIPEA_VBLANK)
1561 drm_handle_vblank(dev, 0);
1562
1563 if (de_iir & DE_PIPEB_VBLANK)
1564 drm_handle_vblank(dev, 1);
1565
1566 if (de_iir & DE_POISON) 1584 if (de_iir & DE_POISON)
1567 DRM_ERROR("Poison interrupt\n"); 1585 DRM_ERROR("Poison interrupt\n");
1568 1586
1569 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1587 for_each_pipe(pipe) {
1570 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1588 if (de_iir & DE_PIPE_VBLANK(pipe))
1571 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1589 drm_handle_vblank(dev, pipe);
1572
1573 if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1574 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1575 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1576
1577 if (de_iir & DE_PIPEA_CRC_DONE)
1578 ilk_pipe_crc_update(dev, PIPE_A);
1579 1590
1580 if (de_iir & DE_PIPEB_CRC_DONE) 1591 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1581 ilk_pipe_crc_update(dev, PIPE_B); 1592 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1593 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1594 pipe_name(pipe));
1582 1595
1583 if (de_iir & DE_PLANEA_FLIP_DONE) { 1596 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1584 intel_prepare_page_flip(dev, 0); 1597 i9xx_pipe_crc_irq_handler(dev, pipe);
1585 intel_finish_page_flip_plane(dev, 0);
1586 }
1587 1598
1588 if (de_iir & DE_PLANEB_FLIP_DONE) { 1599 /* plane/pipes map 1:1 on ilk+ */
1589 intel_prepare_page_flip(dev, 1); 1600 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1590 intel_finish_page_flip_plane(dev, 1); 1601 intel_prepare_page_flip(dev, pipe);
1602 intel_finish_page_flip_plane(dev, pipe);
1603 }
1591 } 1604 }
1592 1605
1593 /* check event from PCH */ 1606 /* check event from PCH */
@@ -1610,7 +1623,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1610static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1623static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1611{ 1624{
1612 struct drm_i915_private *dev_priv = dev->dev_private; 1625 struct drm_i915_private *dev_priv = dev->dev_private;
1613 int i; 1626 enum pipe i;
1614 1627
1615 if (de_iir & DE_ERR_INT_IVB) 1628 if (de_iir & DE_ERR_INT_IVB)
1616 ivb_err_int_handler(dev); 1629 ivb_err_int_handler(dev);
@@ -1621,10 +1634,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1621 if (de_iir & DE_GSE_IVB) 1634 if (de_iir & DE_GSE_IVB)
1622 intel_opregion_asle_intr(dev); 1635 intel_opregion_asle_intr(dev);
1623 1636
1624 for (i = 0; i < 3; i++) { 1637 for_each_pipe(i) {
1625 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 1638 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1626 drm_handle_vblank(dev, i); 1639 drm_handle_vblank(dev, i);
1627 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 1640
1641 /* plane/pipes map 1:1 on ilk+ */
1642 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1628 intel_prepare_page_flip(dev, i); 1643 intel_prepare_page_flip(dev, i);
1629 intel_finish_page_flip_plane(dev, i); 1644 intel_finish_page_flip_plane(dev, i);
1630 } 1645 }
@@ -2027,7 +2042,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2027 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2042 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2028 unsigned long irqflags; 2043 unsigned long irqflags;
2029 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2044 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2030 DE_PIPE_VBLANK_ILK(pipe); 2045 DE_PIPE_VBLANK(pipe);
2031 2046
2032 if (!i915_pipe_enabled(dev, pipe)) 2047 if (!i915_pipe_enabled(dev, pipe))
2033 return -EINVAL; 2048 return -EINVAL;
@@ -2050,7 +2065,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2050 2065
2051 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2066 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2052 imr = I915_READ(VLV_IMR); 2067 imr = I915_READ(VLV_IMR);
2053 if (pipe == 0) 2068 if (pipe == PIPE_A)
2054 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2069 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2055 else 2070 else
2056 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2071 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -2085,7 +2100,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2085 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2100 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2086 unsigned long irqflags; 2101 unsigned long irqflags;
2087 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2102 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2088 DE_PIPE_VBLANK_ILK(pipe); 2103 DE_PIPE_VBLANK(pipe);
2089 2104
2090 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2105 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2091 ironlake_disable_display_irq(dev_priv, bit); 2106 ironlake_disable_display_irq(dev_priv, bit);
@@ -2102,7 +2117,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2102 i915_disable_pipestat(dev_priv, pipe, 2117 i915_disable_pipestat(dev_priv, pipe,
2103 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2118 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2104 imr = I915_READ(VLV_IMR); 2119 imr = I915_READ(VLV_IMR);
2105 if (pipe == 0) 2120 if (pipe == PIPE_A)
2106 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2121 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2107 else 2122 else
2108 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2123 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -2268,8 +2283,12 @@ static void i915_hangcheck_elapsed(unsigned long data)
2268 if (waitqueue_active(&ring->irq_queue)) { 2283 if (waitqueue_active(&ring->irq_queue)) {
2269 /* Issue a wake-up to catch stuck h/w. */ 2284 /* Issue a wake-up to catch stuck h/w. */
2270 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2285 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2271 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2286 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2272 ring->name); 2287 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2288 ring->name);
2289 else
2290 DRM_INFO("Fake missed irq on %s\n",
2291 ring->name);
2273 wake_up_all(&ring->irq_queue); 2292 wake_up_all(&ring->irq_queue);
2274 } 2293 }
2275 /* Safeguard against driver failure */ 2294 /* Safeguard against driver failure */
@@ -2593,7 +2612,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2593{ 2612{
2594 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2613 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2595 u32 enable_mask; 2614 u32 enable_mask;
2596 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2615 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2616 PIPE_CRC_DONE_ENABLE;
2597 unsigned long irqflags; 2617 unsigned long irqflags;
2598 2618
2599 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2619 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2623,9 +2643,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2623 /* Interrupt setup is already guaranteed to be single-threaded, this is 2643 /* Interrupt setup is already guaranteed to be single-threaded, this is
2624 * just to make the assert_spin_locked check happy. */ 2644 * just to make the assert_spin_locked check happy. */
2625 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2645 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2626 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2646 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2627 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2647 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2628 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2648 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2629 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2649 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2630 2650
2631 I915_WRITE(VLV_IIR, 0xffffffff); 2651 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2716,6 +2736,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
2716static int i8xx_irq_postinstall(struct drm_device *dev) 2736static int i8xx_irq_postinstall(struct drm_device *dev)
2717{ 2737{
2718 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2738 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2739 unsigned long irqflags;
2719 2740
2720 I915_WRITE16(EMR, 2741 I915_WRITE16(EMR,
2721 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2742 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2736,6 +2757,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2736 I915_USER_INTERRUPT); 2757 I915_USER_INTERRUPT);
2737 POSTING_READ16(IER); 2758 POSTING_READ16(IER);
2738 2759
2760 /* Interrupt setup is already guaranteed to be single-threaded, this is
2761 * just to make the assert_spin_locked check happy. */
2762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2763 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2764 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2765 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2766
2739 return 0; 2767 return 0;
2740} 2768}
2741 2769
@@ -2822,13 +2850,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2822 if (iir & I915_USER_INTERRUPT) 2850 if (iir & I915_USER_INTERRUPT)
2823 notify_ring(dev, &dev_priv->ring[RCS]); 2851 notify_ring(dev, &dev_priv->ring[RCS]);
2824 2852
2825 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2853 for_each_pipe(pipe) {
2826 i8xx_handle_vblank(dev, 0, iir)) 2854 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2827 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2855 i8xx_handle_vblank(dev, pipe, iir))
2856 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2828 2857
2829 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2858 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2830 i8xx_handle_vblank(dev, 1, iir)) 2859 i9xx_pipe_crc_irq_handler(dev, pipe);
2831 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2860 }
2832 2861
2833 iir = new_iir; 2862 iir = new_iir;
2834 } 2863 }
@@ -2875,6 +2904,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2875{ 2904{
2876 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2905 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2877 u32 enable_mask; 2906 u32 enable_mask;
2907 unsigned long irqflags;
2878 2908
2879 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2909 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2880 2910
@@ -2910,6 +2940,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
2910 2940
2911 i915_enable_asle_pipestat(dev); 2941 i915_enable_asle_pipestat(dev);
2912 2942
2943 /* Interrupt setup is already guaranteed to be single-threaded, this is
2944 * just to make the assert_spin_locked check happy. */
2945 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2946 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2947 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2948 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2949
2913 return 0; 2950 return 0;
2914} 2951}
2915 2952
@@ -3021,6 +3058,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3021 3058
3022 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3059 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3023 blc_event = true; 3060 blc_event = true;
3061
3062 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3063 i9xx_pipe_crc_irq_handler(dev, pipe);
3024 } 3064 }
3025 3065
3026 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3066 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -3119,7 +3159,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
3119 /* Interrupt setup is already guaranteed to be single-threaded, this is 3159 /* Interrupt setup is already guaranteed to be single-threaded, this is
3120 * just to make the assert_spin_locked check happy. */ 3160 * just to make the assert_spin_locked check happy. */
3121 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3161 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3122 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3162 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3163 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3164 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3123 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3165 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3124 3166
3125 /* 3167 /*
@@ -3265,6 +3307,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3265 3307
3266 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3308 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3267 blc_event = true; 3309 blc_event = true;
3310
3311 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3312 i9xx_pipe_crc_irq_handler(dev, pipe);
3268 } 3313 }
3269 3314
3270 3315
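
The i915_irq.c hunks above follow one pattern per generation: the postinstall hooks enable the CRC-done event in the pipe status mask for pipes A and B under dev_priv->irq_lock, and the interrupt handlers loop over every pipe with for_each_pipe(), forwarding PIPE_CRC_DONE_INTERRUPT_STATUS to i9xx_pipe_crc_irq_handler() alongside the existing vblank handling. A minimal standalone C model of that enable/dispatch flow (bit values, pipe count and helper names are stand-ins, not driver code; the real PIPESTAT registers keep separate enable and status bits):

/* Model of the CRC interrupt plumbing added above: postinstall enables a
 * per-pipe "CRC done" event, the handler walks every pipe and forwards
 * whichever enabled events fired. */
#include <stdio.h>

#define NUM_PIPES      2
#define EVENT_VBLANK   (1u << 0)
#define EVENT_CRC_DONE (1u << 1)

static unsigned int enabled[NUM_PIPES];

static void enable_pipestat(int pipe, unsigned int mask)
{
        /* in the driver this runs under dev_priv->irq_lock */
        enabled[pipe] |= mask;
}

static void irq_handler(const unsigned int fired[NUM_PIPES])
{
        for (int pipe = 0; pipe < NUM_PIPES; pipe++) {
                unsigned int events = fired[pipe] & enabled[pipe];

                if (events & EVENT_VBLANK)
                        printf("pipe %c: vblank\n", 'A' + pipe);
                if (events & EVENT_CRC_DONE)
                        printf("pipe %c: CRC done -> hand off to CRC code\n",
                               'A' + pipe);
        }
}

int main(void)
{
        unsigned int fired[NUM_PIPES] = {
                EVENT_CRC_DONE,                 /* pipe A */
                EVENT_VBLANK | EVENT_CRC_DONE,  /* pipe B */
        };

        for (int pipe = 0; pipe < NUM_PIPES; pipe++)
                enable_pipestat(pipe, EVENT_CRC_DONE | EVENT_VBLANK);

        irq_handler(fired);
        return 0;
}

Built with a plain C compiler this prints one CRC-done line per pipe plus the pipe B vblank, mirroring the per-pipe dispatch loop the patch adds to i8xx_irq_handler() and the other legacy handlers.
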
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e7488b64965..04896da9001c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -444,7 +444,7 @@
444 444
445#define _DPIO_TX3_SWING_CTL4_A 0x690 445#define _DPIO_TX3_SWING_CTL4_A 0x690
446#define _DPIO_TX3_SWING_CTL4_B 0x2a90 446#define _DPIO_TX3_SWING_CTL4_B 0x2a90
447#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \ 447#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
448 _DPIO_TX3_SWING_CTL4_B) 448 _DPIO_TX3_SWING_CTL4_B)
449 449
450/* 450/*
@@ -657,6 +657,10 @@
657#define ARB_MODE_SWIZZLE_IVB (1<<5) 657#define ARB_MODE_SWIZZLE_IVB (1<<5)
658#define RENDER_HWS_PGA_GEN7 (0x04080) 658#define RENDER_HWS_PGA_GEN7 (0x04080)
659#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 659#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
660#define RING_FAULT_GTTSEL_MASK (1<<11)
661#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
662#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
663#define RING_FAULT_VALID (1<<0)
660#define DONE_REG 0x40b0 664#define DONE_REG 0x40b0
661#define BSD_HWS_PGA_GEN7 (0x04180) 665#define BSD_HWS_PGA_GEN7 (0x04180)
662#define BLT_HWS_PGA_GEN7 (0x04280) 666#define BLT_HWS_PGA_GEN7 (0x04280)
@@ -718,6 +722,7 @@
718#define NOPID 0x02094 722#define NOPID 0x02094
719#define HWSTAM 0x02098 723#define HWSTAM 0x02098
720#define DMA_FADD_I8XX 0x020d0 724#define DMA_FADD_I8XX 0x020d0
725#define RING_BBSTATE(base) ((base)+0x110)
721 726
722#define ERROR_GEN6 0x040a0 727#define ERROR_GEN6 0x040a0
723#define GEN7_ERR_INT 0x44040 728#define GEN7_ERR_INT 0x44040
@@ -1106,9 +1111,6 @@
1106 _HSW_PIPE_SLICE_CHICKEN_1_A, + \ 1111 _HSW_PIPE_SLICE_CHICKEN_1_A, + \
1107 _HSW_PIPE_SLICE_CHICKEN_1_B) 1112 _HSW_PIPE_SLICE_CHICKEN_1_B)
1108 1113
1109#define HSW_CLKGATE_DISABLE_PART_1 0x46500
1110#define HSW_DPFC_GATING_DISABLE (1<<23)
1111
1112/* 1114/*
1113 * GPIO regs 1115 * GPIO regs
1114 */ 1116 */
@@ -1476,7 +1478,7 @@
1476#define MCHBAR_MIRROR_BASE_SNB 0x140000 1478#define MCHBAR_MIRROR_BASE_SNB 0x140000
1477 1479
1478/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ 1480/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
1479#define DCLK 0x5e04 1481#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
1480 1482
1481/** 915-945 and GM965 MCH register controlling DRAM channel access */ 1483/** 915-945 and GM965 MCH register controlling DRAM channel access */
1482#define DCC 0x10200 1484#define DCC 0x10200
@@ -1771,9 +1773,9 @@
1771#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 1773#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
1772#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16)) 1774#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
1773 1775
1774#define GEN6_GT_PERF_STATUS 0x145948 1776#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
1775#define GEN6_RP_STATE_LIMITS 0x145994 1777#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
1776#define GEN6_RP_STATE_CAP 0x145998 1778#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
1777 1779
1778/* 1780/*
1779 * Logical Context regs 1781 * Logical Context regs
@@ -1843,36 +1845,58 @@
1843/* Pipe A CRC regs */ 1845/* Pipe A CRC regs */
1844#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) 1846#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050)
1845#define PIPE_CRC_ENABLE (1 << 31) 1847#define PIPE_CRC_ENABLE (1 << 31)
1848/* ivb+ source selection */
1846#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) 1849#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
1847#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) 1850#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
1848#define PIPE_CRC_SOURCE_PF_IVB (2 << 29) 1851#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
1852/* ilk+ source selection */
1849#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) 1853#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
1850#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) 1854#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
1851#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) 1855#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
1852/* embedded DP port on the north display block, reserved on ivb */ 1856/* embedded DP port on the north display block, reserved on ivb */
1853#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) 1857#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
1854#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ 1858#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
1859/* vlv source selection */
1860#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
1861#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
1862#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
1863/* with DP port the pipe source is invalid */
1864#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
1865#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
1866#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
1867/* gen3+ source selection */
1868#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
1869#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
1870#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
1871/* with DP/TV port the pipe source is invalid */
1872#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
1873#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
1874#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
1875#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
1876#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
1877/* gen2 doesn't have source selection bits */
1878#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
1879
1855#define _PIPE_CRC_RES_1_A_IVB 0x60064 1880#define _PIPE_CRC_RES_1_A_IVB 0x60064
1856#define _PIPE_CRC_RES_2_A_IVB 0x60068 1881#define _PIPE_CRC_RES_2_A_IVB 0x60068
1857#define _PIPE_CRC_RES_3_A_IVB 0x6006c 1882#define _PIPE_CRC_RES_3_A_IVB 0x6006c
1858#define _PIPE_CRC_RES_4_A_IVB 0x60070 1883#define _PIPE_CRC_RES_4_A_IVB 0x60070
1859#define _PIPE_CRC_RES_5_A_IVB 0x60074 1884#define _PIPE_CRC_RES_5_A_IVB 0x60074
1860 1885
1861#define _PIPE_CRC_RES_RED_A_ILK 0x60060 1886#define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060)
1862#define _PIPE_CRC_RES_GREEN_A_ILK 0x60064 1887#define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064)
1863#define _PIPE_CRC_RES_BLUE_A_ILK 0x60068 1888#define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068)
1864#define _PIPE_CRC_RES_RES1_A_ILK 0x6006c 1889#define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c)
1865#define _PIPE_CRC_RES_RES2_A_ILK 0x60080 1890#define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080)
1866 1891
1867/* Pipe B CRC regs */ 1892/* Pipe B CRC regs */
1868#define _PIPE_CRC_CTL_B 0x61050
1869#define _PIPE_CRC_RES_1_B_IVB 0x61064 1893#define _PIPE_CRC_RES_1_B_IVB 0x61064
1870#define _PIPE_CRC_RES_2_B_IVB 0x61068 1894#define _PIPE_CRC_RES_2_B_IVB 0x61068
1871#define _PIPE_CRC_RES_3_B_IVB 0x6106c 1895#define _PIPE_CRC_RES_3_B_IVB 0x6106c
1872#define _PIPE_CRC_RES_4_B_IVB 0x61070 1896#define _PIPE_CRC_RES_4_B_IVB 0x61070
1873#define _PIPE_CRC_RES_5_B_IVB 0x61074 1897#define _PIPE_CRC_RES_5_B_IVB 0x61074
1874 1898
1875#define PIPE_CRC_CTL(pipe) _PIPE(pipe, _PIPE_CRC_CTL_A, _PIPE_CRC_CTL_B) 1899#define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
1876#define PIPE_CRC_RES_1_IVB(pipe) \ 1900#define PIPE_CRC_RES_1_IVB(pipe) \
1877 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) 1901 _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
1878#define PIPE_CRC_RES_2_IVB(pipe) \ 1902#define PIPE_CRC_RES_2_IVB(pipe) \
@@ -1884,16 +1908,16 @@
1884#define PIPE_CRC_RES_5_IVB(pipe) \ 1908#define PIPE_CRC_RES_5_IVB(pipe) \
1885 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) 1909 _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
1886 1910
1887#define PIPE_CRC_RES_RED_ILK(pipe) \ 1911#define PIPE_CRC_RES_RED(pipe) \
1888 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A_ILK, 0x01000) 1912 _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
1889#define PIPE_CRC_RES_GREEN_ILK(pipe) \ 1913#define PIPE_CRC_RES_GREEN(pipe) \
1890 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A_ILK, 0x01000) 1914 _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
1891#define PIPE_CRC_RES_BLUE_ILK(pipe) \ 1915#define PIPE_CRC_RES_BLUE(pipe) \
1892 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A_ILK, 0x01000) 1916 _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
1893#define PIPE_CRC_RES_RES1_ILK(pipe) \ 1917#define PIPE_CRC_RES_RES1_I915(pipe) \
1894 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_ILK, 0x01000) 1918 _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
1895#define PIPE_CRC_RES_RES2_ILK(pipe) \ 1919#define PIPE_CRC_RES_RES2_G4X(pipe) \
1896 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_ILK, 0x01000) 1920 _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
1897 1921
1898/* Pipe A timing regs */ 1922/* Pipe A timing regs */
1899#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1923#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
@@ -2130,6 +2154,14 @@
2130#define PCH_HDMIC 0xe1150 2154#define PCH_HDMIC 0xe1150
2131#define PCH_HDMID 0xe1160 2155#define PCH_HDMID 0xe1160
2132 2156
2157#define PORT_DFT_I9XX 0x61150
2158#define DC_BALANCE_RESET (1 << 25)
2159#define PORT_DFT2_G4X 0x61154
2160#define DC_BALANCE_RESET_VLV (1 << 31)
2161#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
2162#define PIPE_B_SCRAMBLE_RESET (1 << 1)
2163#define PIPE_A_SCRAMBLE_RESET (1 << 0)
2164
2133/* Gen 3 SDVO bits: */ 2165/* Gen 3 SDVO bits: */
2134#define SDVO_ENABLE (1 << 31) 2166#define SDVO_ENABLE (1 << 31)
2135#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) 2167#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
@@ -2363,6 +2395,21 @@
2363 2395
2364#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 2396#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
2365 2397
2398#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
2399#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
2400#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
2401 _VLV_BLC_PWM_CTL2_B)
2402
2403#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
2404#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
2405#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
2406 _VLV_BLC_PWM_CTL_B)
2407
2408#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
2409#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
2410#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
2411 _VLV_BLC_HIST_CTL_B)
2412
2366/* Backlight control */ 2413/* Backlight control */
2367#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 2414#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
2368#define BLM_PWM_ENABLE (1 << 31) 2415#define BLM_PWM_ENABLE (1 << 31)
@@ -3906,6 +3953,7 @@
3906#define DE_SPRITEA_FLIP_DONE (1 << 28) 3953#define DE_SPRITEA_FLIP_DONE (1 << 28)
3907#define DE_PLANEB_FLIP_DONE (1 << 27) 3954#define DE_PLANEB_FLIP_DONE (1 << 27)
3908#define DE_PLANEA_FLIP_DONE (1 << 26) 3955#define DE_PLANEA_FLIP_DONE (1 << 26)
3956#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
3909#define DE_PCU_EVENT (1 << 25) 3957#define DE_PCU_EVENT (1 << 25)
3910#define DE_GTT_FAULT (1 << 24) 3958#define DE_GTT_FAULT (1 << 24)
3911#define DE_POISON (1 << 23) 3959#define DE_POISON (1 << 23)
@@ -3922,12 +3970,15 @@
3922#define DE_PIPEB_CRC_DONE (1 << 10) 3970#define DE_PIPEB_CRC_DONE (1 << 10)
3923#define DE_PIPEB_FIFO_UNDERRUN (1 << 8) 3971#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
3924#define DE_PIPEA_VBLANK (1 << 7) 3972#define DE_PIPEA_VBLANK (1 << 7)
3973#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe)))
3925#define DE_PIPEA_EVEN_FIELD (1 << 6) 3974#define DE_PIPEA_EVEN_FIELD (1 << 6)
3926#define DE_PIPEA_ODD_FIELD (1 << 5) 3975#define DE_PIPEA_ODD_FIELD (1 << 5)
3927#define DE_PIPEA_LINE_COMPARE (1 << 4) 3976#define DE_PIPEA_LINE_COMPARE (1 << 4)
3928#define DE_PIPEA_VSYNC (1 << 3) 3977#define DE_PIPEA_VSYNC (1 << 3)
3929#define DE_PIPEA_CRC_DONE (1 << 2) 3978#define DE_PIPEA_CRC_DONE (1 << 2)
3979#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
3930#define DE_PIPEA_FIFO_UNDERRUN (1 << 0) 3980#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
3981#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe)))
3931 3982
3932/* More Ivybridge lolz */ 3983/* More Ivybridge lolz */
3933#define DE_ERR_INT_IVB (1<<30) 3984#define DE_ERR_INT_IVB (1<<30)
@@ -3943,9 +3994,8 @@
3943#define DE_PIPEB_VBLANK_IVB (1<<5) 3994#define DE_PIPEB_VBLANK_IVB (1<<5)
3944#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) 3995#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
3945#define DE_PLANEA_FLIP_DONE_IVB (1<<3) 3996#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
3997#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane)))
3946#define DE_PIPEA_VBLANK_IVB (1<<0) 3998#define DE_PIPEA_VBLANK_IVB (1<<0)
3947
3948#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
3949#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) 3999#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
3950 4000
3951#define VLV_MASTER_IER 0x4400c /* Gunit master IER */ 4001#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
@@ -4013,6 +4063,9 @@
4013#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 4063#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
4014#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 4064#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
4015 4065
4066#define HSW_SCRATCH1 0xb038
4067#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
4068
4016#define HSW_FUSE_STRAP 0x42014 4069#define HSW_FUSE_STRAP 0x42014
4017#define HSW_CDCLK_LIMIT (1 << 24) 4070#define HSW_CDCLK_LIMIT (1 << 24)
4018 4071
@@ -4408,7 +4461,9 @@
4408#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 4461#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
4409 4462
4410#define SOUTH_DSPCLK_GATE_D 0xc2020 4463#define SOUTH_DSPCLK_GATE_D 0xc2020
4464#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
4411#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 4465#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
4466#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
4412#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) 4467#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
4413 4468
4414/* CPU: FDI_TX */ 4469/* CPU: FDI_TX */
@@ -4864,6 +4919,9 @@
4864#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 4919#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4865#define DOP_CLOCK_GATING_DISABLE (1<<0) 4920#define DOP_CLOCK_GATING_DISABLE (1<<0)
4866 4921
4922#define HSW_ROW_CHICKEN3 0xe49c
4923#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
4924
4867#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 4925#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
4868#define INTEL_AUDIO_DEVCL 0x808629FB 4926#define INTEL_AUDIO_DEVCL 0x808629FB
4869#define INTEL_AUDIO_DEVBLC 0x80862801 4927#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4905,6 +4963,18 @@
4905 CPT_AUD_CNTL_ST_B) 4963 CPT_AUD_CNTL_ST_B)
4906#define CPT_AUD_CNTRL_ST2 0xE50C0 4964#define CPT_AUD_CNTRL_ST2 0xE50C0
4907 4965
4966#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
4967#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
4968#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4969 VLV_HDMIW_HDMIEDID_A, \
4970 VLV_HDMIW_HDMIEDID_B)
4971#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
4972#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
4973#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4974 VLV_AUD_CNTL_ST_A, \
4975 VLV_AUD_CNTL_ST_B)
4976#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
4977
4908/* These are the 4 32-bit write offset registers for each stream 4978/* These are the 4 32-bit write offset registers for each stream
4909 * output buffer. It determines the offset from the 4979 * output buffer. It determines the offset from the
4910 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to. 4980 * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4921,6 +4991,12 @@
4921#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ 4991#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4922 CPT_AUD_CONFIG_A, \ 4992 CPT_AUD_CONFIG_A, \
4923 CPT_AUD_CONFIG_B) 4993 CPT_AUD_CONFIG_B)
4994#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
4995#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
4996#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
4997 VLV_AUD_CONFIG_A, \
4998 VLV_AUD_CONFIG_B)
4999
4924#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 5000#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4925#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 5001#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4926#define AUD_CONFIG_UPPER_N_SHIFT 20 5002#define AUD_CONFIG_UPPER_N_SHIFT 20
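
Most of the register additions above are pipe-indexed and lean on the two helper macros already used throughout i915_reg.h. As a rough sketch, assuming the file's usual definitions _PIPE(pipe, a, b) = a + pipe*(b - a) and _PIPE_INC(pipe, base, inc) = base + pipe*inc, the new PIPE_CRC_CTL() and VLV backlight macros resolve to per-pipe MMIO offsets like this (display_mmio_offset is taken as 0 here purely so the printed values match the raw offsets above):

/* Sketch of how the pipe-indexed macros resolve; the _PIPE/_PIPE_INC
 * definitions are assumed from i915_reg.h of this period. */
#include <stdio.h>

#define _PIPE(pipe, a, b)          ((a) + (pipe) * ((b) - (a)))
#define _PIPE_INC(pipe, base, inc) ((base) + (pipe) * (inc))

#define DISPLAY_MMIO_OFFSET 0x0    /* assumption for the example */

#define _PIPE_CRC_CTL_A     (DISPLAY_MMIO_OFFSET + 0x60050)
#define PIPE_CRC_CTL(pipe)  _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)

#define _VLV_BLC_PWM_CTL_A  (DISPLAY_MMIO_OFFSET + 0x61254)
#define _VLV_BLC_PWM_CTL_B  (DISPLAY_MMIO_OFFSET + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) \
        _PIPE(pipe, _VLV_BLC_PWM_CTL_A, _VLV_BLC_PWM_CTL_B)

int main(void)
{
        for (int pipe = 0; pipe < 2; pipe++)
                printf("pipe %c: PIPE_CRC_CTL=0x%05x VLV_BLC_PWM_CTL=0x%05x\n",
                       'A' + pipe, PIPE_CRC_CTL(pipe), VLV_BLC_PWM_CTL(pipe));
        return 0;  /* prints 0x60050/0x61050 and 0x61254/0x61354 for A/B */
}
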
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a088f1f46bdb..98790c7cccb1 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev)
214 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 214 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
215 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 215 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
216 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 216 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
217 } else if (IS_VALLEYVIEW(dev)) {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
219 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
220
221 dev_priv->regfile.saveBLC_PWM_CTL =
222 I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
223 dev_priv->regfile.saveBLC_HIST_CTL =
224 I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
225 dev_priv->regfile.saveBLC_PWM_CTL2 =
226 I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
227 dev_priv->regfile.saveBLC_PWM_CTL_B =
228 I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
229 dev_priv->regfile.saveBLC_HIST_CTL_B =
230 I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
231 dev_priv->regfile.saveBLC_PWM_CTL2_B =
232 I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
217 } else { 233 } else {
218 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 234 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
219 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 235 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev)
302 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 318 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
303 I915_WRITE(RSTDBYCTL, 319 I915_WRITE(RSTDBYCTL,
304 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); 320 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
321 } else if (IS_VALLEYVIEW(dev)) {
322 I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
323 dev_priv->regfile.saveBLC_PWM_CTL);
324 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
325 dev_priv->regfile.saveBLC_HIST_CTL);
326 I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
327 dev_priv->regfile.saveBLC_PWM_CTL2);
328 I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
329 dev_priv->regfile.saveBLC_PWM_CTL);
330 I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
331 dev_priv->regfile.saveBLC_HIST_CTL);
332 I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
333 dev_priv->regfile.saveBLC_PWM_CTL2);
305 } else { 334 } else {
306 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); 335 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
307 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); 336 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e29bcae1ef81..6dd622d733b9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -624,11 +624,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
624 624
625 aux_channel = child->raw[25]; 625 aux_channel = child->raw[25];
626 626
627 is_dvi = child->common.device_type & (1 << 4); 627 is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
628 is_dp = child->common.device_type & (1 << 2); 628 is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
629 is_crt = child->common.device_type & (1 << 0); 629 is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
630 is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0; 630 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
631 is_edp = is_dp && (child->common.device_type & (1 << 12)); 631 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
632 632
633 info->supports_dvi = is_dvi; 633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi; 634 info->supports_hdmi = is_hdmi;
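
The parse_ddi_port() hunk only swaps the magic bit numbers for the named device_type bits added to intel_bios.h below; the derived flags are unchanged. A small standalone sketch of that decode, using the bit positions from the header and the DEVICE_TYPE_eDP/DEVICE_TYPE_DP values it already defines:

/* Decodes a VBT child device_type word the same way parse_ddi_port()
 * does above; bit values mirror the intel_bios.h additions below. */
#include <stdbool.h>
#include <stdio.h>

#define DEVICE_TYPE_INTERNAL_CONNECTOR  (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT     (1 << 11)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING  (1 << 4)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT  (1 << 2)
#define DEVICE_TYPE_ANALOG_OUTPUT       (1 << 0)

static void decode(unsigned int device_type)
{
        bool is_dvi  = device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
        bool is_dp   = device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
        bool is_crt  = device_type & DEVICE_TYPE_ANALOG_OUTPUT;
        bool is_hdmi = is_dvi && !(device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
        bool is_edp  = is_dp && (device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);

        printf("0x%04x: dvi=%d dp=%d crt=%d hdmi=%d edp=%d\n",
               device_type, is_dvi, is_dp, is_crt, is_hdmi, is_edp);
}

int main(void)
{
        decode(0x78C6);  /* DEVICE_TYPE_eDP from intel_bios.h */
        decode(0x68C6);  /* DEVICE_TYPE_DP */
        return 0;
}
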
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 287cc5a21c2e..f580a2b0ddd3 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -638,6 +638,40 @@ int intel_parse_bios(struct drm_device *dev);
638#define DEVICE_TYPE_DP 0x68C6 638#define DEVICE_TYPE_DP 0x68C6
639#define DEVICE_TYPE_eDP 0x78C6 639#define DEVICE_TYPE_eDP 0x78C6
640 640
641#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
642#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
643#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
644#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
645#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
646#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
647#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
648#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
649#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
650#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
651#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
652#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
653#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
654#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
655#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
656
657/*
658 * Bits we care about when checking for DEVICE_TYPE_eDP
659 * Depending on the system, the other bits may or may not
660 * be set for eDP outputs.
661 */
662#define DEVICE_TYPE_eDP_BITS \
663 (DEVICE_TYPE_INTERNAL_CONNECTOR | \
664 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
665 DEVICE_TYPE_MIPI_OUTPUT | \
666 DEVICE_TYPE_COMPOSITE_OUTPUT | \
667 DEVICE_TYPE_DUAL_CHANNEL | \
668 DEVICE_TYPE_LVDS_SINGALING | \
669 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
670 DEVICE_TYPE_VIDEO_SIGNALING | \
671 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
672 DEVICE_TYPE_DIGITAL_OUTPUT | \
673 DEVICE_TYPE_ANALOG_OUTPUT)
674
641/* define the DVO port for HDMI output type */ 675/* define the DVO port for HDMI output type */
642#define DVO_B 1 676#define DVO_B 1
643#define DVO_C 2 677#define DVO_C 2
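
DEVICE_TYPE_eDP_BITS exists because, as the comment above notes, real VBTs do not reliably set every bit of the canonical 0x78C6 eDP value; intel_dpd_is_edp() further down in this diff therefore compares only the bits that matter. A sketch of that masked test (0x1f3f is the OR of the bits listed above; the 0x1806 sample is a hypothetical VBT variant, not taken from a real system):

#include <stdbool.h>
#include <stdio.h>

#define DEVICE_TYPE_eDP       0x78C6
#define DEVICE_TYPE_eDP_BITS  0x1f3f   /* OR of the bits in the list above */

/* the masked compare used by intel_dpd_is_edp() in intel_dp.c below */
static bool looks_like_edp(unsigned int device_type)
{
        return (device_type & DEVICE_TYPE_eDP_BITS) ==
               (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}

int main(void)
{
        printf("%d %d %d\n",
               looks_like_edp(0x78C6),   /* canonical eDP value: 1 */
               looks_like_edp(0x1806),   /* hypothetical VBT variant: 1 */
               looks_like_edp(0x68C6));  /* DEVICE_TYPE_DP: 0 */
        return 0;
}
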
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cf284fa8502..e92f170f55f7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2421,9 +2421,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2421 FDI_FE_ERRC_ENABLE); 2421 FDI_FE_ERRC_ENABLE);
2422} 2422}
2423 2423
2424static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) 2424static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2425{ 2425{
2426 return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; 2426 return crtc->base.enabled && crtc->active &&
2427 crtc->config.has_pch_encoder;
2427} 2428}
2428 2429
2429static void ivb_modeset_global_resources(struct drm_device *dev) 2430static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -3074,6 +3075,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3074 I915_READ(VSYNCSHIFT(cpu_transcoder))); 3075 I915_READ(VSYNCSHIFT(cpu_transcoder)));
3075} 3076}
3076 3077
3078static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3079{
3080 struct drm_i915_private *dev_priv = dev->dev_private;
3081 uint32_t temp;
3082
3083 temp = I915_READ(SOUTH_CHICKEN1);
3084 if (temp & FDI_BC_BIFURCATION_SELECT)
3085 return;
3086
3087 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3088 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3089
3090 temp |= FDI_BC_BIFURCATION_SELECT;
3091 DRM_DEBUG_KMS("enabling fdi C rx\n");
3092 I915_WRITE(SOUTH_CHICKEN1, temp);
3093 POSTING_READ(SOUTH_CHICKEN1);
3094}
3095
3096static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3097{
3098 struct drm_device *dev = intel_crtc->base.dev;
3099 struct drm_i915_private *dev_priv = dev->dev_private;
3100
3101 switch (intel_crtc->pipe) {
3102 case PIPE_A:
3103 break;
3104 case PIPE_B:
3105 if (intel_crtc->config.fdi_lanes > 2)
3106 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3107 else
3108 cpt_enable_fdi_bc_bifurcation(dev);
3109
3110 break;
3111 case PIPE_C:
3112 cpt_enable_fdi_bc_bifurcation(dev);
3113
3114 break;
3115 default:
3116 BUG();
3117 }
3118}
3119
3077/* 3120/*
3078 * Enable PCH resources required for PCH ports: 3121 * Enable PCH resources required for PCH ports:
3079 * - PCH PLLs 3122 * - PCH PLLs
@@ -3092,6 +3135,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3092 3135
3093 assert_pch_transcoder_disabled(dev_priv, pipe); 3136 assert_pch_transcoder_disabled(dev_priv, pipe);
3094 3137
3138 if (IS_IVYBRIDGE(dev))
3139 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3140
3095 /* Write the TU size bits before fdi link training, so that error 3141 /* Write the TU size bits before fdi link training, so that error
3096 * detection works. */ 3142 * detection works. */
3097 I915_WRITE(FDI_RX_TUSIZE1(pipe), 3143 I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -4156,8 +4202,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
4156 * consider. */ 4202 * consider. */
4157void intel_connector_dpms(struct drm_connector *connector, int mode) 4203void intel_connector_dpms(struct drm_connector *connector, int mode)
4158{ 4204{
4159 struct intel_encoder *encoder = intel_attached_encoder(connector);
4160
4161 /* All the simple cases only support two dpms states. */ 4205 /* All the simple cases only support two dpms states. */
4162 if (mode != DRM_MODE_DPMS_ON) 4206 if (mode != DRM_MODE_DPMS_ON)
4163 mode = DRM_MODE_DPMS_OFF; 4207 mode = DRM_MODE_DPMS_OFF;
@@ -4168,10 +4212,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
4168 connector->dpms = mode; 4212 connector->dpms = mode;
4169 4213
4170 /* Only need to change hw state when actually enabled */ 4214 /* Only need to change hw state when actually enabled */
4171 if (encoder->base.crtc) 4215 if (connector->encoder)
4172 intel_encoder_dpms(encoder, mode); 4216 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4173 else
4174 WARN_ON(encoder->connectors_active != false);
4175 4217
4176 intel_modeset_check_state(connector->dev); 4218 intel_modeset_check_state(connector->dev);
4177} 4219}
@@ -5849,48 +5891,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5849 return true; 5891 return true;
5850} 5892}
5851 5893
5852static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5853{
5854 struct drm_i915_private *dev_priv = dev->dev_private;
5855 uint32_t temp;
5856
5857 temp = I915_READ(SOUTH_CHICKEN1);
5858 if (temp & FDI_BC_BIFURCATION_SELECT)
5859 return;
5860
5861 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5862 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5863
5864 temp |= FDI_BC_BIFURCATION_SELECT;
5865 DRM_DEBUG_KMS("enabling fdi C rx\n");
5866 I915_WRITE(SOUTH_CHICKEN1, temp);
5867 POSTING_READ(SOUTH_CHICKEN1);
5868}
5869
5870static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
5871{
5872 struct drm_device *dev = intel_crtc->base.dev;
5873 struct drm_i915_private *dev_priv = dev->dev_private;
5874
5875 switch (intel_crtc->pipe) {
5876 case PIPE_A:
5877 break;
5878 case PIPE_B:
5879 if (intel_crtc->config.fdi_lanes > 2)
5880 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5881 else
5882 cpt_enable_fdi_bc_bifurcation(dev);
5883
5884 break;
5885 case PIPE_C:
5886 cpt_enable_fdi_bc_bifurcation(dev);
5887
5888 break;
5889 default:
5890 BUG();
5891 }
5892}
5893
5894int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5894int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5895{ 5895{
5896 /* 5896 /*
@@ -6079,9 +6079,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6079 &intel_crtc->config.fdi_m_n); 6079 &intel_crtc->config.fdi_m_n);
6080 } 6080 }
6081 6081
6082 if (IS_IVYBRIDGE(dev))
6083 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
6084
6085 ironlake_set_pipeconf(crtc); 6082 ironlake_set_pipeconf(crtc);
6086 6083
6087 /* Set up the display plane register */ 6084 /* Set up the display plane register */
@@ -6557,22 +6554,79 @@ static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6557 } 6554 }
6558} 6555}
6559 6556
6560static void haswell_modeset_global_resources(struct drm_device *dev) 6557#define for_each_power_domain(domain, mask) \
6558 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
6559 if ((1 << (domain)) & (mask))
6560
6561static unsigned long get_pipe_power_domains(struct drm_device *dev,
6562 enum pipe pipe, bool pfit_enabled)
6561{ 6563{
6562 bool enable = false; 6564 unsigned long mask;
6565 enum transcoder transcoder;
6566
6567 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6568
6569 mask = BIT(POWER_DOMAIN_PIPE(pipe));
6570 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6571 if (pfit_enabled)
6572 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6573
6574 return mask;
6575}
6576
6577void intel_display_set_init_power(struct drm_device *dev, bool enable)
6578{
6579 struct drm_i915_private *dev_priv = dev->dev_private;
6580
6581 if (dev_priv->power_domains.init_power_on == enable)
6582 return;
6583
6584 if (enable)
6585 intel_display_power_get(dev, POWER_DOMAIN_INIT);
6586 else
6587 intel_display_power_put(dev, POWER_DOMAIN_INIT);
6588
6589 dev_priv->power_domains.init_power_on = enable;
6590}
6591
6592static void modeset_update_power_wells(struct drm_device *dev)
6593{
6594 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
6563 struct intel_crtc *crtc; 6595 struct intel_crtc *crtc;
6564 6596
6597 /*
6598 * First get all needed power domains, then put all unneeded, to avoid
6599 * any unnecessary toggling of the power wells.
6600 */
6565 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 6601 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6602 enum intel_display_power_domain domain;
6603
6566 if (!crtc->base.enabled) 6604 if (!crtc->base.enabled)
6567 continue; 6605 continue;
6568 6606
6569 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled || 6607 pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
6570 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6608 crtc->pipe,
6571 enable = true; 6609 crtc->config.pch_pfit.enabled);
6610
6611 for_each_power_domain(domain, pipe_domains[crtc->pipe])
6612 intel_display_power_get(dev, domain);
6613 }
6614
6615 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
6616 enum intel_display_power_domain domain;
6617
6618 for_each_power_domain(domain, crtc->enabled_power_domains)
6619 intel_display_power_put(dev, domain);
6620
6621 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
6572 } 6622 }
6573 6623
6574 intel_set_power_well(dev, enable); 6624 intel_display_set_init_power(dev, false);
6625}
6575 6626
6627static void haswell_modeset_global_resources(struct drm_device *dev)
6628{
6629 modeset_update_power_wells(dev);
6576 hsw_update_package_c8(dev); 6630 hsw_update_package_c8(dev);
6577} 6631}
6578 6632
@@ -6935,6 +6989,11 @@ static void ironlake_write_eld(struct drm_connector *connector,
6935 aud_config = IBX_AUD_CFG(pipe); 6989 aud_config = IBX_AUD_CFG(pipe);
6936 aud_cntl_st = IBX_AUD_CNTL_ST(pipe); 6990 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
6937 aud_cntrl_st2 = IBX_AUD_CNTL_ST2; 6991 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6992 } else if (IS_VALLEYVIEW(connector->dev)) {
6993 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
6994 aud_config = VLV_AUD_CFG(pipe);
6995 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
6996 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
6938 } else { 6997 } else {
6939 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); 6998 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6940 aud_config = CPT_AUD_CFG(pipe); 6999 aud_config = CPT_AUD_CFG(pipe);
@@ -6944,8 +7003,19 @@ static void ironlake_write_eld(struct drm_connector *connector,
6944 7003
6945 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 7004 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6946 7005
6947 i = I915_READ(aud_cntl_st); 7006 if (IS_VALLEYVIEW(connector->dev)) {
6948 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ 7007 struct intel_encoder *intel_encoder;
7008 struct intel_digital_port *intel_dig_port;
7009
7010 intel_encoder = intel_attached_encoder(connector);
7011 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
7012 i = intel_dig_port->port;
7013 } else {
7014 i = I915_READ(aud_cntl_st);
7015 i = (i >> 29) & DIP_PORT_SEL_MASK;
7016 /* DIP_Port_Select, 0x1 = PortB */
7017 }
7018
6949 if (!i) { 7019 if (!i) {
6950 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 7020 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6951 /* operate blindly on all ports */ 7021 /* operate blindly on all ports */
@@ -7276,8 +7346,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7276{ 7346{
7277 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7347 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7278 7348
7279 intel_crtc->cursor_x = x; 7349 intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
7280 intel_crtc->cursor_y = y; 7350 intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
7281 7351
7282 if (intel_crtc->active) 7352 if (intel_crtc->active)
7283 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 7353 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -9804,6 +9874,18 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9804 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 9874 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
9805} 9875}
9806 9876
9877enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
9878{
9879 struct drm_encoder *encoder = connector->base.encoder;
9880
9881 WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
9882
9883 if (!encoder)
9884 return INVALID_PIPE;
9885
9886 return to_intel_crtc(encoder->crtc)->pipe;
9887}
9888
9807int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 9889int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
9808 struct drm_file *file) 9890 struct drm_file *file)
9809{ 9891{
@@ -10263,7 +10345,8 @@ static void intel_init_display(struct drm_device *dev)
10263 } 10345 }
10264 } else if (IS_G4X(dev)) { 10346 } else if (IS_G4X(dev)) {
10265 dev_priv->display.write_eld = g4x_write_eld; 10347 dev_priv->display.write_eld = g4x_write_eld;
10266 } 10348 } else if (IS_VALLEYVIEW(dev))
10349 dev_priv->display.write_eld = ironlake_write_eld;
10267 10350
10268 /* Default just returns -ENODEV to indicate unsupported */ 10351 /* Default just returns -ENODEV to indicate unsupported */
10269 dev_priv->display.queue_flip = intel_default_queue_flip; 10352 dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10441,33 +10524,6 @@ static void i915_disable_vga(struct drm_device *dev)
10441 POSTING_READ(vga_reg); 10524 POSTING_READ(vga_reg);
10442} 10525}
10443 10526
10444static void i915_enable_vga_mem(struct drm_device *dev)
10445{
10446 /* Enable VGA memory on Intel HD */
10447 if (HAS_PCH_SPLIT(dev)) {
10448 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10449 outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10450 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10451 VGA_RSRC_LEGACY_MEM |
10452 VGA_RSRC_NORMAL_IO |
10453 VGA_RSRC_NORMAL_MEM);
10454 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10455 }
10456}
10457
10458void i915_disable_vga_mem(struct drm_device *dev)
10459{
10460 /* Disable VGA memory on Intel HD */
10461 if (HAS_PCH_SPLIT(dev)) {
10462 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10463 outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10464 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10465 VGA_RSRC_NORMAL_IO |
10466 VGA_RSRC_NORMAL_MEM);
10467 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10468 }
10469}
10470
10471void intel_modeset_init_hw(struct drm_device *dev) 10527void intel_modeset_init_hw(struct drm_device *dev)
10472{ 10528{
10473 struct drm_i915_private *dev_priv = dev->dev_private; 10529 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10753,7 +10809,6 @@ void i915_redisable_vga(struct drm_device *dev)
10753 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 10809 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
10754 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10810 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
10755 i915_disable_vga(dev); 10811 i915_disable_vga(dev);
10756 i915_disable_vga_mem(dev);
10757 } 10812 }
10758} 10813}
10759 10814
@@ -10960,8 +11015,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
10960 11015
10961 intel_disable_fbc(dev); 11016 intel_disable_fbc(dev);
10962 11017
10963 i915_enable_vga_mem(dev);
10964
10965 intel_disable_gt_powersave(dev); 11018 intel_disable_gt_powersave(dev);
10966 11019
10967 ironlake_teardown_rc6(dev); 11020 ironlake_teardown_rc6(dev);
@@ -11073,7 +11126,7 @@ intel_display_capture_error_state(struct drm_device *dev)
11073 if (INTEL_INFO(dev)->num_pipes == 0) 11126 if (INTEL_INFO(dev)->num_pipes == 0)
11074 return NULL; 11127 return NULL;
11075 11128
11076 error = kmalloc(sizeof(*error), GFP_ATOMIC); 11129 error = kzalloc(sizeof(*error), GFP_ATOMIC);
11077 if (error == NULL) 11130 if (error == NULL)
11078 return NULL; 11131 return NULL;
11079 11132
@@ -11081,6 +11134,9 @@ intel_display_capture_error_state(struct drm_device *dev)
11081 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 11134 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
11082 11135
11083 for_each_pipe(i) { 11136 for_each_pipe(i) {
11137 if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
11138 continue;
11139
11084 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 11140 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
11085 error->cursor[i].control = I915_READ(CURCNTR(i)); 11141 error->cursor[i].control = I915_READ(CURCNTR(i));
11086 error->cursor[i].position = I915_READ(CURPOS(i)); 11142 error->cursor[i].position = I915_READ(CURPOS(i));
@@ -11114,6 +11170,10 @@ intel_display_capture_error_state(struct drm_device *dev)
11114 for (i = 0; i < error->num_transcoders; i++) { 11170 for (i = 0; i < error->num_transcoders; i++) {
11115 enum transcoder cpu_transcoder = transcoders[i]; 11171 enum transcoder cpu_transcoder = transcoders[i];
11116 11172
11173 if (!intel_display_power_enabled(dev,
11174 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
11175 continue;
11176
11117 error->transcoder[i].cpu_transcoder = cpu_transcoder; 11177 error->transcoder[i].cpu_transcoder = cpu_transcoder;
11118 11178
11119 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 11179 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -11125,12 +11185,6 @@ intel_display_capture_error_state(struct drm_device *dev)
11125 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 11185 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
11126 } 11186 }
11127 11187
11128 /* In the code above we read the registers without checking if the power
11129 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
11130 * prevent the next I915_WRITE from detecting it and printing an error
11131 * message. */
11132 intel_uncore_clear_errors(dev);
11133
11134 return error; 11188 return error;
11135} 11189}
11136 11190
@@ -11175,7 +11229,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
11175 } 11229 }
11176 11230
11177 for (i = 0; i < error->num_transcoders; i++) { 11231 for (i = 0; i < error->num_transcoders; i++) {
11178 err_printf(m, " CPU transcoder: %c\n", 11232 err_printf(m, "CPU transcoder: %c\n",
11179 transcoder_name(error->transcoder[i].cpu_transcoder)); 11233 transcoder_name(error->transcoder[i].cpu_transcoder));
11180 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 11234 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
11181 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 11235 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
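
The power-well refactoring in intel_display.c boils down to an ordering rule: modeset_update_power_wells() acquires the domain mask every enabled pipe will need before it releases the masks from the previous state, so a well that stays in use is never toggled off and back on. A standalone refcount model of that ordering (domain names, the single shared well and the two masks are illustrative, not the driver's data structures):

/* Refcount model: get the new domains first, then put the old ones;
 * a domain needed both before and after never drops to zero refs. */
#include <stdio.h>

enum { DOMAIN_PIPE_A, DOMAIN_PIPE_B, DOMAIN_PANEL_FITTER_A, DOMAIN_NUM };

static int refcount[DOMAIN_NUM];
static int well_enabled;

static void power_get(int domain)
{
        refcount[domain]++;
        if (!well_enabled) {
                well_enabled = 1;
                printf("power well ON\n");
        }
}

static void power_put(int domain)
{
        int any = 0;

        refcount[domain]--;
        for (int d = 0; d < DOMAIN_NUM; d++)
                any |= refcount[d];
        if (!any && well_enabled) {
                well_enabled = 0;
                printf("power well OFF\n");
        }
}

int main(void)
{
        unsigned long old_mask = 1ul << DOMAIN_PIPE_A;
        unsigned long new_mask = (1ul << DOMAIN_PIPE_A) |
                                 (1ul << DOMAIN_PANEL_FITTER_A);

        power_get(DOMAIN_PIPE_A);       /* reference held by the old state */

        /* get all newly needed domains first ... */
        for (int d = 0; d < DOMAIN_NUM; d++)
                if (new_mask & (1ul << d))
                        power_get(d);

        /* ... then drop the old ones; the well never turns off in between */
        for (int d = 0; d < DOMAIN_NUM; d++)
                if (old_mask & (1ul << d))
                        power_put(d);

        printf("well_enabled=%d\n", well_enabled);
        return 0;
}
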
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1e3d2720d811..045d46475121 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -623,6 +623,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
623 int reply_bytes; 623 int reply_bytes;
624 int ret; 624 int ret;
625 625
626 ironlake_edp_panel_vdd_on(intel_dp);
626 intel_dp_check_edp(intel_dp); 627 intel_dp_check_edp(intel_dp);
627 /* Set up the command byte */ 628 /* Set up the command byte */
628 if (mode & MODE_I2C_READ) 629 if (mode & MODE_I2C_READ)
@@ -665,7 +666,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
665 reply, reply_bytes); 666 reply, reply_bytes);
666 if (ret < 0) { 667 if (ret < 0) {
667 DRM_DEBUG_KMS("aux_ch failed %d\n", ret); 668 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
668 return ret; 669 goto out;
669 } 670 }
670 671
671 switch (reply[0] & AUX_NATIVE_REPLY_MASK) { 672 switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -676,7 +677,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
676 break; 677 break;
677 case AUX_NATIVE_REPLY_NACK: 678 case AUX_NATIVE_REPLY_NACK:
678 DRM_DEBUG_KMS("aux_ch native nack\n"); 679 DRM_DEBUG_KMS("aux_ch native nack\n");
679 return -EREMOTEIO; 680 ret = -EREMOTEIO;
681 goto out;
680 case AUX_NATIVE_REPLY_DEFER: 682 case AUX_NATIVE_REPLY_DEFER:
681 /* 683 /*
682 * For now, just give more slack to branch devices. We 684 * For now, just give more slack to branch devices. We
@@ -694,7 +696,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
694 default: 696 default:
695 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", 697 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
696 reply[0]); 698 reply[0]);
697 return -EREMOTEIO; 699 ret = -EREMOTEIO;
700 goto out;
698 } 701 }
699 702
700 switch (reply[0] & AUX_I2C_REPLY_MASK) { 703 switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -702,22 +705,29 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
702 if (mode == MODE_I2C_READ) { 705 if (mode == MODE_I2C_READ) {
703 *read_byte = reply[1]; 706 *read_byte = reply[1];
704 } 707 }
705 return reply_bytes - 1; 708 ret = reply_bytes - 1;
709 goto out;
706 case AUX_I2C_REPLY_NACK: 710 case AUX_I2C_REPLY_NACK:
707 DRM_DEBUG_KMS("aux_i2c nack\n"); 711 DRM_DEBUG_KMS("aux_i2c nack\n");
708 return -EREMOTEIO; 712 ret = -EREMOTEIO;
713 goto out;
709 case AUX_I2C_REPLY_DEFER: 714 case AUX_I2C_REPLY_DEFER:
710 DRM_DEBUG_KMS("aux_i2c defer\n"); 715 DRM_DEBUG_KMS("aux_i2c defer\n");
711 udelay(100); 716 udelay(100);
712 break; 717 break;
713 default: 718 default:
714 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); 719 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
715 return -EREMOTEIO; 720 ret = -EREMOTEIO;
721 goto out;
716 } 722 }
717 } 723 }
718 724
719 DRM_ERROR("too many retries, giving up\n"); 725 DRM_ERROR("too many retries, giving up\n");
720 return -EREMOTEIO; 726 ret = -EREMOTEIO;
727
728out:
729 ironlake_edp_panel_vdd_off(intel_dp, false);
730 return ret;
721} 731}
722 732
723static int 733static int
@@ -739,9 +749,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
739 intel_dp->adapter.algo_data = &intel_dp->algo; 749 intel_dp->adapter.algo_data = &intel_dp->algo;
740 intel_dp->adapter.dev.parent = intel_connector->base.kdev; 750 intel_dp->adapter.dev.parent = intel_connector->base.kdev;
741 751
742 ironlake_edp_panel_vdd_on(intel_dp);
743 ret = i2c_dp_aux_add_bus(&intel_dp->adapter); 752 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
744 ironlake_edp_panel_vdd_off(intel_dp, false);
745 return ret; 753 return ret;
746} 754}
747 755
@@ -1069,17 +1077,16 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1069 1077
1070 if (!is_edp(intel_dp)) 1078 if (!is_edp(intel_dp))
1071 return; 1079 return;
1072 DRM_DEBUG_KMS("Turn eDP VDD on\n");
1073 1080
1074 WARN(intel_dp->want_panel_vdd, 1081 WARN(intel_dp->want_panel_vdd,
1075 "eDP VDD already requested on\n"); 1082 "eDP VDD already requested on\n");
1076 1083
1077 intel_dp->want_panel_vdd = true; 1084 intel_dp->want_panel_vdd = true;
1078 1085
1079 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1086 if (ironlake_edp_have_panel_vdd(intel_dp))
1080 DRM_DEBUG_KMS("eDP VDD already on\n");
1081 return; 1087 return;
1082 } 1088
1089 DRM_DEBUG_KMS("Turning eDP VDD on\n");
1083 1090
1084 if (!ironlake_edp_have_panel_power(intel_dp)) 1091 if (!ironlake_edp_have_panel_power(intel_dp))
1085 ironlake_wait_panel_power_cycle(intel_dp); 1092 ironlake_wait_panel_power_cycle(intel_dp);
@@ -1113,11 +1120,13 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1113 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1120 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1114 1121
1115 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1122 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1123 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1124
1116 pp = ironlake_get_pp_control(intel_dp); 1125 pp = ironlake_get_pp_control(intel_dp);
1117 pp &= ~EDP_FORCE_VDD; 1126 pp &= ~EDP_FORCE_VDD;
1118 1127
1119 pp_stat_reg = _pp_ctrl_reg(intel_dp); 1128 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1120 pp_ctrl_reg = _pp_stat_reg(intel_dp); 1129 pp_stat_reg = _pp_stat_reg(intel_dp);
1121 1130
1122 I915_WRITE(pp_ctrl_reg, pp); 1131 I915_WRITE(pp_ctrl_reg, pp);
1123 POSTING_READ(pp_ctrl_reg); 1132 POSTING_READ(pp_ctrl_reg);
@@ -1145,7 +1154,6 @@ void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1145 if (!is_edp(intel_dp)) 1154 if (!is_edp(intel_dp))
1146 return; 1155 return;
1147 1156
1148 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1149 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1157 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1150 1158
1151 intel_dp->want_panel_vdd = false; 1159 intel_dp->want_panel_vdd = false;
@@ -1241,7 +1249,6 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1241 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1249 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1242 struct drm_device *dev = intel_dig_port->base.base.dev; 1250 struct drm_device *dev = intel_dig_port->base.base.dev;
1243 struct drm_i915_private *dev_priv = dev->dev_private; 1251 struct drm_i915_private *dev_priv = dev->dev_private;
1244 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
1245 u32 pp; 1252 u32 pp;
1246 u32 pp_ctrl_reg; 1253 u32 pp_ctrl_reg;
1247 1254
@@ -1264,7 +1271,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1264 I915_WRITE(pp_ctrl_reg, pp); 1271 I915_WRITE(pp_ctrl_reg, pp);
1265 POSTING_READ(pp_ctrl_reg); 1272 POSTING_READ(pp_ctrl_reg);
1266 1273
1267 intel_panel_enable_backlight(dev, pipe); 1274 intel_panel_enable_backlight(intel_dp->attached_connector);
1268} 1275}
1269 1276
1270void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1277void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1277,7 +1284,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1277 if (!is_edp(intel_dp)) 1284 if (!is_edp(intel_dp))
1278 return; 1285 return;
1279 1286
1280 intel_panel_disable_backlight(dev); 1287 intel_panel_disable_backlight(intel_dp->attached_connector);
1281 1288
1282 DRM_DEBUG_KMS("\n"); 1289 DRM_DEBUG_KMS("\n");
1283 pp = ironlake_get_pp_control(intel_dp); 1290 pp = ironlake_get_pp_control(intel_dp);
@@ -1476,6 +1483,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1476 ironlake_check_encoder_dotclock(pipe_config, dotclock); 1483 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1477 1484
1478 pipe_config->adjusted_mode.crtc_clock = dotclock; 1485 pipe_config->adjusted_mode.crtc_clock = dotclock;
1486
1487 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1488 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1489 /*
1490 * This is a big fat ugly hack.
1491 *
1492 * Some machines in UEFI boot mode provide us a VBT that has 18
1493 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1494 * unknown we fail to light up. Yet the same BIOS boots up with
1495 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1496 * max, not what it tells us to use.
1497 *
1498 * Note: This will still be broken if the eDP panel is not lit
1499 * up by the BIOS, and thus we can't get the mode at module
1500 * load.
1501 */
1502 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1503 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1504 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1505 }
1479} 1506}
1480 1507
1481static bool is_edp_psr(struct drm_device *dev) 1508static bool is_edp_psr(struct drm_device *dev)
@@ -1543,7 +1570,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1543 1570
1544 /* Avoid continuous PSR exit by masking memup and hpd */ 1571 /* Avoid continuous PSR exit by masking memup and hpd */
1545 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | 1572 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1546 EDP_PSR_DEBUG_MASK_HPD); 1573 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1547 1574
1548 intel_dp->psr_setup_done = true; 1575 intel_dp->psr_setup_done = true;
1549} 1576}
@@ -3256,7 +3283,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3256 p_child = dev_priv->vbt.child_dev + i; 3283 p_child = dev_priv->vbt.child_dev + i;
3257 3284
3258 if (p_child->common.dvo_port == PORT_IDPD && 3285 if (p_child->common.dvo_port == PORT_IDPD &&
3259 p_child->common.device_type == DEVICE_TYPE_eDP) 3286 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3287 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
3260 return true; 3288 return true;
3261 } 3289 }
3262 return false; 3290 return false;
@@ -3478,7 +3506,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3478 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3506 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3479 &power_seq); 3507 &power_seq);
3480 3508
3481 ironlake_edp_panel_vdd_on(intel_dp);
3482 edid = drm_get_edid(connector, &intel_dp->adapter); 3509 edid = drm_get_edid(connector, &intel_dp->adapter);
3483 if (edid) { 3510 if (edid) {
3484 if (drm_add_edid_modes(connector, edid)) { 3511 if (drm_add_edid_modes(connector, edid)) {
@@ -3510,8 +3537,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3510 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3537 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3511 } 3538 }
3512 3539
3513 ironlake_edp_panel_vdd_off(intel_dp, false);
3514
3515 intel_panel_init(&intel_connector->panel, fixed_mode); 3540 intel_panel_init(&intel_connector->panel, fixed_mode);
3516 intel_panel_setup_backlight(connector); 3541 intel_panel_setup_backlight(connector);
3517 3542
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e33f387d4185..1e49aa8f5377 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -326,6 +326,7 @@ struct intel_crtc {
326 * some outputs connected to this crtc. 326 * some outputs connected to this crtc.
327 */ 327 */
328 bool active; 328 bool active;
329 unsigned long enabled_power_domains;
329 bool eld_vld; 330 bool eld_vld;
330 bool primary_enabled; /* is the primary plane (partially) visible? */ 331 bool primary_enabled; /* is the primary plane (partially) visible? */
331 bool lowfreq_avail; 332 bool lowfreq_avail;
@@ -629,6 +630,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
629struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 630struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
630struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 631struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
631 struct drm_crtc *crtc); 632 struct drm_crtc *crtc);
633enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
632int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 634int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
633 struct drm_file *file_priv); 635 struct drm_file *file_priv);
634enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 636enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -691,6 +693,7 @@ bool intel_crtc_active(struct drm_crtc *crtc);
691void i915_disable_vga_mem(struct drm_device *dev); 693void i915_disable_vga_mem(struct drm_device *dev);
692void hsw_enable_ips(struct intel_crtc *crtc); 694void hsw_enable_ips(struct intel_crtc *crtc);
693void hsw_disable_ips(struct intel_crtc *crtc); 695void hsw_disable_ips(struct intel_crtc *crtc);
696void intel_display_set_init_power(struct drm_device *dev, bool enable);
694 697
695 698
696/* intel_dp.c */ 699/* intel_dp.c */
@@ -800,10 +803,11 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
800void intel_gmch_panel_fitting(struct intel_crtc *crtc, 803void intel_gmch_panel_fitting(struct intel_crtc *crtc,
801 struct intel_crtc_config *pipe_config, 804 struct intel_crtc_config *pipe_config,
802 int fitting_mode); 805 int fitting_mode);
803void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max); 806void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
807 u32 max);
804int intel_panel_setup_backlight(struct drm_connector *connector); 808int intel_panel_setup_backlight(struct drm_connector *connector);
805void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe); 809void intel_panel_enable_backlight(struct intel_connector *connector);
806void intel_panel_disable_backlight(struct drm_device *dev); 810void intel_panel_disable_backlight(struct intel_connector *connector);
807void intel_panel_destroy_backlight(struct drm_device *dev); 811void intel_panel_destroy_backlight(struct drm_device *dev);
808enum drm_connector_status intel_panel_detect(struct drm_device *dev); 812enum drm_connector_status intel_panel_detect(struct drm_device *dev);
809 813
@@ -821,15 +825,15 @@ bool intel_fbc_enabled(struct drm_device *dev);
821void intel_update_fbc(struct drm_device *dev); 825void intel_update_fbc(struct drm_device *dev);
822void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 826void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
823void intel_gpu_ips_teardown(void); 827void intel_gpu_ips_teardown(void);
824int i915_init_power_well(struct drm_device *dev); 828int intel_power_domains_init(struct drm_device *dev);
825void i915_remove_power_well(struct drm_device *dev); 829void intel_power_domains_remove(struct drm_device *dev);
826bool intel_display_power_enabled(struct drm_device *dev, 830bool intel_display_power_enabled(struct drm_device *dev,
827 enum intel_display_power_domain domain); 831 enum intel_display_power_domain domain);
828void intel_display_power_get(struct drm_device *dev, 832void intel_display_power_get(struct drm_device *dev,
829 enum intel_display_power_domain domain); 833 enum intel_display_power_domain domain);
830void intel_display_power_put(struct drm_device *dev, 834void intel_display_power_put(struct drm_device *dev,
831 enum intel_display_power_domain domain); 835 enum intel_display_power_domain domain);
832void intel_init_power_well(struct drm_device *dev); 836void intel_power_domains_init_hw(struct drm_device *dev);
833void intel_set_power_well(struct drm_device *dev, bool enable); 837void intel_set_power_well(struct drm_device *dev, bool enable);
834void intel_enable_gt_powersave(struct drm_device *dev); 838void intel_enable_gt_powersave(struct drm_device *dev);
835void intel_disable_gt_powersave(struct drm_device *dev); 839void intel_disable_gt_powersave(struct drm_device *dev);
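
The intel_drv.h hunk above renames the power-well entry points to the new power-domains naming (i915_init_power_well becomes intel_power_domains_init, i915_remove_power_well becomes intel_power_domains_remove, intel_init_power_well becomes intel_power_domains_init_hw) and adds intel_display_set_init_power(). A minimal sketch of how a driver load/unload path is expected to call them, assuming the usual i915 ordering (the real call sites live in i915_dma.c and are not part of this diff):

/*
 * Illustrative only: mirrors the prototypes declared above, not the
 * actual i915 load code.
 */
static int example_driver_load(struct drm_device *dev)
{
	int ret;

	/* Set up the software state (mutex, refcount) for the domains. */
	ret = intel_power_domains_init(dev);
	if (ret)
		return ret;

	/*
	 * Sync the hardware with that bookkeeping; per the intel_pm.c hunk
	 * below this also grabs the init power through
	 * intel_display_set_init_power(dev, true).
	 */
	intel_power_domains_init_hw(dev);

	return 0;
}

static void example_driver_unload(struct drm_device *dev)
{
	/* Drops the hsw_pwr pointer exported to the HDA audio driver. */
	intel_power_domains_remove(dev);
}
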
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1b64145c669a..3c7736546856 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -173,11 +173,16 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
173{ 173{
174 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 174 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
175 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 175 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
176 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
176 u32 dvo_reg = intel_dvo->dev.dvo_reg; 177 u32 dvo_reg = intel_dvo->dev.dvo_reg;
177 u32 temp = I915_READ(dvo_reg); 178 u32 temp = I915_READ(dvo_reg);
178 179
179 I915_WRITE(dvo_reg, temp | DVO_ENABLE); 180 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
180 I915_READ(dvo_reg); 181 I915_READ(dvo_reg);
182 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
183 &crtc->config.requested_mode,
184 &crtc->config.adjusted_mode);
185
181 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 186 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
182} 187}
183 188
@@ -186,6 +191,7 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
186{ 191{
187 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 192 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
188 struct drm_crtc *crtc; 193 struct drm_crtc *crtc;
194 struct intel_crtc_config *config;
189 195
190 /* dvo supports only 2 dpms states. */ 196 /* dvo supports only 2 dpms states. */
191 if (mode != DRM_MODE_DPMS_ON) 197 if (mode != DRM_MODE_DPMS_ON)
@@ -206,10 +212,16 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
206 /* We call connector dpms manually below in case pipe dpms doesn't 212 /* We call connector dpms manually below in case pipe dpms doesn't
207 * change due to cloning. */ 213 * change due to cloning. */
208 if (mode == DRM_MODE_DPMS_ON) { 214 if (mode == DRM_MODE_DPMS_ON) {
215 config = &to_intel_crtc(crtc)->config;
216
209 intel_dvo->base.connectors_active = true; 217 intel_dvo->base.connectors_active = true;
210 218
211 intel_crtc_update_dpms(crtc); 219 intel_crtc_update_dpms(crtc);
212 220
221 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
222 &config->requested_mode,
223 &config->adjusted_mode);
224
213 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 225 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
214 } else { 226 } else {
215 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 227 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -296,10 +308,6 @@ static void intel_dvo_mode_set(struct intel_encoder *encoder)
296 break; 308 break;
297 } 309 }
298 310
299 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
300 &crtc->config.requested_mode,
301 adjusted_mode);
302
303 /* Save the data order, since I don't know what it should be set to. */ 311 /* Save the data order, since I don't know what it should be set to. */
304 dvo_val = I915_READ(dvo_reg) & 312 dvo_val = I915_READ(dvo_reg) &
305 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); 313 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
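
The intel_dvo.c hunks move the call into the DVO transmitter's ->mode_set hook out of the encoder mode_set path and into the enable and DPMS-on paths, after DVO_ENABLE has been written, since at least some of the external DVO transmitters only accept configuration while the port is actually running. A condensed sketch of the resulting ordering; this restates the hunks rather than adding a new function to the driver:

/*
 * Sketch of the enable ordering after this change; names as in
 * intel_dvo.c.
 */
static void example_dvo_port_on(struct drm_i915_private *dev_priv,
				struct intel_dvo *intel_dvo,
				struct intel_crtc *crtc)
{
	u32 dvo_reg = intel_dvo->dev.dvo_reg;

	/* 1) Start the DVO port so the external transmitter is clocked. */
	I915_WRITE(dvo_reg, I915_READ(dvo_reg) | DVO_ENABLE);
	I915_READ(dvo_reg);	/* posting read */

	/* 2) Only now program the transmitter with the mode... */
	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
					 &crtc->config.requested_mode,
					 &crtc->config.adjusted_mode);

	/* 3) ...and finally tell it to light up. */
	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
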
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index acc839569c3f..895fcb4fbd94 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
78 mode_cmd.width = sizes->surface_width; 78 mode_cmd.width = sizes->surface_width;
79 mode_cmd.height = sizes->surface_height; 79 mode_cmd.height = sizes->surface_height;
80 80
81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) / 81 mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
82 8), 64); 82 DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 83 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
84 sizes->surface_depth); 84 sizes->surface_depth);
85 85
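
The fbdev change replaces the open-coded "(bpp + 7) / 8" rounding with DIV_ROUND_UP() when computing the framebuffer pitch; the two forms are arithmetically identical, so this is purely a readability cleanup. A standalone illustration of the computation (the macros below are simplified user-space stand-ins for the kernel's ALIGN and DIV_ROUND_UP):

#include <stdio.h>

/* Simplified user-space versions of the kernel helpers. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, bpp = 24;

	/*
	 * 24 bpp -> 3 bytes per pixel, 1366 * 3 = 4098 bytes per line,
	 * rounded up to the next 64-byte multiple -> 4160.
	 */
	unsigned int pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);

	printf("pitch = %u\n", pitch);	/* prints "pitch = 4160" */
	return 0;
}
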
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ae0c843dd263..c3b4da7895ed 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -206,7 +206,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
206{ 206{
207 struct drm_device *dev = encoder->base.dev; 207 struct drm_device *dev = encoder->base.dev;
208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
209 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 209 struct intel_connector *intel_connector =
210 &lvds_encoder->attached_connector->base;
210 struct drm_i915_private *dev_priv = dev->dev_private; 211 struct drm_i915_private *dev_priv = dev->dev_private;
211 u32 ctl_reg, stat_reg; 212 u32 ctl_reg, stat_reg;
212 213
@@ -225,13 +226,15 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
225 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 226 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
226 DRM_ERROR("timed out waiting for panel to power on\n"); 227 DRM_ERROR("timed out waiting for panel to power on\n");
227 228
228 intel_panel_enable_backlight(dev, intel_crtc->pipe); 229 intel_panel_enable_backlight(intel_connector);
229} 230}
230 231
231static void intel_disable_lvds(struct intel_encoder *encoder) 232static void intel_disable_lvds(struct intel_encoder *encoder)
232{ 233{
233 struct drm_device *dev = encoder->base.dev; 234 struct drm_device *dev = encoder->base.dev;
234 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 235 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
236 struct intel_connector *intel_connector =
237 &lvds_encoder->attached_connector->base;
235 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = dev->dev_private;
236 u32 ctl_reg, stat_reg; 239 u32 ctl_reg, stat_reg;
237 240
@@ -243,7 +246,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
243 stat_reg = PP_STATUS; 246 stat_reg = PP_STATUS;
244 } 247 }
245 248
246 intel_panel_disable_backlight(dev); 249 intel_panel_disable_backlight(intel_connector);
247 250
248 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 251 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
249 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 252 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
@@ -707,6 +710,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
707 }, 710 },
708 { 711 {
709 .callback = intel_no_lvds_dmi_callback, 712 .callback = intel_no_lvds_dmi_callback,
713 .ident = "Intel D410PT",
714 .matches = {
715 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
716 DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
717 },
718 },
719 {
720 .callback = intel_no_lvds_dmi_callback,
721 .ident = "Intel D425KT",
722 .matches = {
723 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
724 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
725 },
726 },
727 {
728 .callback = intel_no_lvds_dmi_callback,
710 .ident = "Intel D510MO", 729 .ident = "Intel D510MO",
711 .matches = { 730 .matches = {
712 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 731 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
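
The two new intel_no_lvds[] entries mark the Intel D410PT and D425KT boards as having no LVDS panel, so the driver no longer registers a phantom LVDS connector on them. The D425KT entry uses DMI_EXACT_MATCH for the board name because DMI_MATCH is a substring match and a plain "D425KT" pattern would also hit boards whose name merely extends that string (presumably the D425KTW variant, though the hunk itself does not say which). For reference, a minimal quirk table of this shape and the way such a table is normally consumed; the callback body and names here are illustrative, the real ones live elsewhere in intel_lvds.c:

#include <linux/kernel.h>
#include <linux/dmi.h>

static int example_no_lvds_callback(const struct dmi_system_id *id)
{
	pr_info("Skipping LVDS initialization for %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id example_no_lvds[] = {
	{
		.callback = example_no_lvds_callback,
		.ident = "Intel D425KT",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
			/* exact: must not also match longer board names */
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
		},
	},
	{ }	/* terminator */
};

/* Typically checked once during LVDS init: */
static bool example_lvds_is_blacklisted(void)
{
	return dmi_check_system(example_no_lvds) != 0;
}
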
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b82050c96f3e..91b68dca0641 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,7 +396,13 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 396static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
397{ 397{
398 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
399 struct drm_encoder *encoder;
400 struct drm_connector *connector;
401 struct intel_connector *intel_connector = NULL;
402 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
399 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 403 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
404 u32 ret = 0;
405 bool found = false;
400 406
401 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 407 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
402 408
@@ -407,11 +413,39 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
407 if (bclp > 255) 413 if (bclp > 255)
408 return ASLC_BACKLIGHT_FAILED; 414 return ASLC_BACKLIGHT_FAILED;
409 415
416 mutex_lock(&dev->mode_config.mutex);
417 /*
418 * Could match the OpRegion connector here instead, but we'd also need
419 * to verify the connector could handle a backlight call.
420 */
421 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
422 if (encoder->crtc == crtc) {
423 found = true;
424 break;
425 }
426
427 if (!found) {
428 ret = ASLC_BACKLIGHT_FAILED;
429 goto out;
430 }
431
432 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
433 if (connector->encoder == encoder)
434 intel_connector = to_intel_connector(connector);
435
436 if (!intel_connector) {
437 ret = ASLC_BACKLIGHT_FAILED;
438 goto out;
439 }
440
410 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); 441 DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
411 intel_panel_set_backlight(dev, bclp, 255); 442 intel_panel_set_backlight(intel_connector, bclp, 255);
412 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 443 iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
413 444
414 return 0; 445out:
446 mutex_unlock(&dev->mode_config.mutex);
447
448 return ret;
415} 449}
416 450
417static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 451static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@ -486,9 +520,13 @@ static u32 asle_isct_state(struct drm_device *dev)
486 return ASLC_ISCT_STATE_FAILED; 520 return ASLC_ISCT_STATE_FAILED;
487} 521}
488 522
489void intel_opregion_asle_intr(struct drm_device *dev) 523static void asle_work(struct work_struct *work)
490{ 524{
491 struct drm_i915_private *dev_priv = dev->dev_private; 525 struct intel_opregion *opregion =
526 container_of(work, struct intel_opregion, asle_work);
527 struct drm_i915_private *dev_priv =
528 container_of(opregion, struct drm_i915_private, opregion);
529 struct drm_device *dev = dev_priv->dev;
492 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 530 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
493 u32 aslc_stat = 0; 531 u32 aslc_stat = 0;
494 u32 aslc_req; 532 u32 aslc_req;
@@ -535,6 +573,14 @@ void intel_opregion_asle_intr(struct drm_device *dev)
535 iowrite32(aslc_stat, &asle->aslc); 573 iowrite32(aslc_stat, &asle->aslc);
536} 574}
537 575
576void intel_opregion_asle_intr(struct drm_device *dev)
577{
578 struct drm_i915_private *dev_priv = dev->dev_private;
579
580 if (dev_priv->opregion.asle)
581 schedule_work(&dev_priv->opregion.asle_work);
582}
583
538#define ACPI_EV_DISPLAY_SWITCH (1<<0) 584#define ACPI_EV_DISPLAY_SWITCH (1<<0)
539#define ACPI_EV_LID (1<<1) 585#define ACPI_EV_LID (1<<1)
540#define ACPI_EV_DOCK (1<<2) 586#define ACPI_EV_DOCK (1<<2)
@@ -735,6 +781,8 @@ void intel_opregion_fini(struct drm_device *dev)
735 if (opregion->asle) 781 if (opregion->asle)
736 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); 782 iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
737 783
784 cancel_work_sync(&dev_priv->opregion.asle_work);
785
738 if (opregion->acpi) { 786 if (opregion->acpi) {
739 iowrite32(0, &opregion->acpi->drdy); 787 iowrite32(0, &opregion->acpi->drdy);
740 788
@@ -828,6 +876,8 @@ int intel_opregion_setup(struct drm_device *dev)
828 return -ENOTSUPP; 876 return -ENOTSUPP;
829 } 877 }
830 878
879 INIT_WORK(&opregion->asle_work, asle_work);
880
831 base = acpi_os_ioremap(asls, OPREGION_SIZE); 881 base = acpi_os_ioremap(asls, OPREGION_SIZE);
832 if (!base) 882 if (!base)
833 return -ENOMEM; 883 return -ENOMEM;
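
The opregion rework moves ASLE request handling out of the interrupt path: intel_opregion_asle_intr() now just schedules asle_work(), which runs in process context where it may take mode_config.mutex for the backlight update, and intel_opregion_fini() uses cancel_work_sync() so nothing is still running at teardown. The same defer-to-workqueue pattern in isolation, with made-up names rather than the i915 ones, looks roughly like this:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct example_state {
	struct work_struct work;
	struct mutex lock;		/* may sleep, so unusable from an IRQ */
	unsigned int pending_req;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_state *st = container_of(work, struct example_state, work);

	mutex_lock(&st->lock);		/* fine here: we run from a kworker */
	/* ... handle st->pending_req ... */
	mutex_unlock(&st->lock);
}

static void example_init(struct example_state *st)
{
	mutex_init(&st->lock);
	INIT_WORK(&st->work, example_work_fn);
}

/* interrupt handler: just note the request and defer */
static void example_irq(struct example_state *st, unsigned int req)
{
	st->pending_req = req;
	schedule_work(&st->work);
}

/* teardown: make sure a queued or running instance has finished */
static void example_fini(struct example_state *st)
{
	cancel_work_sync(&st->work);
}
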
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 09b2994c9b37..f161ac02c4f6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -341,7 +341,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
341/* XXX: query mode clock or hardware clock and program max PWM appropriately 341/* XXX: query mode clock or hardware clock and program max PWM appropriately
342 * when it's 0. 342 * when it's 0.
343 */ 343 */
344static u32 i915_read_blc_pwm_ctl(struct drm_device *dev) 344static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
345{ 345{
346 struct drm_i915_private *dev_priv = dev->dev_private; 346 struct drm_i915_private *dev_priv = dev->dev_private;
347 u32 val; 347 u32 val;
@@ -358,6 +358,21 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
358 val = dev_priv->regfile.saveBLC_PWM_CTL2; 358 val = dev_priv->regfile.saveBLC_PWM_CTL2;
359 I915_WRITE(BLC_PWM_PCH_CTL2, val); 359 I915_WRITE(BLC_PWM_PCH_CTL2, val);
360 } 360 }
361 } else if (IS_VALLEYVIEW(dev)) {
362 val = I915_READ(VLV_BLC_PWM_CTL(pipe));
363 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
364 dev_priv->regfile.saveBLC_PWM_CTL = val;
365 dev_priv->regfile.saveBLC_PWM_CTL2 =
366 I915_READ(VLV_BLC_PWM_CTL2(pipe));
367 } else if (val == 0) {
368 val = dev_priv->regfile.saveBLC_PWM_CTL;
369 I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
370 I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
371 dev_priv->regfile.saveBLC_PWM_CTL2);
372 }
373
374 if (!val)
375 val = 0x0f42ffff;
361 } else { 376 } else {
362 val = I915_READ(BLC_PWM_CTL); 377 val = I915_READ(BLC_PWM_CTL);
363 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) { 378 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
@@ -372,19 +387,17 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
372 I915_WRITE(BLC_PWM_CTL2, 387 I915_WRITE(BLC_PWM_CTL2,
373 dev_priv->regfile.saveBLC_PWM_CTL2); 388 dev_priv->regfile.saveBLC_PWM_CTL2);
374 } 389 }
375
376 if (IS_VALLEYVIEW(dev) && !val)
377 val = 0x0f42ffff;
378 } 390 }
379 391
380 return val; 392 return val;
381} 393}
382 394
383static u32 intel_panel_get_max_backlight(struct drm_device *dev) 395static u32 intel_panel_get_max_backlight(struct drm_device *dev,
396 enum pipe pipe)
384{ 397{
385 u32 max; 398 u32 max;
386 399
387 max = i915_read_blc_pwm_ctl(dev); 400 max = i915_read_blc_pwm_ctl(dev, pipe);
388 401
389 if (HAS_PCH_SPLIT(dev)) { 402 if (HAS_PCH_SPLIT(dev)) {
390 max >>= 16; 403 max >>= 16;
@@ -410,7 +423,8 @@ MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
410 "to dri-devel@lists.freedesktop.org, if your machine needs it. " 423 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
411 "It will then be included in an upcoming module version."); 424 "It will then be included in an upcoming module version.");
412module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600); 425module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
413static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) 426static u32 intel_panel_compute_brightness(struct drm_device *dev,
427 enum pipe pipe, u32 val)
414{ 428{
415 struct drm_i915_private *dev_priv = dev->dev_private; 429 struct drm_i915_private *dev_priv = dev->dev_private;
416 430
@@ -419,7 +433,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
419 433
420 if (i915_panel_invert_brightness > 0 || 434 if (i915_panel_invert_brightness > 0 ||
421 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 435 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
422 u32 max = intel_panel_get_max_backlight(dev); 436 u32 max = intel_panel_get_max_backlight(dev, pipe);
423 if (max) 437 if (max)
424 return max - val; 438 return max - val;
425 } 439 }
@@ -427,18 +441,25 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
427 return val; 441 return val;
428} 442}
429 443
430static u32 intel_panel_get_backlight(struct drm_device *dev) 444static u32 intel_panel_get_backlight(struct drm_device *dev,
445 enum pipe pipe)
431{ 446{
432 struct drm_i915_private *dev_priv = dev->dev_private; 447 struct drm_i915_private *dev_priv = dev->dev_private;
433 u32 val; 448 u32 val;
434 unsigned long flags; 449 unsigned long flags;
450 int reg;
435 451
436 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 452 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
437 453
438 if (HAS_PCH_SPLIT(dev)) { 454 if (HAS_PCH_SPLIT(dev)) {
439 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 455 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
440 } else { 456 } else {
441 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 457 if (IS_VALLEYVIEW(dev))
458 reg = VLV_BLC_PWM_CTL(pipe);
459 else
460 reg = BLC_PWM_CTL;
461
462 val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
442 if (INTEL_INFO(dev)->gen < 4) 463 if (INTEL_INFO(dev)->gen < 4)
443 val >>= 1; 464 val >>= 1;
444 465
@@ -450,7 +471,7 @@ static u32 intel_panel_get_backlight(struct drm_device *dev)
450 } 471 }
451 } 472 }
452 473
453 val = intel_panel_compute_brightness(dev, val); 474 val = intel_panel_compute_brightness(dev, pipe, val);
454 475
455 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 476 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
456 477
@@ -466,19 +487,20 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
466} 487}
467 488
468static void intel_panel_actually_set_backlight(struct drm_device *dev, 489static void intel_panel_actually_set_backlight(struct drm_device *dev,
469 u32 level) 490 enum pipe pipe, u32 level)
470{ 491{
471 struct drm_i915_private *dev_priv = dev->dev_private; 492 struct drm_i915_private *dev_priv = dev->dev_private;
472 u32 tmp; 493 u32 tmp;
494 int reg;
473 495
474 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 496 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
475 level = intel_panel_compute_brightness(dev, level); 497 level = intel_panel_compute_brightness(dev, pipe, level);
476 498
477 if (HAS_PCH_SPLIT(dev)) 499 if (HAS_PCH_SPLIT(dev))
478 return intel_pch_panel_set_backlight(dev, level); 500 return intel_pch_panel_set_backlight(dev, level);
479 501
480 if (is_backlight_combination_mode(dev)) { 502 if (is_backlight_combination_mode(dev)) {
481 u32 max = intel_panel_get_max_backlight(dev); 503 u32 max = intel_panel_get_max_backlight(dev, pipe);
482 u8 lbpc; 504 u8 lbpc;
483 505
484 /* we're screwed, but keep behaviour backwards compatible */ 506 /* we're screwed, but keep behaviour backwards compatible */
@@ -490,23 +512,34 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
490 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); 512 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
491 } 513 }
492 514
493 tmp = I915_READ(BLC_PWM_CTL); 515 if (IS_VALLEYVIEW(dev))
516 reg = VLV_BLC_PWM_CTL(pipe);
517 else
518 reg = BLC_PWM_CTL;
519
520 tmp = I915_READ(reg);
494 if (INTEL_INFO(dev)->gen < 4) 521 if (INTEL_INFO(dev)->gen < 4)
495 level <<= 1; 522 level <<= 1;
496 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 523 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
497 I915_WRITE(BLC_PWM_CTL, tmp | level); 524 I915_WRITE(reg, tmp | level);
498} 525}
499 526
500/* set backlight brightness to level in range [0..max] */ 527/* set backlight brightness to level in range [0..max] */
501void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) 528void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
529 u32 max)
502{ 530{
531 struct drm_device *dev = connector->base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private; 532 struct drm_i915_private *dev_priv = dev->dev_private;
533 enum pipe pipe = intel_get_pipe_from_connector(connector);
504 u32 freq; 534 u32 freq;
505 unsigned long flags; 535 unsigned long flags;
506 536
537 if (pipe == INVALID_PIPE)
538 return;
539
507 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 540 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
508 541
509 freq = intel_panel_get_max_backlight(dev); 542 freq = intel_panel_get_max_backlight(dev, pipe);
510 if (!freq) { 543 if (!freq) {
511 /* we are screwed, bail out */ 544 /* we are screwed, bail out */
512 goto out; 545 goto out;
@@ -523,16 +556,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
523 dev_priv->backlight.device->props.brightness = level; 556 dev_priv->backlight.device->props.brightness = level;
524 557
525 if (dev_priv->backlight.enabled) 558 if (dev_priv->backlight.enabled)
526 intel_panel_actually_set_backlight(dev, level); 559 intel_panel_actually_set_backlight(dev, pipe, level);
527out: 560out:
528 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 561 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
529} 562}
530 563
531void intel_panel_disable_backlight(struct drm_device *dev) 564void intel_panel_disable_backlight(struct intel_connector *connector)
532{ 565{
566 struct drm_device *dev = connector->base.dev;
533 struct drm_i915_private *dev_priv = dev->dev_private; 567 struct drm_i915_private *dev_priv = dev->dev_private;
568 enum pipe pipe = intel_get_pipe_from_connector(connector);
534 unsigned long flags; 569 unsigned long flags;
535 570
571 if (pipe == INVALID_PIPE)
572 return;
573
536 /* 574 /*
537 * Do not disable backlight on the vgaswitcheroo path. When switching 575 * Do not disable backlight on the vgaswitcheroo path. When switching
538 * away from i915, the other client may depend on i915 to handle the 576 * away from i915, the other client may depend on i915 to handle the
@@ -547,12 +585,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
547 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 585 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
548 586
549 dev_priv->backlight.enabled = false; 587 dev_priv->backlight.enabled = false;
550 intel_panel_actually_set_backlight(dev, 0); 588 intel_panel_actually_set_backlight(dev, pipe, 0);
551 589
552 if (INTEL_INFO(dev)->gen >= 4) { 590 if (INTEL_INFO(dev)->gen >= 4) {
553 uint32_t reg, tmp; 591 uint32_t reg, tmp;
554 592
555 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 593 if (HAS_PCH_SPLIT(dev))
594 reg = BLC_PWM_CPU_CTL2;
595 else if (IS_VALLEYVIEW(dev))
596 reg = VLV_BLC_PWM_CTL2(pipe);
597 else
598 reg = BLC_PWM_CTL2;
556 599
557 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE); 600 I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
558 601
@@ -566,20 +609,25 @@ void intel_panel_disable_backlight(struct drm_device *dev)
566 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 609 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
567} 610}
568 611
569void intel_panel_enable_backlight(struct drm_device *dev, 612void intel_panel_enable_backlight(struct intel_connector *connector)
570 enum pipe pipe)
571{ 613{
614 struct drm_device *dev = connector->base.dev;
572 struct drm_i915_private *dev_priv = dev->dev_private; 615 struct drm_i915_private *dev_priv = dev->dev_private;
616 enum pipe pipe = intel_get_pipe_from_connector(connector);
573 enum transcoder cpu_transcoder = 617 enum transcoder cpu_transcoder =
574 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 618 intel_pipe_to_cpu_transcoder(dev_priv, pipe);
575 unsigned long flags; 619 unsigned long flags;
576 620
621 if (pipe == INVALID_PIPE)
622 return;
623
577 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); 624 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
578 625
579 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 626 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
580 627
581 if (dev_priv->backlight.level == 0) { 628 if (dev_priv->backlight.level == 0) {
582 dev_priv->backlight.level = intel_panel_get_max_backlight(dev); 629 dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
630 pipe);
583 if (dev_priv->backlight.device) 631 if (dev_priv->backlight.device)
584 dev_priv->backlight.device->props.brightness = 632 dev_priv->backlight.device->props.brightness =
585 dev_priv->backlight.level; 633 dev_priv->backlight.level;
@@ -588,8 +636,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
588 if (INTEL_INFO(dev)->gen >= 4) { 636 if (INTEL_INFO(dev)->gen >= 4) {
589 uint32_t reg, tmp; 637 uint32_t reg, tmp;
590 638
591 reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2; 639 if (HAS_PCH_SPLIT(dev))
592 640 reg = BLC_PWM_CPU_CTL2;
641 else if (IS_VALLEYVIEW(dev))
642 reg = VLV_BLC_PWM_CTL2(pipe);
643 else
644 reg = BLC_PWM_CTL2;
593 645
594 tmp = I915_READ(reg); 646 tmp = I915_READ(reg);
595 647
@@ -629,7 +681,8 @@ set_level:
629 * registers are set. 681 * registers are set.
630 */ 682 */
631 dev_priv->backlight.enabled = true; 683 dev_priv->backlight.enabled = true;
632 intel_panel_actually_set_backlight(dev, dev_priv->backlight.level); 684 intel_panel_actually_set_backlight(dev, pipe,
685 dev_priv->backlight.level);
633 686
634 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 687 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
635} 688}
@@ -640,9 +693,19 @@ static void intel_panel_init_backlight_regs(struct drm_device *dev)
640 struct drm_i915_private *dev_priv = dev->dev_private; 693 struct drm_i915_private *dev_priv = dev->dev_private;
641 694
642 if (IS_VALLEYVIEW(dev)) { 695 if (IS_VALLEYVIEW(dev)) {
643 u32 cur_val = I915_READ(BLC_PWM_CTL) & 696 enum pipe pipe;
644 BACKLIGHT_DUTY_CYCLE_MASK; 697
645 I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val); 698 for_each_pipe(pipe) {
699 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
700
701 /* Skip if the modulation freq is already set */
702 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
703 continue;
704
705 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
706 I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
707 cur_val);
708 }
646 } 709 }
647} 710}
648 711
@@ -652,7 +715,7 @@ static void intel_panel_init_backlight(struct drm_device *dev)
652 715
653 intel_panel_init_backlight_regs(dev); 716 intel_panel_init_backlight_regs(dev);
654 717
655 dev_priv->backlight.level = intel_panel_get_backlight(dev); 718 dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
656 dev_priv->backlight.enabled = dev_priv->backlight.level != 0; 719 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
657} 720}
658 721
@@ -681,18 +744,31 @@ intel_panel_detect(struct drm_device *dev)
681#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 744#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
682static int intel_panel_update_status(struct backlight_device *bd) 745static int intel_panel_update_status(struct backlight_device *bd)
683{ 746{
684 struct drm_device *dev = bl_get_data(bd); 747 struct intel_connector *connector = bl_get_data(bd);
748 struct drm_device *dev = connector->base.dev;
749
750 mutex_lock(&dev->mode_config.mutex);
685 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", 751 DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
686 bd->props.brightness, bd->props.max_brightness); 752 bd->props.brightness, bd->props.max_brightness);
687 intel_panel_set_backlight(dev, bd->props.brightness, 753 intel_panel_set_backlight(connector, bd->props.brightness,
688 bd->props.max_brightness); 754 bd->props.max_brightness);
755 mutex_unlock(&dev->mode_config.mutex);
689 return 0; 756 return 0;
690} 757}
691 758
692static int intel_panel_get_brightness(struct backlight_device *bd) 759static int intel_panel_get_brightness(struct backlight_device *bd)
693{ 760{
694 struct drm_device *dev = bl_get_data(bd); 761 struct intel_connector *connector = bl_get_data(bd);
695 return intel_panel_get_backlight(dev); 762 struct drm_device *dev = connector->base.dev;
763 enum pipe pipe;
764
765 mutex_lock(&dev->mode_config.mutex);
766 pipe = intel_get_pipe_from_connector(connector);
767 mutex_unlock(&dev->mode_config.mutex);
768 if (pipe == INVALID_PIPE)
769 return 0;
770
771 return intel_panel_get_backlight(connector->base.dev, pipe);
696} 772}
697 773
698static const struct backlight_ops intel_panel_bl_ops = { 774static const struct backlight_ops intel_panel_bl_ops = {
@@ -717,7 +793,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
717 props.brightness = dev_priv->backlight.level; 793 props.brightness = dev_priv->backlight.level;
718 794
719 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 795 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
720 props.max_brightness = intel_panel_get_max_backlight(dev); 796 props.max_brightness = intel_panel_get_max_backlight(dev, 0);
721 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 797 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
722 798
723 if (props.max_brightness == 0) { 799 if (props.max_brightness == 0) {
@@ -726,7 +802,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
726 } 802 }
727 dev_priv->backlight.device = 803 dev_priv->backlight.device =
728 backlight_device_register("intel_backlight", 804 backlight_device_register("intel_backlight",
729 connector->kdev, dev, 805 connector->kdev,
806 to_intel_connector(connector),
730 &intel_panel_bl_ops, &props); 807 &intel_panel_bl_ops, &props);
731 808
732 if (IS_ERR(dev_priv->backlight.device)) { 809 if (IS_ERR(dev_priv->backlight.device)) {
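
The backlight helpers now take an intel_connector instead of a bare drm_device and resolve the pipe themselves through intel_get_pipe_from_connector(), which is what lets Valleyview program its per-pipe PWM block (VLV_BLC_PWM_CTL(pipe)) rather than a single global register. Callers hold mode_config.mutex around the call so the connector-to-crtc mapping stays stable, and the helpers bail out quietly on INVALID_PIPE. A minimal caller sketch under those assumptions; the function below is illustrative and not part of the patch:

/* Illustrative caller of the per-connector backlight API. */
static void example_set_half_brightness(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;

	/*
	 * The pipe lookup walks connector -> encoder -> crtc, so the
	 * modeset state must not change under us.
	 */
	mutex_lock(&dev->mode_config.mutex);
	intel_panel_set_backlight(connector, 50, 100);	/* level 50 out of 100 */
	mutex_unlock(&dev->mode_config.mutex);
}
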
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8064ff927bcc..09ac9e79830f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <drm/i915_powerwell.h> 33#include <drm/i915_powerwell.h>
34 34
35/**
36 * RC6 is a special power stage which allows the GPU to enter a very
37 * low-voltage mode when idle, drawing as little as 0V while in this stage.
38 * This stage is entered automatically when the GPU is idle and RC6 support
39 * is enabled, and the GPU wakes up again as soon as a new workload arrives.
40 *
41 * There are several RC6 modes available on Intel GPUs, which differ in the
42 * latency required to enter and leave RC6, and in the voltage consumed by
43 * the GPU while in the different states.
44 *
45 * The combination of the following flags defines which states the GPU is
46 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
47 * RC6pp is the deepest RC6. Hardware support for them varies with the GPU,
48 * BIOS, chipset and platform. RC6 is usually the safest one and the one
49 * which brings the most power savings; deeper states save more power, but
50 * require higher latency to switch to and wake up.
51 */
52#define INTEL_RC6_ENABLE (1<<0)
53#define INTEL_RC6p_ENABLE (1<<1)
54#define INTEL_RC6pp_ENABLE (1<<2)
55
35/* FBC, or Frame Buffer Compression, is a technique employed to compress the 56/* FBC, or Frame Buffer Compression, is a technique employed to compress the
36 * framebuffer contents in-memory, aiming at reducing the required bandwidth 57 * framebuffer contents in-memory, aiming at reducing the required bandwidth
37 * during in-memory transfers and, therefore, reduce the power packet. 58 * during in-memory transfers and, therefore, reduce the power packet.
@@ -233,18 +254,6 @@ static void ironlake_disable_fbc(struct drm_device *dev)
233 dpfc_ctl &= ~DPFC_CTL_EN; 254 dpfc_ctl &= ~DPFC_CTL_EN;
234 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 255 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
235 256
236 if (IS_IVYBRIDGE(dev))
237 /* WaFbcDisableDpfcClockGating:ivb */
238 I915_WRITE(ILK_DSPCLK_GATE_D,
239 I915_READ(ILK_DSPCLK_GATE_D) &
240 ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
241
242 if (IS_HASWELL(dev))
243 /* WaFbcDisableDpfcClockGating:hsw */
244 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
245 I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
246 ~HSW_DPFC_GATING_DISABLE);
247
248 DRM_DEBUG_KMS("disabled FBC\n"); 257 DRM_DEBUG_KMS("disabled FBC\n");
249 } 258 }
250} 259}
@@ -274,18 +283,10 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
274 if (IS_IVYBRIDGE(dev)) { 283 if (IS_IVYBRIDGE(dev)) {
275 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 284 /* WaFbcAsynchFlipDisableFbcQueue:ivb */
276 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS); 285 I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
277 /* WaFbcDisableDpfcClockGating:ivb */
278 I915_WRITE(ILK_DSPCLK_GATE_D,
279 I915_READ(ILK_DSPCLK_GATE_D) |
280 ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
281 } else { 286 } else {
282 /* WaFbcAsynchFlipDisableFbcQueue:hsw */ 287 /* WaFbcAsynchFlipDisableFbcQueue:hsw */
283 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe), 288 I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
284 HSW_BYPASS_FBC_QUEUE); 289 HSW_BYPASS_FBC_QUEUE);
285 /* WaFbcDisableDpfcClockGating:hsw */
286 I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
287 I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
288 HSW_DPFC_GATING_DISABLE);
289 } 290 }
290 291
291 I915_WRITE(SNB_DPFC_CTL_SA, 292 I915_WRITE(SNB_DPFC_CTL_SA,
@@ -3685,6 +3686,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
3685 } 3686 }
3686} 3687}
3687 3688
3689static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3690{
3691 if (IS_GEN6(dev))
3692 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3693
3694 if (IS_HASWELL(dev))
3695 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3696
3697 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3698 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3699 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3700 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3701}
3702
3688int intel_enable_rc6(const struct drm_device *dev) 3703int intel_enable_rc6(const struct drm_device *dev)
3689{ 3704{
3690 /* No RC6 before Ironlake */ 3705 /* No RC6 before Ironlake */
@@ -3699,18 +3714,13 @@ int intel_enable_rc6(const struct drm_device *dev)
3699 if (INTEL_INFO(dev)->gen == 5) 3714 if (INTEL_INFO(dev)->gen == 5)
3700 return 0; 3715 return 0;
3701 3716
3702 if (IS_HASWELL(dev)) { 3717 if (IS_HASWELL(dev))
3703 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3704 return INTEL_RC6_ENABLE; 3718 return INTEL_RC6_ENABLE;
3705 }
3706 3719
3707 /* snb/ivb have more than one rc6 state. */ 3720 /* snb/ivb have more than one rc6 state. */
3708 if (INTEL_INFO(dev)->gen == 6) { 3721 if (INTEL_INFO(dev)->gen == 6)
3709 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3710 return INTEL_RC6_ENABLE; 3722 return INTEL_RC6_ENABLE;
3711 }
3712 3723
3713 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3714 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 3724 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3715} 3725}
3716 3726
@@ -3812,10 +3822,7 @@ static void gen6_enable_rps(struct drm_device *dev)
3812 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 3822 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3813 } 3823 }
3814 3824
3815 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3825 intel_print_rc6_info(dev, rc6_mask);
3816 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3817 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3818 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3819 3826
3820 I915_WRITE(GEN6_RC_CONTROL, 3827 I915_WRITE(GEN6_RC_CONTROL,
3821 rc6_mask | 3828 rc6_mask |
@@ -3888,7 +3895,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
3888 /* Convert from kHz to MHz */ 3895 /* Convert from kHz to MHz */
3889 max_ia_freq /= 1000; 3896 max_ia_freq /= 1000;
3890 3897
3891 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf; 3898 min_ring_freq = I915_READ(DCLK) & 0xf;
3892 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 3899 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3893 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 3900 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3894 3901
@@ -4051,6 +4058,9 @@ static void valleyview_enable_rps(struct drm_device *dev)
4051 VLV_RENDER_RC6_COUNT_EN)); 4058 VLV_RENDER_RC6_COUNT_EN));
4052 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4059 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4053 rc6_mode = GEN7_RC_CTL_TO_MODE; 4060 rc6_mode = GEN7_RC_CTL_TO_MODE;
4061
4062 intel_print_rc6_info(dev, rc6_mode);
4063
4054 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 4064 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4055 4065
4056 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4066 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -4222,6 +4232,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
4222 4232
4223 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); 4233 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4224 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 4234 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4235
4236 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
4225} 4237}
4226 4238
4227static unsigned long intel_pxfreq(u32 vidfreq) 4239static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4996,7 +5008,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
4996 * gating for the panel power sequencer or it will fail to 5008 * gating for the panel power sequencer or it will fail to
4997 * start up when no ports are active. 5009 * start up when no ports are active.
4998 */ 5010 */
4999 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 5011 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5012 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5013 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5000 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 5014 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5001 DPLS_EDP_PPS_FIX_DIS); 5015 DPLS_EDP_PPS_FIX_DIS);
5002 /* The below fixes the weird display corruption, a few pixels shifted 5016 /* The below fixes the weird display corruption, a few pixels shifted
@@ -5190,6 +5204,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
5190 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 5204 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5191 GEN7_WA_L3_CHICKEN_MODE); 5205 GEN7_WA_L3_CHICKEN_MODE);
5192 5206
5207 /* L3 caching of data atomics doesn't work -- disable it. */
5208 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5209 I915_WRITE(HSW_ROW_CHICKEN3,
5210 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5211
5193 /* This is required by WaCatErrorRejectionIssue:hsw */ 5212 /* This is required by WaCatErrorRejectionIssue:hsw */
5194 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 5213 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5195 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5214 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5485,6 +5504,23 @@ void intel_suspend_hw(struct drm_device *dev)
5485 lpt_suspend_hw(dev); 5504 lpt_suspend_hw(dev);
5486} 5505}
5487 5506
5507static bool is_always_on_power_domain(struct drm_device *dev,
5508 enum intel_display_power_domain domain)
5509{
5510 unsigned long always_on_domains;
5511
5512 BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
5513
5514 if (IS_HASWELL(dev)) {
5515 always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
5516 } else {
5517 WARN_ON(1);
5518 return true;
5519 }
5520
5521 return BIT(domain) & always_on_domains;
5522}
5523
5488/** 5524/**
5489 * We should only use the power well if we explicitly asked the hardware to 5525 * We should only use the power well if we explicitly asked the hardware to
5490 * enable it, so check if it's enabled and also check if we've requested it to 5526 * enable it, so check if it's enabled and also check if we've requested it to
@@ -5498,24 +5534,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
5498 if (!HAS_POWER_WELL(dev)) 5534 if (!HAS_POWER_WELL(dev))
5499 return true; 5535 return true;
5500 5536
5501 switch (domain) { 5537 if (is_always_on_power_domain(dev, domain))
5502 case POWER_DOMAIN_PIPE_A:
5503 case POWER_DOMAIN_TRANSCODER_EDP:
5504 return true; 5538 return true;
5505 case POWER_DOMAIN_VGA: 5539
5506 case POWER_DOMAIN_PIPE_B: 5540 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5507 case POWER_DOMAIN_PIPE_C:
5508 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5509 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5510 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5511 case POWER_DOMAIN_TRANSCODER_A:
5512 case POWER_DOMAIN_TRANSCODER_B:
5513 case POWER_DOMAIN_TRANSCODER_C:
5514 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5515 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5541 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5516 default:
5517 BUG();
5518 }
5519} 5542}
5520 5543
5521static void __intel_set_power_well(struct drm_device *dev, bool enable) 5544static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5565,169 +5588,130 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5565 } 5588 }
5566} 5589}
5567 5590
5568static void __intel_power_well_get(struct i915_power_well *power_well) 5591static void __intel_power_well_get(struct drm_device *dev,
5592 struct i915_power_well *power_well)
5569{ 5593{
5570 if (!power_well->count++) 5594 if (!power_well->count++)
5571 __intel_set_power_well(power_well->device, true); 5595 __intel_set_power_well(dev, true);
5572} 5596}
5573 5597
5574static void __intel_power_well_put(struct i915_power_well *power_well) 5598static void __intel_power_well_put(struct drm_device *dev,
5599 struct i915_power_well *power_well)
5575{ 5600{
5576 WARN_ON(!power_well->count); 5601 WARN_ON(!power_well->count);
5577 if (!--power_well->count) 5602 if (!--power_well->count && i915_disable_power_well)
5578 __intel_set_power_well(power_well->device, false); 5603 __intel_set_power_well(dev, false);
5579} 5604}
5580 5605
5581void intel_display_power_get(struct drm_device *dev, 5606void intel_display_power_get(struct drm_device *dev,
5582 enum intel_display_power_domain domain) 5607 enum intel_display_power_domain domain)
5583{ 5608{
5584 struct drm_i915_private *dev_priv = dev->dev_private; 5609 struct drm_i915_private *dev_priv = dev->dev_private;
5585 struct i915_power_well *power_well = &dev_priv->power_well; 5610 struct i915_power_domains *power_domains;
5586 5611
5587 if (!HAS_POWER_WELL(dev)) 5612 if (!HAS_POWER_WELL(dev))
5588 return; 5613 return;
5589 5614
5590 switch (domain) { 5615 if (is_always_on_power_domain(dev, domain))
5591 case POWER_DOMAIN_PIPE_A:
5592 case POWER_DOMAIN_TRANSCODER_EDP:
5593 return; 5616 return;
5594 case POWER_DOMAIN_VGA: 5617
5595 case POWER_DOMAIN_PIPE_B: 5618 power_domains = &dev_priv->power_domains;
5596 case POWER_DOMAIN_PIPE_C: 5619
5597 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 5620 mutex_lock(&power_domains->lock);
5598 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 5621 __intel_power_well_get(dev, &power_domains->power_wells[0]);
5599 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 5622 mutex_unlock(&power_domains->lock);
5600 case POWER_DOMAIN_TRANSCODER_A:
5601 case POWER_DOMAIN_TRANSCODER_B:
5602 case POWER_DOMAIN_TRANSCODER_C:
5603 spin_lock_irq(&power_well->lock);
5604 __intel_power_well_get(power_well);
5605 spin_unlock_irq(&power_well->lock);
5606 return;
5607 default:
5608 BUG();
5609 }
5610} 5623}
5611 5624
5612void intel_display_power_put(struct drm_device *dev, 5625void intel_display_power_put(struct drm_device *dev,
5613 enum intel_display_power_domain domain) 5626 enum intel_display_power_domain domain)
5614{ 5627{
5615 struct drm_i915_private *dev_priv = dev->dev_private; 5628 struct drm_i915_private *dev_priv = dev->dev_private;
5616 struct i915_power_well *power_well = &dev_priv->power_well; 5629 struct i915_power_domains *power_domains;
5617 5630
5618 if (!HAS_POWER_WELL(dev)) 5631 if (!HAS_POWER_WELL(dev))
5619 return; 5632 return;
5620 5633
5621 switch (domain) { 5634 if (is_always_on_power_domain(dev, domain))
5622 case POWER_DOMAIN_PIPE_A:
5623 case POWER_DOMAIN_TRANSCODER_EDP:
5624 return; 5635 return;
5625 case POWER_DOMAIN_VGA: 5636
5626 case POWER_DOMAIN_PIPE_B: 5637 power_domains = &dev_priv->power_domains;
5627 case POWER_DOMAIN_PIPE_C: 5638
5628 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 5639 mutex_lock(&power_domains->lock);
5629 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 5640 __intel_power_well_put(dev, &power_domains->power_wells[0]);
5630 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 5641 mutex_unlock(&power_domains->lock);
5631 case POWER_DOMAIN_TRANSCODER_A:
5632 case POWER_DOMAIN_TRANSCODER_B:
5633 case POWER_DOMAIN_TRANSCODER_C:
5634 spin_lock_irq(&power_well->lock);
5635 __intel_power_well_put(power_well);
5636 spin_unlock_irq(&power_well->lock);
5637 return;
5638 default:
5639 BUG();
5640 }
5641} 5642}
5642 5643
5643static struct i915_power_well *hsw_pwr; 5644static struct i915_power_domains *hsw_pwr;
5644 5645
5645/* Display audio driver power well request */ 5646/* Display audio driver power well request */
5646void i915_request_power_well(void) 5647void i915_request_power_well(void)
5647{ 5648{
5649 struct drm_i915_private *dev_priv;
5650
5648 if (WARN_ON(!hsw_pwr)) 5651 if (WARN_ON(!hsw_pwr))
5649 return; 5652 return;
5650 5653
5651 spin_lock_irq(&hsw_pwr->lock); 5654 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5652 __intel_power_well_get(hsw_pwr); 5655 power_domains);
5653 spin_unlock_irq(&hsw_pwr->lock); 5656
5657 mutex_lock(&hsw_pwr->lock);
5658 __intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
5659 mutex_unlock(&hsw_pwr->lock);
5654} 5660}
5655EXPORT_SYMBOL_GPL(i915_request_power_well); 5661EXPORT_SYMBOL_GPL(i915_request_power_well);
5656 5662
5657/* Display audio driver power well release */ 5663/* Display audio driver power well release */
5658void i915_release_power_well(void) 5664void i915_release_power_well(void)
5659{ 5665{
5666 struct drm_i915_private *dev_priv;
5667
5660 if (WARN_ON(!hsw_pwr)) 5668 if (WARN_ON(!hsw_pwr))
5661 return; 5669 return;
5662 5670
5663 spin_lock_irq(&hsw_pwr->lock); 5671 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5664 __intel_power_well_put(hsw_pwr); 5672 power_domains);
5665 spin_unlock_irq(&hsw_pwr->lock); 5673
5674 mutex_lock(&hsw_pwr->lock);
5675 __intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
5676 mutex_unlock(&hsw_pwr->lock);
5666} 5677}
5667EXPORT_SYMBOL_GPL(i915_release_power_well); 5678EXPORT_SYMBOL_GPL(i915_release_power_well);
5668 5679
5669int i915_init_power_well(struct drm_device *dev) 5680int intel_power_domains_init(struct drm_device *dev)
5670{ 5681{
5671 struct drm_i915_private *dev_priv = dev->dev_private; 5682 struct drm_i915_private *dev_priv = dev->dev_private;
5683 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5684 struct i915_power_well *power_well;
5672 5685
5673 hsw_pwr = &dev_priv->power_well; 5686 mutex_init(&power_domains->lock);
5687 hsw_pwr = power_domains;
5674 5688
5675 hsw_pwr->device = dev; 5689 power_well = &power_domains->power_wells[0];
5676 spin_lock_init(&hsw_pwr->lock); 5690 power_well->count = 0;
5677 hsw_pwr->count = 0;
5678 5691
5679 return 0; 5692 return 0;
5680} 5693}
5681 5694
5682void i915_remove_power_well(struct drm_device *dev) 5695void intel_power_domains_remove(struct drm_device *dev)
5683{ 5696{
5684 hsw_pwr = NULL; 5697 hsw_pwr = NULL;
5685} 5698}
5686 5699
5687void intel_set_power_well(struct drm_device *dev, bool enable) 5700static void intel_power_domains_resume(struct drm_device *dev)
5688{ 5701{
5689 struct drm_i915_private *dev_priv = dev->dev_private; 5702 struct drm_i915_private *dev_priv = dev->dev_private;
5690 struct i915_power_well *power_well = &dev_priv->power_well; 5703 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5704 struct i915_power_well *power_well;
5691 5705
5692 if (!HAS_POWER_WELL(dev)) 5706 if (!HAS_POWER_WELL(dev))
5693 return; 5707 return;
5694 5708
5695 if (!i915_disable_power_well && !enable) 5709 mutex_lock(&power_domains->lock);
5696 return;
5697
5698 spin_lock_irq(&power_well->lock);
5699
5700 /*
5701 * This function will only ever contribute one
5702 * to the power well reference count. i915_request
5703 * is what tracks whether we have or have not
5704 * added the one to the reference count.
5705 */
5706 if (power_well->i915_request == enable)
5707 goto out;
5708
5709 power_well->i915_request = enable;
5710
5711 if (enable)
5712 __intel_power_well_get(power_well);
5713 else
5714 __intel_power_well_put(power_well);
5715 5710
5716 out: 5711 power_well = &power_domains->power_wells[0];
5717 spin_unlock_irq(&power_well->lock);
5718}
5719
5720static void intel_resume_power_well(struct drm_device *dev)
5721{
5722 struct drm_i915_private *dev_priv = dev->dev_private;
5723 struct i915_power_well *power_well = &dev_priv->power_well;
5724
5725 if (!HAS_POWER_WELL(dev))
5726 return;
5727
5728 spin_lock_irq(&power_well->lock);
5729 __intel_set_power_well(dev, power_well->count > 0); 5712 __intel_set_power_well(dev, power_well->count > 0);
5730 spin_unlock_irq(&power_well->lock); 5713
5714 mutex_unlock(&power_domains->lock);
5731} 5715}
5732 5716
5733/* 5717/*
@@ -5736,7 +5720,7 @@ static void intel_resume_power_well(struct drm_device *dev)
5736 * to be enabled, and it will only be disabled if none of the registers is 5720 * to be enabled, and it will only be disabled if none of the registers is
5737 * requesting it to be enabled. 5721 * requesting it to be enabled.
5738 */ 5722 */
5739void intel_init_power_well(struct drm_device *dev) 5723void intel_power_domains_init_hw(struct drm_device *dev)
5740{ 5724{
5741 struct drm_i915_private *dev_priv = dev->dev_private; 5725 struct drm_i915_private *dev_priv = dev->dev_private;
5742 5726
@@ -5744,8 +5728,8 @@ void intel_init_power_well(struct drm_device *dev)
5744 return; 5728 return;
5745 5729
5746 /* For now, we need the power well to be always enabled. */ 5730 /* For now, we need the power well to be always enabled. */
5747 intel_set_power_well(dev, true); 5731 intel_display_set_init_power(dev, true);
5748 intel_resume_power_well(dev); 5732 intel_power_domains_resume(dev);
5749 5733
5750 /* We're taking over the BIOS, so clear any requests made by it since 5734 /* We're taking over the BIOS, so clear any requests made by it since
5751 * the driver is in charge now. */ 5735 * the driver is in charge now. */
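
The intel_pm.c part of the power-well refactoring replaces the per-domain switch statements with a single always-on mask (HSW_ALWAYS_ON_POWER_DOMAINS on Haswell), turns the spinlock into a mutex, and routes intel_display_power_get()/intel_display_power_put() through a refcount on power_wells[0]. Display code is expected to bracket access to hardware in the switchable well with that pair; a sketch, where both the domain and the register read are arbitrary examples rather than anything taken from the patch:

/* Illustrative use of the refcounted get/put API. */
static u32 example_read_pipe_b_register(struct drm_device *dev, u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	/* First user powers the well up and keeps it up. */
	intel_display_power_get(dev, POWER_DOMAIN_PIPE_B);

	val = I915_READ(reg);

	/*
	 * Last user dropping its reference lets the well go down again
	 * (subject to the i915_disable_power_well module option).
	 */
	intel_display_power_put(dev, POWER_DOMAIN_PIPE_B);

	return val;
}
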
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index bc19aa497d5b..b4b9943773bc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -40,7 +40,6 @@ nouveau_mc_intr(int irq, void *arg)
40 struct nouveau_mc *pmc = arg; 40 struct nouveau_mc *pmc = arg;
41 const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass; 41 const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
42 const struct nouveau_mc_intr *map = oclass->intr; 42 const struct nouveau_mc_intr *map = oclass->intr;
43 struct nouveau_device *device = nv_device(pmc);
44 struct nouveau_subdev *unit; 43 struct nouveau_subdev *unit;
45 u32 intr; 44 u32 intr;
46 45
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index eb3ba60a2e92..a42d61571f49 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 710 if (radeon_audio != 0) {
711 (drm_detect_hdmi_monitor(radeon_connector->edid) && 711 if (radeon_connector->use_digital &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 712 (radeon_connector->audio == RADEON_AUDIO_ENABLE))
713 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
714 else if (radeon_connector->use_digital) 714 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
715 (radeon_connector->audio == RADEON_AUDIO_AUTO))
716 return ATOM_ENCODER_MODE_HDMI;
717 else if (radeon_connector->use_digital)
718 return ATOM_ENCODER_MODE_DVI;
719 else
720 return ATOM_ENCODER_MODE_CRT;
721 } else if (radeon_connector->use_digital) {
715 return ATOM_ENCODER_MODE_DVI; 722 return ATOM_ENCODER_MODE_DVI;
716 else 723 } else {
717 return ATOM_ENCODER_MODE_CRT; 724 return ATOM_ENCODER_MODE_CRT;
725 }
718 break; 726 break;
719 case DRM_MODE_CONNECTOR_DVID: 727 case DRM_MODE_CONNECTOR_DVID:
720 case DRM_MODE_CONNECTOR_HDMIA: 728 case DRM_MODE_CONNECTOR_HDMIA:
721 default: 729 default:
722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 730 if (radeon_audio != 0) {
723 (drm_detect_hdmi_monitor(radeon_connector->edid) && 731 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
724 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 732 return ATOM_ENCODER_MODE_HDMI;
725 return ATOM_ENCODER_MODE_HDMI; 733 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
726 else 734 (radeon_connector->audio == RADEON_AUDIO_AUTO))
735 return ATOM_ENCODER_MODE_HDMI;
736 else
737 return ATOM_ENCODER_MODE_DVI;
738 } else {
727 return ATOM_ENCODER_MODE_DVI; 739 return ATOM_ENCODER_MODE_DVI;
740 }
728 break; 741 break;
729 case DRM_MODE_CONNECTOR_LVDS: 742 case DRM_MODE_CONNECTOR_LVDS:
730 return ATOM_ENCODER_MODE_LVDS; 743 return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 case DRM_MODE_CONNECTOR_DisplayPort: 745 case DRM_MODE_CONNECTOR_DisplayPort:
733 dig_connector = radeon_connector->con_priv; 746 dig_connector = radeon_connector->con_priv;
734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 747 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 748 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
736 return ATOM_ENCODER_MODE_DP; 749 return ATOM_ENCODER_MODE_DP;
737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 750 } else if (radeon_audio != 0) {
738 (drm_detect_hdmi_monitor(radeon_connector->edid) && 751 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
739 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 752 return ATOM_ENCODER_MODE_HDMI;
740 return ATOM_ENCODER_MODE_HDMI; 753 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
741 else 754 (radeon_connector->audio == RADEON_AUDIO_AUTO))
755 return ATOM_ENCODER_MODE_HDMI;
756 else
757 return ATOM_ENCODER_MODE_DVI;
758 } else {
742 return ATOM_ENCODER_MODE_DVI; 759 return ATOM_ENCODER_MODE_DVI;
760 }
743 break; 761 break;
744 case DRM_MODE_CONNECTOR_eDP: 762 case DRM_MODE_CONNECTOR_eDP:
745 return ATOM_ENCODER_MODE_DP; 763 return ATOM_ENCODER_MODE_DP;
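
The radeon change gates the HDMI audio decisions behind the radeon_audio module parameter (radeon.audio): with audio disabled the encoder mode falls back to DVI or CRT no matter what the EDID advertises, and only with audio enabled do the per-connector RADEON_AUDIO_ENABLE / RADEON_AUDIO_AUTO settings and drm_detect_hdmi_monitor() come into play. The DVI-D / HDMI-A branch, condensed into one helper with a made-up name but the same logic as the hunk:

/* Condensed restatement of the DVI-D / HDMI-A branch above. */
static int example_dvid_encoder_mode(struct radeon_connector *radeon_connector)
{
	if (radeon_audio != 0) {
		if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
			return ATOM_ENCODER_MODE_HDMI;
		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
		    radeon_connector->audio == RADEON_AUDIO_AUTO)
			return ATOM_ENCODER_MODE_HDMI;
		return ATOM_ENCODER_MODE_DVI;
	}

	/* audio support disabled altogether via the module parameter */
	return ATOM_ENCODER_MODE_DVI;
}
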
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index b162e98a2953..9b6950d9b3c0 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1930,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1930 } 1930 }
1931 j++; 1931 j++;
1932 1932
1933 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1933 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1934 return -EINVAL; 1934 return -EINVAL;
1935 1935
1936 tmp = RREG32(MC_PMG_CMD_MRS); 1936 tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1945,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1945 } 1945 }
1946 j++; 1946 j++;
1947 1947
1948 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1948 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1949 return -EINVAL; 1949 return -EINVAL;
1950 break; 1950 break;
1951 case MC_SEQ_RESERVE_M >> 2: 1951 case MC_SEQ_RESERVE_M >> 2:
@@ -1959,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
1959 } 1959 }
1960 j++; 1960 j++;
1961 1961
1962 if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) 1962 if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
1963 return -EINVAL; 1963 return -EINVAL;
1964 break; 1964 break;
1965 default: 1965 default:
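The three btc_dpm.c hunks above all tighten the same bounds check: j is incremented after each write, so the very next write goes to index j and j must be rejected once it reaches the array size, not one element later. A minimal sketch of the pattern, with made-up names and purely for illustration:

#include <stdio.h>

#define NUM_REGS 16

static int append_reg(unsigned int regs[NUM_REGS], unsigned int *j, unsigned int val)
{
	if (*j >= NUM_REGS)	/* with '> NUM_REGS' one write would land past the end */
		return -1;
	regs[(*j)++] = val;
	return 0;
}

int main(void)
{
	unsigned int regs[NUM_REGS];
	unsigned int j = 0;

	while (append_reg(regs, &j, 0xdeadbeef) == 0)
		;
	printf("stopped after %u entries\n", j);	/* prints 16 */
	return 0;
}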
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 2e48f902e3b5..e8544758b569 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -72,6 +72,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
72static void cik_program_aspm(struct radeon_device *rdev); 72static void cik_program_aspm(struct radeon_device *rdev);
73static void cik_init_pg(struct radeon_device *rdev); 73static void cik_init_pg(struct radeon_device *rdev);
74static void cik_init_cg(struct radeon_device *rdev); 74static void cik_init_cg(struct radeon_device *rdev);
75static void cik_fini_pg(struct radeon_device *rdev);
76static void cik_fini_cg(struct radeon_device *rdev);
75static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, 77static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
76 bool enable); 78 bool enable);
77 79
@@ -1687,6 +1689,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1687 fw_name); 1689 fw_name);
1688 release_firmware(rdev->smc_fw); 1690 release_firmware(rdev->smc_fw);
1689 rdev->smc_fw = NULL; 1691 rdev->smc_fw = NULL;
1692 err = 0;
1690 } else if (rdev->smc_fw->size != smc_req_size) { 1693 } else if (rdev->smc_fw->size != smc_req_size) {
1691 printk(KERN_ERR 1694 printk(KERN_ERR
1692 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 1695 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3254,6 +3257,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3254 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3257 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3255 if (r) { 3258 if (r) {
3256 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3259 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3260 radeon_scratch_free(rdev, scratch);
3257 return r; 3261 return r;
3258 } 3262 }
3259 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 3263 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3270,6 +3274,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3270 r = radeon_fence_wait(ib.fence, false); 3274 r = radeon_fence_wait(ib.fence, false);
3271 if (r) { 3275 if (r) {
3272 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3276 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3277 radeon_scratch_free(rdev, scratch);
3278 radeon_ib_free(rdev, &ib);
3273 return r; 3279 return r;
3274 } 3280 }
3275 for (i = 0; i < rdev->usec_timeout; i++) { 3281 for (i = 0; i < rdev->usec_timeout; i++) {
@@ -4259,6 +4265,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
4259 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 4265 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4260 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 4266 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
4261 4267
4268 /* disable CG/PG */
4269 cik_fini_pg(rdev);
4270 cik_fini_cg(rdev);
4271
4262 /* stop the rlc */ 4272 /* stop the rlc */
4263 cik_rlc_stop(rdev); 4273 cik_rlc_stop(rdev);
4264 4274
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 5929056beca6..ab92620ed83a 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -156,6 +156,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
156 u8 *sadb; 156 u8 *sadb;
157 int sad_count; 157 int sad_count;
158 158
159 /* XXX: setting this register causes hangs on some asics */
160 return;
161
159 if (!dig->afmt->pin) 162 if (!dig->afmt->pin)
160 return; 163 return;
161 164
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7a84d0cdeda7..52f1ae16f653 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3187,7 +3187,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3187 rdev->config.evergreen.sx_max_export_size = 256; 3187 rdev->config.evergreen.sx_max_export_size = 256;
3188 rdev->config.evergreen.sx_max_export_pos_size = 64; 3188 rdev->config.evergreen.sx_max_export_pos_size = 64;
3189 rdev->config.evergreen.sx_max_export_smx_size = 192; 3189 rdev->config.evergreen.sx_max_export_smx_size = 192;
3190 rdev->config.evergreen.max_hw_contexts = 8; 3190 rdev->config.evergreen.max_hw_contexts = 4;
3191 rdev->config.evergreen.sq_num_cf_insts = 2; 3191 rdev->config.evergreen.sq_num_cf_insts = 2;
3192 3192
3193 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 3193 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 678736542ed8..a82b6f78d7f2 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -105,6 +105,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
105 u8 *sadb; 105 u8 *sadb;
106 int sad_count; 106 int sad_count;
107 107
108 /* XXX: setting this register causes hangs on some asics */
109 return;
110
108 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 111 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
109 if (connector->encoder == encoder) { 112 if (connector->encoder == encoder) {
110 radeon_connector = to_radeon_connector(connector); 113 radeon_connector = to_radeon_connector(connector);
@@ -330,8 +333,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

 	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
-	       HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-	       HDMI_ACR_SOURCE); /* select SW CTS value */
+	       HDMI_ACR_SOURCE | /* select SW CTS value */
+	       HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */

 	evergreen_hdmi_update_ACR(encoder, mode->clock);

diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 11e002a47b55..17f990798992 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1571,7 +1571,7 @@
1571 * 6. COMMAND [29:22] | BYTE_COUNT [20:0] 1571 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1572 */ 1572 */
1573# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1573# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1574 /* 0 - SRC_ADDR 1574 /* 0 - DST_ADDR
1575 * 1 - GDS 1575 * 1 - GDS
1576 */ 1576 */
1577# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1577# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1586,7 +1586,7 @@
1586# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1586# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1587/* COMMAND */ 1587/* COMMAND */
1588# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1588# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1589# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1589# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1590 /* 0 - none 1590 /* 0 - none
1591 * 1 - 8 in 16 1591 * 1 - 8 in 16
1592 * 2 - 8 in 32 1592 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 71399065db04..b41905573cd2 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
2635 pi->caps_sclk_ds = true; 2635 pi->caps_sclk_ds = true;
2636 pi->enable_auto_thermal_throttling = true; 2636 pi->enable_auto_thermal_throttling = true;
2637 pi->disable_nb_ps3_in_battery = false; 2637 pi->disable_nb_ps3_in_battery = false;
2638 pi->bapm_enable = true; 2638 pi->bapm_enable = false;
2639 pi->voltage_drop_t = 0; 2639 pi->voltage_drop_t = 0;
2640 pi->caps_sclk_throttle_low_notification = false; 2640 pi->caps_sclk_throttle_low_notification = false;
2641 pi->caps_fps = false; /* true? */ 2641 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index e299a38e683a..11aab2ab54ce 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -799,6 +799,7 @@ int ni_init_microcode(struct radeon_device *rdev)
799 fw_name); 799 fw_name);
800 release_firmware(rdev->smc_fw); 800 release_firmware(rdev->smc_fw);
801 rdev->smc_fw = NULL; 801 rdev->smc_fw = NULL;
802 err = 0;
802 } else if (rdev->smc_fw->size != smc_req_size) { 803 } else if (rdev->smc_fw->size != smc_req_size) {
803 printk(KERN_ERR 804 printk(KERN_ERR
804 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 805 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index af922e297a12..4e609e8a8d2b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2355,6 +2355,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2355 fw_name); 2355 fw_name);
2356 release_firmware(rdev->smc_fw); 2356 release_firmware(rdev->smc_fw);
2357 rdev->smc_fw = NULL; 2357 rdev->smc_fw = NULL;
2358 err = 0;
2358 } else if (rdev->smc_fw->size != smc_req_size) { 2359 } else if (rdev->smc_fw->size != smc_req_size) {
2359 printk(KERN_ERR 2360 printk(KERN_ERR
2360 "smc: Bogus length %zu in firmware \"%s\"\n", 2361 "smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21f2b74e20e6..0977c303aeec 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*	     32kHz	  44.1kHz	48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148351, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
     {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
  */
 static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
 {
-	if (*CTS == 0)
-		*CTS = clock * N / (128 * freq) * 1000;
+	u64 n;
+	u32 d;
+
+	if (*CTS == 0) {
+		n = (u64)clock * (u64)N * 1000ULL;
+		d = 128 * freq;
+		do_div(n, d);
+		*CTS = n;
+	}
 	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
 		  N, *CTS, freq);
 }
@@ -302,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
302 u8 *sadb; 309 u8 *sadb;
303 int sad_count; 310 int sad_count;
304 311
312 /* XXX: setting this register causes hangs on some asics */
313 return;
314
305 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 315 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
306 if (connector->encoder == encoder) { 316 if (connector->encoder == encoder) {
307 radeon_connector = to_radeon_connector(connector); 317 radeon_connector = to_radeon_connector(connector);
@@ -448,8 +458,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
 	}

 	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
-	       HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-	       HDMI0_ACR_SOURCE); /* select SW CTS value */
+	       HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+	       HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */

 	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
 	       HDMI0_NULL_SEND | /* send null packets when required */
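For context on the r600_hdmi_calc_cts() change above: the HDMI ACR relation is CTS = (f_TMDS * N) / (128 * f_s), and the pixel clock is kept in kHz, so the product clock * N * 1000 can exceed 32 bits, which is why the patched code moves to 64-bit math and do_div(). A user-space sketch of the same arithmetic (illustrative only, not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* clock_khz: pixel clock in kHz, n: ACR N value, freq_hz: audio sample rate */
static uint32_t calc_cts(uint32_t clock_khz, uint32_t n, uint32_t freq_hz)
{
	uint64_t num = (uint64_t)clock_khz * n * 1000ULL;	/* kHz -> Hz */

	return (uint32_t)(num / (128ULL * freq_hz));
}

int main(void)
{
	/* 148.5 MHz pixel clock, N = 6144, 48 kHz audio -> CTS = 148500,
	 * matching the predefined table entry above. */
	printf("CTS = %u\n", calc_cts(148500, 6144, 48000));
	return 0;
}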
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e01b126aaeb..ebe38724a976 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1551,7 +1551,7 @@
1551 */ 1551 */
1552# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1552# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1553/* COMMAND */ 1553/* COMMAND */
1554# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1554# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1555 /* 0 - none 1555 /* 0 - none
1556 * 1 - 8 in 16 1556 * 1 - 8 in 16
1557 * 2 - 8 in 32 1557 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b264af6e8b9c..b9ee99258602 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1281,8 +1281,8 @@ struct radeon_blacklist_clocks
1281struct radeon_clock_and_voltage_limits { 1281struct radeon_clock_and_voltage_limits {
1282 u32 sclk; 1282 u32 sclk;
1283 u32 mclk; 1283 u32 mclk;
1284 u32 vddc; 1284 u16 vddc;
1285 u32 vddci; 1285 u16 vddci;
1286}; 1286};
1287 1287
1288struct radeon_clock_array { 1288struct radeon_clock_array {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e972143e5a36..f60b310b1399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1720,12 +1720,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.underscan_vborder_property,
 					   0);
-		drm_object_attach_property(&radeon_connector->base.base,
-					   rdev->mode_info.audio_property,
-					   RADEON_AUDIO_AUTO);
+
 		drm_object_attach_property(&radeon_connector->base.base,
 					   rdev->mode_info.dither_property,
 					   RADEON_FMT_DITHER_DISABLE);
+
+		if (radeon_audio != 0)
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.audio_property,
+						   (radeon_audio == 1) ?
+						   RADEON_AUDIO_AUTO :
+						   RADEON_AUDIO_DISABLE);
+
 		subpixel_order = SubPixelHorizontalRGB;
 		connector->interlace_allowed = true;
 		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1819,10 +1825,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
 		}
-		if (ASIC_IS_DCE2(rdev)) {
+		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.audio_property,
-						      RADEON_AUDIO_AUTO);
+						      (radeon_audio == 1) ?
+						      RADEON_AUDIO_AUTO :
+						      RADEON_AUDIO_DISABLE);
 		}
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -1869,10 +1877,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
 		}
-		if (ASIC_IS_DCE2(rdev)) {
+		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.audio_property,
-						      RADEON_AUDIO_AUTO);
+						      (radeon_audio == 1) ?
+						      RADEON_AUDIO_AUTO :
+						      RADEON_AUDIO_DISABLE);
 		}
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -1918,15 +1928,18 @@ radeon_add_atom_connector(struct drm_device *dev,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
 		}
-		if (ASIC_IS_DCE2(rdev)) {
+		if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.audio_property,
-						      RADEON_AUDIO_AUTO);
+						      (radeon_audio == 1) ?
+						      RADEON_AUDIO_AUTO :
+						      RADEON_AUDIO_DISABLE);
 		}
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.dither_property,
 						   RADEON_FMT_DITHER_DISABLE);
+
 		}
 		connector->interlace_allowed = true;
 		/* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c8ab019a980e..26ca223d12d6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		   VRAM, also but everything into VRAM on AGP cards to avoid
 		   image corruptions */
 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-		    p->rdev->family < CHIP_PALM &&
 		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
-
+			/* TODO: is this still needed for NI+ ? */
 			p->relocs[i].lobj.domain =
 				RADEON_GEM_DOMAIN_VRAM;

diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index aab24173d2e9..1aee32213f66 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -156,7 +156,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
@@ -200,7 +200,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);

-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);

 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
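The radeon_drv.c hunks above change the audio option from a plain on/off into the tristate convention used by several other radeon parameters: -1 means auto, 0 disable, 1 enable. A minimal sketch of that pattern as a stand-alone module (illustrative only; the "example" names are made up):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int example_audio = -1;
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, example_audio, int, 0444);

static int __init example_init(void)
{
	bool hw_has_audio = true;	/* pretend the probed hardware supports audio */
	bool enable = (example_audio == -1) ? hw_has_audio : (example_audio != 0);

	pr_info("example: audio %s\n", enable ? "enabled" : "disabled");
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");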
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 981fd06e1f9a..00bdcd3e47ba 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -954,6 +954,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 	if (enable) {
 		mutex_lock(&rdev->pm.mutex);
 		rdev->pm.dpm.uvd_active = true;
+		/* disable this for now */
+#if 0
 		if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
 		else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -963,6 +965,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
 		else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
 		else
+#endif
 			dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
 		rdev->pm.dpm.state = dpm_state;
 		mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f4d6bcee9006..12e8099a0823 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
36 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
37 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
38 uint64_t gtt_addr, vram_addr; 38 uint64_t gtt_addr, vram_addr;
39 unsigned i, n, size; 39 unsigned n, size;
40 int r, ring; 40 int i, r, ring;
41 41
42 switch (flag) { 42 switch (flag) {
43 case RADEON_TEST_COPY_DMA: 43 case RADEON_TEST_COPY_DMA:
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 234b2a3b8add..ab0a17248d55 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -478,7 +478,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}

-	if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
+	/* TODO: is this still necessary on NI+ ? */
+	if ((cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);
@@ -800,7 +801,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
 		    (rdev->pm.dpm.hd != hd)) {
 			rdev->pm.dpm.sd = sd;
 			rdev->pm.dpm.hd = hd;
-			streams_changed = true;
+			/* disable this for now */
+			/*streams_changed = true;*/
 		}
 	}

diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2acfe561796f..6a64ccaa0695 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -80,6 +80,9 @@ extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
80extern bool evergreen_is_display_hung(struct radeon_device *rdev); 80extern bool evergreen_is_display_hung(struct radeon_device *rdev);
81static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 81static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
82 bool enable); 82 bool enable);
83static void si_fini_pg(struct radeon_device *rdev);
84static void si_fini_cg(struct radeon_device *rdev);
85static void si_rlc_stop(struct radeon_device *rdev);
83 86
84static const u32 verde_rlc_save_restore_register_list[] = 87static const u32 verde_rlc_save_restore_register_list[] =
85{ 88{
@@ -1673,6 +1676,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1673 fw_name); 1676 fw_name);
1674 release_firmware(rdev->smc_fw); 1677 release_firmware(rdev->smc_fw);
1675 rdev->smc_fw = NULL; 1678 rdev->smc_fw = NULL;
1679 err = 0;
1676 } else if (rdev->smc_fw->size != smc_req_size) { 1680 } else if (rdev->smc_fw->size != smc_req_size) {
1677 printk(KERN_ERR 1681 printk(KERN_ERR
1678 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1682 "si_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3603,6 +3607,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3603 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 3607 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3604 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 3608 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3605 3609
3610 /* disable PG/CG */
3611 si_fini_pg(rdev);
3612 si_fini_cg(rdev);
3613
3614 /* stop the rlc */
3615 si_rlc_stop(rdev);
3616
3606 /* Disable CP parsing/prefetching */ 3617 /* Disable CP parsing/prefetching */
3607 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 3618 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3608 3619
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a24ada406046..0b00c790fb77 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5213,7 +5213,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5213 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 5213 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5214 } 5214 }
5215 j++; 5215 j++;
5216 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5216 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5217 return -EINVAL; 5217 return -EINVAL;
5218 5218
5219 if (!pi->mem_gddr5) { 5219 if (!pi->mem_gddr5) {
@@ -5223,7 +5223,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5223 table->mc_reg_table_entry[k].mc_data[j] = 5223 table->mc_reg_table_entry[k].mc_data[j] =
5224 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 5224 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5225 j++; 5225 j++;
5226 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5226 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5227 return -EINVAL; 5227 return -EINVAL;
5228 } 5228 }
5229 break; 5229 break;
@@ -5236,7 +5236,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
5236 (temp_reg & 0xffff0000) | 5236 (temp_reg & 0xffff0000) |
5237 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 5237 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5238 j++; 5238 j++;
5239 if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) 5239 if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5240 return -EINVAL; 5240 return -EINVAL;
5241 break; 5241 break;
5242 default: 5242 default:
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 5691a7c30686..b322acc48097 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1598,7 +1598,7 @@
1598 * 6. COMMAND [30:21] | BYTE_COUNT [20:0] 1598 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
1599 */ 1599 */
1600# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) 1600# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1601 /* 0 - SRC_ADDR 1601 /* 0 - DST_ADDR
1602 * 1 - GDS 1602 * 1 - GDS
1603 */ 1603 */
1604# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) 1604# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
@@ -1613,7 +1613,7 @@
1613# define PACKET3_CP_DMA_CP_SYNC (1 << 31) 1613# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1614/* COMMAND */ 1614/* COMMAND */
1615# define PACKET3_CP_DMA_DIS_WC (1 << 21) 1615# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1616# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) 1616# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
1617 /* 0 - none 1617 /* 0 - none
1618 * 1 - 8 in 16 1618 * 1 - 8 in 16
1619 * 2 - 8 in 32 1619 * 2 - 8 in 32
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 7f998bf1cc9d..9364129ba292 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1868 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1869 pi->at[i] = TRINITY_AT_DFLT; 1869 pi->at[i] = TRINITY_AT_DFLT;
1870 1870
1871 pi->enable_bapm = true; 1871 pi->enable_bapm = false;
1872 pi->enable_nbps_policy = true; 1872 pi->enable_nbps_policy = true;
1873 pi->enable_sclk_ds = true; 1873 pi->enable_sclk_ds = true;
1874 pi->enable_gfx_power_gating = true; 1874 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 3100fa9cb52f..7266805d9786 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC and NC0 */ 215 /* enable UMC */
216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13))); 216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ba1f8f1c6d5d..814665b7a117 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -825,9 +825,17 @@ static void vmw_postclose(struct drm_device *dev,
 	struct vmw_fpriv *vmw_fp;

 	vmw_fp = vmw_fpriv(file_priv);
-	ttm_object_file_release(&vmw_fp->tfile);
-	if (vmw_fp->locked_master)
+
+	if (vmw_fp->locked_master) {
+		struct vmw_master *vmaster =
+			vmw_master(vmw_fp->locked_master);
+
+		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+		ttm_vt_unlock(&vmaster->lock);
 		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }

@@ -1010,14 +1018,13 @@ static void vmw_master_drop(struct drm_device *dev,

 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv);
-
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 		drm_master_put(&vmw_fp->locked_master);
 	}

-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+	vmw_execbuf_release_pinned_bo(dev_priv);

 	if (!dev_priv->enable_fb) {
 		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065d..37fb4befec82 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
970 if (new_backup) 970 if (new_backup)
971 res->backup_offset = new_backup_offset; 971 res->backup_offset = new_backup_offset;
972 972
973 if (!res->func->may_evict) 973 if (!res->func->may_evict || res->id == -1)
974 return; 974 return;
975 975
976 write_lock(&dev_priv->resource_lock); 976 write_lock(&dev_priv->resource_lock);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71b70e3a7a71..c91d547191dd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -241,6 +241,7 @@ config HID_HOLTEK
241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice 241 - Sharkoon Drakonia / Perixx MX-2000 gaming mice
242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / 242 - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
243 Zalman ZM-GM1 243 Zalman ZM-GM1
244 - SHARKOON DarkGlider Gaming mouse
244 245
245config HOLTEK_FF 246config HOLTEK_FF
246 bool "Holtek On Line Grip force feedback support" 247 bool "Holtek On Line Grip force feedback support"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b8470b1a10fe..e80da62363bc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
319 319
320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
321{ 321{
322 __u32 raw_value; 322 __s32 raw_value;
323 switch (item->tag) { 323 switch (item->tag) {
324 case HID_GLOBAL_ITEM_TAG_PUSH: 324 case HID_GLOBAL_ITEM_TAG_PUSH:
325 325
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 		return 0;

 	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-		/* Units exponent negative numbers are given through a
-		 * two's complement.
-		 * See "6.2.2.7 Global Items" for more information. */
-		raw_value = item_udata(item);
+		/* Many devices provide unit exponent as a two's complement
+		 * nibble due to the common misunderstanding of HID
+		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+		 * both this and the standard encoding. */
+		raw_value = item_sdata(item);
 		if (!(raw_value & 0xfffffff0))
 			parser->global.unit_exponent = hid_snto32(raw_value, 4);
 		else
@@ -1715,6 +1716,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1715 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, 1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
1716 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
1717 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 1718 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
1719 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1718 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, 1720 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
1719 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1721 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1720 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1722 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
@@ -1869,6 +1871,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1869 1871
1870 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
1871 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1873 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1874 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1875 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1873 { } 1876 { }
1874}; 1877};
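The unit-exponent hunk above leans on hid_snto32(value, 4), i.e. interpreting the low nibble as a 4-bit two's complement number. A stand-alone sketch of that sign extension (illustrative only, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

static int32_t snto32_4bit(uint32_t value)
{
	value &= 0xf;
	return (value & 0x8) ? (int32_t)value - 16 : (int32_t)value;
}

int main(void)
{
	/* 0x0..0x7 map to 0..7, 0x8..0xf map to -8..-1 */
	printf("0x3 -> %d, 0xd -> %d\n", snto32_4bit(0x3), snto32_4bit(0xd));
	return 0;
}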
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 7e6db3cf46f9..e696566cde46 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -27,6 +27,7 @@
27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 27 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 28 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
29 * and Zalman ZM-GM1 29 * and Zalman ZM-GM1
30 * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
30 */ 31 */
31 32
32static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, 33static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
46 } 47 }
47 break; 48 break;
48 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: 49 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
50 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
49 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f 51 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
50 && rdesc[111] == 0xff && rdesc[112] == 0x7f) { 52 && rdesc[111] == 0xff && rdesc[112] == 0x7f) {
51 hid_info(hdev, "Fixing up report descriptor\n"); 53 hid_info(hdev, "Fixing up report descriptor\n");
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
63 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, 65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
64 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, 66 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
65 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, 67 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
68 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
69 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
66 { } 70 { }
67}; 71};
68MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); 72MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e60e8d530697..f0296a50be5f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -450,6 +450,7 @@
450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 450#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 451#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067
452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a 452#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a
453#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
453 454
454#define USB_VENDOR_ID_IMATION 0x0718 455#define USB_VENDOR_ID_IMATION 0x0718
455#define USB_DEVICE_ID_DISC_STAKKA 0xd000 456#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -632,6 +633,7 @@
632#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 633#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
633 634
634#define USB_VENDOR_ID_NINTENDO 0x057e 635#define USB_VENDOR_ID_NINTENDO 0x057e
636#define USB_VENDOR_ID_NINTENDO2 0x054c
635#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 637#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
636#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 638#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
637 639
@@ -791,6 +793,8 @@
791#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 793#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
792#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 794#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
793#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 795#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
796#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
797#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
794 798
795#define USB_VENDOR_ID_THINGM 0x27b8 799#define USB_VENDOR_ID_THINGM 0x27b8
796#define USB_DEVICE_ID_BLINK1 0x01ed 800#define USB_DEVICE_ID_BLINK1 0x01ed
@@ -918,4 +922,7 @@
918#define USB_VENDOR_ID_PRIMAX 0x0461 922#define USB_VENDOR_ID_PRIMAX 0x0461
919#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 923#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
920 924
925#define USB_VENDOR_ID_SIS 0x0457
926#define USB_DEVICE_ID_SIS_TS 0x1013
927
921#endif 928#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 8741d953dcc8..d97f2323af57 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
192 return -EINVAL; 192 return -EINVAL;
193} 193}
194 194
195
195/** 196/**
196 * hidinput_calc_abs_res - calculate an absolute axis resolution 197 * hidinput_calc_abs_res - calculate an absolute axis resolution
197 * @field: the HID report field to calculate resolution for 198 * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
 	case ABS_MT_TOOL_Y:
 	case ABS_MT_TOUCH_MAJOR:
 	case ABS_MT_TOUCH_MINOR:
-		if (field->unit & 0xffffff00)		/* Not a length */
-			return 0;
-		unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
-		switch (field->unit & 0xf) {
-		case 0x1:				/* If centimeters */
+		if (field->unit == 0x11) {		/* If centimeters */
 			/* Convert to millimeters */
 			unit_exponent += 1;
-			break;
-		case 0x3:				/* If inches */
+		} else if (field->unit == 0x13) {	/* If inches */
 			/* Convert to millimeters */
 			prev = physical_extents;
 			physical_extents *= 254;
 			if (physical_extents < prev)
 				return 0;
 			unit_exponent -= 1;
-			break;
-		default:
+		} else {
 			return 0;
 		}
 		break;
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 602c188e9d86..6101816a7ddd 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
 }
 #define PROFILE_ATTR(number) \
 static struct bin_attribute bin_attr_profile##number = { \
-	.attr = { .name = "profile##number", .mode = 0660 }, \
+	.attr = { .name = "profile" #number, .mode = 0660 }, \
 	.size = sizeof(struct kone_profile), \
 	.read = kone_sysfs_read_profilex, \
 	.write = kone_sysfs_write_profilex, \
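The roccat fixes in this and the following three files correct the same preprocessor mistake: the ## token-paste operator does nothing inside a string literal, so "profile##number" was the literal attribute name, while "profile" #number stringizes the macro argument and relies on adjacent string-literal concatenation. A tiny illustration (not driver code):

#include <stdio.h>

#define NAME_WRONG(number) "profile##number"
#define NAME_RIGHT(number) "profile" #number

int main(void)
{
	printf("%s\n", NAME_WRONG(3));	/* prints: profile##number */
	printf("%s\n", NAME_RIGHT(3));	/* prints: profile3 */
	return 0;
}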
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 5ddf605b6b89..5e99fcdc71b9 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,

 #define PROFILE_ATTR(number) \
 static struct bin_attribute bin_attr_profile##number##_settings = { \
-	.attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
 	.size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
 	.read = koneplus_sysfs_read_profilex_settings, \
 	.private = &profile_numbers[number-1], \
 }; \
 static struct bin_attribute bin_attr_profile##number##_buttons = { \
-	.attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
 	.size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
 	.read = koneplus_sysfs_read_profilex_buttons, \
 	.private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 515bc03136c0..0c8e1ef0b67d 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,

 #define PROFILE_ATTR(number) \
 static struct bin_attribute bin_attr_profile##number##_settings = { \
-	.attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
 	.size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
 	.read = kovaplus_sysfs_read_profilex_settings, \
 	.private = &profile_numbers[number-1], \
 }; \
 static struct bin_attribute bin_attr_profile##number##_buttons = { \
-	.attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
 	.size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
 	.read = kovaplus_sysfs_read_profilex_buttons, \
 	.private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 5a6dbbeee790..1a07e07d99a0 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,

 #define PROFILE_ATTR(number) \
 static struct bin_attribute bin_attr_profile##number##_settings = { \
-	.attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
 	.size = PYRA_SIZE_PROFILE_SETTINGS, \
 	.read = pyra_sysfs_read_profilex_settings, \
 	.private = &profile_numbers[number-1], \
 }; \
 static struct bin_attribute bin_attr_profile##number##_buttons = { \
-	.attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+	.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
 	.size = PYRA_SIZE_PROFILE_BUTTONS, \
 	.read = pyra_sysfs_read_profilex_buttons, \
 	.private = &profile_numbers[number-1], \
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index abb20db2b443..1446f526ee8b 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
834 goto done; 834 goto done;
835 } 835 }
836 836
837 if (vendor == USB_VENDOR_ID_NINTENDO) { 837 if (vendor == USB_VENDOR_ID_NINTENDO ||
838 vendor == USB_VENDOR_ID_NINTENDO2) {
838 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
839 devtype = WIIMOTE_DEV_GEN10; 840 devtype = WIIMOTE_DEV_GEN10;
840 goto done; 841 goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
1855static const struct hid_device_id wiimote_hid_devices[] = { 1856static const struct hid_device_id wiimote_hid_devices[] = {
1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1857 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1857 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1858 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1859 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
1860 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1858 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1861 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1859 USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1862 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1860 { } 1863 { }
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 2e7d644dba18..71adf9e60b13 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = {
  * the rumble motor, this flag shouldn't be set.
  */

+/* used by wiimod_rumble and wiipro_rumble */
+static void wiimod_rumble_worker(struct work_struct *work)
+{
+	struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+						  rumble_worker);
+
+	spin_lock_irq(&wdata->state.lock);
+	wiiproto_req_rumble(wdata, wdata->state.cache_rumble);
+	spin_unlock_irq(&wdata->state.lock);
+}
+
 static int wiimod_rumble_play(struct input_dev *dev, void *data,
 			      struct ff_effect *eff)
 {
 	struct wiimote_data *wdata = input_get_drvdata(dev);
 	__u8 value;
-	unsigned long flags;

 	/*
 	 * The wiimote supports only a single rumble motor so if any magnitude
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
 	else
 		value = 0;

-	spin_lock_irqsave(&wdata->state.lock, flags);
-	wiiproto_req_rumble(wdata, value);
-	spin_unlock_irqrestore(&wdata->state.lock, flags);
+	/* Locking state.lock here might deadlock with input_event() calls.
+	 * schedule_work acts as barrier. Merging multiple changes is fine. */
+	wdata->state.cache_rumble = value;
+	schedule_work(&wdata->rumble_worker);

 	return 0;
 }
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
 static int wiimod_rumble_probe(const struct wiimod_ops *ops,
 			       struct wiimote_data *wdata)
 {
+	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
 	set_bit(FF_RUMBLE, wdata->input->ffbit);
 	if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play))
 		return -ENOMEM;
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops,
 {
 	unsigned long flags;

+	cancel_work_sync(&wdata->rumble_worker);
+
 	spin_lock_irqsave(&wdata->state.lock, flags);
 	wiiproto_req_rumble(wdata, 0);
 	spin_unlock_irqrestore(&wdata->state.lock, flags);
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
 {
 	struct wiimote_data *wdata = input_get_drvdata(dev);
 	__u8 value;
-	unsigned long flags;

 	/*
 	 * The wiimote supports only a single rumble motor so if any magnitude
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
 	else
 		value = 0;

-	spin_lock_irqsave(&wdata->state.lock, flags);
-	wiiproto_req_rumble(wdata, value);
-	spin_unlock_irqrestore(&wdata->state.lock, flags);
+	/* Locking state.lock here might deadlock with input_event() calls.
+	 * schedule_work acts as barrier. Merging multiple changes is fine. */
+	wdata->state.cache_rumble = value;
+	schedule_work(&wdata->rumble_worker);

 	return 0;
 }
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
 {
 	int ret, i;

+	INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
 	wdata->extension.input = input_allocate_device();
 	if (!wdata->extension.input)
 		return -ENOMEM;
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
 	if (!wdata->extension.input)
 		return;

+	input_unregister_device(wdata->extension.input);
+	wdata->extension.input = NULL;
+	cancel_work_sync(&wdata->rumble_worker);
+
 	spin_lock_irqsave(&wdata->state.lock, flags);
 	wiiproto_req_rumble(wdata, 0);
 	spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-	input_unregister_device(wdata->extension.input);
-	wdata->extension.input = NULL;
 }

 static const struct wiimod_ops wiimod_pro = {
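The wiimote hunks above all implement the same pattern: the force-feedback play callback runs in the input_event() path, so instead of taking the state lock there it caches the requested value and defers the protocol request to a work item, which is cancelled before teardown. A condensed sketch of that pattern with made-up names (illustrative only, not the driver code):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_dev {
	spinlock_t lock;
	u8 cache_rumble;
	struct work_struct rumble_worker;
};

static void example_rumble_worker(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       rumble_worker);

	spin_lock_irq(&dev->lock);
	/* a real driver would send dev->cache_rumble to the hardware here */
	spin_unlock_irq(&dev->lock);
}

static void example_setup(struct example_dev *dev)
{
	spin_lock_init(&dev->lock);
	INIT_WORK(&dev->rumble_worker, example_rumble_worker);
}

/* called from the input_event() path: no locking, just cache and defer */
static void example_play(struct example_dev *dev, u8 value)
{
	dev->cache_rumble = value;
	schedule_work(&dev->rumble_worker);
}

/* called on removal, before freeing dev */
static void example_remove(struct example_dev *dev)
{
	cancel_work_sync(&dev->rumble_worker);
}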
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index f1474f372c0b..75db0c400037 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -133,13 +133,15 @@ struct wiimote_state {
 	__u8 *cmd_read_buf;
 	__u8 cmd_read_size;

-	/* calibration data */
+	/* calibration/cache data */
 	__u16 calib_bboard[4][3];
+	__u8 cache_rumble;
 };

 struct wiimote_data {
 	struct hid_device *hdev;
 	struct input_dev *input;
+	struct work_struct rumble_worker;
 	struct led_classdev *leds[4];
 	struct input_dev *accel;
 	struct input_dev *ir;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 8918dd12bb69..6a6dd5cd7833 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on)
308static void drop_ref(struct hidraw *hidraw, int exists_bit) 308static void drop_ref(struct hidraw *hidraw, int exists_bit)
309{ 309{
310 if (exists_bit) { 310 if (exists_bit) {
311 hid_hw_close(hidraw->hid);
312 hidraw->exist = 0; 311 hidraw->exist = 0;
313 if (hidraw->open) 312 if (hidraw->open) {
313 hid_hw_close(hidraw->hid);
314 wake_up_interruptible(&hidraw->wait); 314 wake_up_interruptible(&hidraw->wait);
315 }
315 } else { 316 } else {
316 --hidraw->open; 317 --hidraw->open;
317 } 318 }
318 319 if (!hidraw->open) {
319 if (!hidraw->open && !hidraw->exist) { 320 if (!hidraw->exist) {
320 device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 321 device_destroy(hidraw_class,
321 hidraw_table[hidraw->minor] = NULL; 322 MKDEV(hidraw_major, hidraw->minor));
322 kfree(hidraw); 323 hidraw_table[hidraw->minor] = NULL;
324 kfree(hidraw);
325 } else {
326 /* close device for last reader */
327 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
328 hid_hw_close(hidraw->hid);
329 }
323 } 330 }
324} 331}
325 332
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 5bf2fb785844..93b00d76374c 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = {
615 615
616static struct miscdevice uhid_misc = { 616static struct miscdevice uhid_misc = {
617 .fops = &uhid_fops, 617 .fops = &uhid_fops,
618 .minor = MISC_DYNAMIC_MINOR, 618 .minor = UHID_MINOR,
619 .name = UHID_NAME, 619 .name = UHID_NAME,
620}; 620};
621 621
@@ -634,4 +634,5 @@ module_exit(uhid_exit);
634MODULE_LICENSE("GPL"); 634MODULE_LICENSE("GPL");
635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); 635MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); 636MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
637MODULE_ALIAS_MISCDEV(UHID_MINOR);
637MODULE_ALIAS("devname:" UHID_NAME); 638MODULE_ALIAS("devname:" UHID_NAME);
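The uhid change above moves the misc device from a dynamic minor to the fixed UHID_MINOR and adds MODULE_ALIAS_MISCDEV, which lets userspace create a static /dev/uhid node and have the module autoloaded on first open. A hedged sketch of a misc device registered on a fixed minor; the minor value and names below are made up for illustration:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#define EXAMPLE_MINOR	249	/* hypothetical fixed minor, not UHID's */

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
};

static struct miscdevice example_misc = {
	.fops	= &example_fops,
	.minor	= EXAMPLE_MINOR,
	.name	= "example",
};

static int __init example_init(void)
{
	return misc_register(&example_misc);
}

static void __exit example_exit(void)
{
	misc_deregister(&example_misc);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
/* with a fixed minor the module can be autoloaded when the static
 * device node is first opened */
MODULE_ALIAS_MISCDEV(EXAMPLE_MINOR);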
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07345521f421..3fca3be08337 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
113 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
114 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
115 { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
113 116
114 { 0, 0 } 117 { 0, 0 }
115}; 118};
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 98814d12a604..3288f13d2d87 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -230,6 +230,7 @@ static int send_argument(const char *key)
230 230
231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) 231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
232{ 232{
233 u8 status, data = 0;
233 int i; 234 int i;
234 235
235 if (send_command(cmd) || send_argument(key)) { 236 if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
237 return -EIO; 238 return -EIO;
238 } 239 }
239 240
241 /* This has no effect on newer (2012) SMCs */
240 if (send_byte(len, APPLESMC_DATA_PORT)) { 242 if (send_byte(len, APPLESMC_DATA_PORT)) {
241 pr_warn("%.4s: read len fail\n", key); 243 pr_warn("%.4s: read len fail\n", key);
242 return -EIO; 244 return -EIO;
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
250 buffer[i] = inb(APPLESMC_DATA_PORT); 252 buffer[i] = inb(APPLESMC_DATA_PORT);
251 } 253 }
252 254
255 /* Read the data port until bit0 is cleared */
256 for (i = 0; i < 16; i++) {
257 udelay(APPLESMC_MIN_WAIT);
258 status = inb(APPLESMC_CMD_PORT);
259 if (!(status & 0x01))
260 break;
261 data = inb(APPLESMC_DATA_PORT);
262 }
263 if (i)
264 pr_warn("flushed %d bytes, last value is: %d\n", i, data);
265
253 return 0; 266 return 0;
254} 267}
255 268
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4c1b60539a25..0aa01136f8d9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
270MODULE_ALIAS("platform:i2c_designware"); 270MODULE_ALIAS("platform:i2c_designware");
271 271
272static struct platform_driver dw_i2c_driver = { 272static struct platform_driver dw_i2c_driver = {
273 .remove = dw_i2c_remove, 273 .probe = dw_i2c_probe,
274 .remove = dw_i2c_remove,
274 .driver = { 275 .driver = {
275 .name = "i2c_designware", 276 .name = "i2c_designware",
276 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = {
282 283
283static int __init dw_i2c_init_driver(void) 284static int __init dw_i2c_init_driver(void)
284{ 285{
285 return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); 286 return platform_driver_register(&dw_i2c_driver);
286} 287}
287subsys_initcall(dw_i2c_init_driver); 288subsys_initcall(dw_i2c_init_driver);
288 289
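Several i2c bus drivers in this pull (designware, imx, mxs, stu300) switch from platform_driver_probe() to platform_driver_register() with .probe wired into the driver struct, and drop the __init/__exit markings on probe/remove. The likely reason is that platform_driver_probe() performs a one-shot probe and cannot retry after -EPROBE_DEFER or bind devices that appear later, and a probe routine that may run after boot must not live in an __init section. A minimal sketch of the resulting driver shape, with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* may run (and re-run after -EPROBE_DEFER) at any time,
	 * so it must not be marked __init */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
subsys_initcall(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");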
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ccf46656bdad..1d7efa3169cd 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
365 clk_disable_unprepare(i2c_imx->clk); 365 clk_disable_unprepare(i2c_imx->clk);
366} 366}
367 367
368static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, 368static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
369 unsigned int rate) 369 unsigned int rate)
370{ 370{
371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; 371 struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = {
589 .functionality = i2c_imx_func, 589 .functionality = i2c_imx_func,
590}; 590};
591 591
592static int __init i2c_imx_probe(struct platform_device *pdev) 592static int i2c_imx_probe(struct platform_device *pdev)
593{ 593{
594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids, 594 const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
595 &pdev->dev); 595 &pdev->dev);
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
697 return 0; /* Return OK */ 697 return 0; /* Return OK */
698} 698}
699 699
700static int __exit i2c_imx_remove(struct platform_device *pdev) 700static int i2c_imx_remove(struct platform_device *pdev)
701{ 701{
702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); 702 struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
703 703
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
715} 715}
716 716
717static struct platform_driver i2c_imx_driver = { 717static struct platform_driver i2c_imx_driver = {
718 .remove = __exit_p(i2c_imx_remove), 718 .probe = i2c_imx_probe,
719 .remove = i2c_imx_remove,
719 .driver = { 720 .driver = {
720 .name = DRIVER_NAME, 721 .name = DRIVER_NAME,
721 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = {
726 727
727static int __init i2c_adap_imx_init(void) 728static int __init i2c_adap_imx_init(void)
728{ 729{
729 return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); 730 return platform_driver_register(&i2c_imx_driver);
730} 731}
731subsys_initcall(i2c_adap_imx_init); 732subsys_initcall(i2c_adap_imx_init);
732 733
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index f4a01675fa71..b7c857774708 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -780,12 +780,13 @@ static struct platform_driver mxs_i2c_driver = {
780 .owner = THIS_MODULE, 780 .owner = THIS_MODULE,
781 .of_match_table = mxs_i2c_dt_ids, 781 .of_match_table = mxs_i2c_dt_ids,
782 }, 782 },
783 .probe = mxs_i2c_probe,
783 .remove = mxs_i2c_remove, 784 .remove = mxs_i2c_remove,
784}; 785};
785 786
786static int __init mxs_i2c_init(void) 787static int __init mxs_i2c_init(void)
787{ 788{
788 return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe); 789 return platform_driver_register(&mxs_i2c_driver);
789} 790}
790subsys_initcall(mxs_i2c_init); 791subsys_initcall(mxs_i2c_init);
791 792
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6d8308d5dc4e..9967a6f9c2ff 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
939 /* 939 /*
940 * ProDB0017052: Clear ARDY bit twice 940 * ProDB0017052: Clear ARDY bit twice
941 */ 941 */
942 if (stat & OMAP_I2C_STAT_ARDY)
943 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
944
942 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 945 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
943 OMAP_I2C_STAT_AL)) { 946 OMAP_I2C_STAT_AL)) {
944 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY | 947 omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index f8f6f2e552db..04a17b9b38bb 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = {
859 .functionality = stu300_func, 859 .functionality = stu300_func,
860}; 860};
861 861
862static int __init 862static int stu300_probe(struct platform_device *pdev)
863stu300_probe(struct platform_device *pdev)
864{ 863{
865 struct stu300_dev *dev; 864 struct stu300_dev *dev;
866 struct i2c_adapter *adap; 865 struct i2c_adapter *adap;
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
966#define STU300_I2C_PM NULL 965#define STU300_I2C_PM NULL
967#endif 966#endif
968 967
969static int __exit 968static int stu300_remove(struct platform_device *pdev)
970stu300_remove(struct platform_device *pdev)
971{ 969{
972 struct stu300_dev *dev = platform_get_drvdata(pdev); 970 struct stu300_dev *dev = platform_get_drvdata(pdev);
973 971
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = {
989 .pm = STU300_I2C_PM, 987 .pm = STU300_I2C_PM,
990 .of_match_table = stu300_dt_match, 988 .of_match_table = stu300_dt_match,
991 }, 989 },
992 .remove = __exit_p(stu300_remove), 990 .probe = stu300_probe,
991 .remove = stu300_remove,
993 992
994}; 993};
995 994
996static int __init stu300_init(void) 995static int __init stu300_init(void)
997{ 996{
998 return platform_driver_probe(&stu300_i2c_driver, stu300_probe); 997 return platform_driver_register(&stu300_i2c_driver);
999} 998}
1000 999
1001static void __exit stu300_exit(void) 1000static void __exit stu300_exit(void)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 29d3f045a2bf..3be58f89ac77 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1134,6 +1134,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
1134 acpi_handle handle; 1134 acpi_handle handle;
1135 acpi_status status; 1135 acpi_status status;
1136 1136
1137 if (!adap->dev.parent)
1138 return;
1139
1137 handle = ACPI_HANDLE(adap->dev.parent); 1140 handle = ACPI_HANDLE(adap->dev.parent);
1138 if (!handle) 1141 if (!handle)
1139 return; 1142 return;
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 74b41ae690f3..928656e241dd 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
200 arb->parent = of_find_i2c_adapter_by_node(parent_np); 200 arb->parent = of_find_i2c_adapter_by_node(parent_np);
201 if (!arb->parent) { 201 if (!arb->parent) {
202 dev_err(dev, "Cannot find parent bus\n"); 202 dev_err(dev, "Cannot find parent bus\n");
203 return -EINVAL; 203 return -EPROBE_DEFER;
204 } 204 }
205 205
206 /* Actually add the mux adapter */ 206 /* Actually add the mux adapter */
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5d4a99ba743e..a764da777f08 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
66 struct device_node *adapter_np, *child; 66 struct device_node *adapter_np, *child;
67 struct i2c_adapter *adapter; 67 struct i2c_adapter *adapter;
68 unsigned *values, *gpios; 68 unsigned *values, *gpios;
69 int i = 0; 69 int i = 0, ret;
70 70
71 if (!np) 71 if (!np)
72 return -ENODEV; 72 return -ENODEV;
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
79 adapter = of_find_i2c_adapter_by_node(adapter_np); 79 adapter = of_find_i2c_adapter_by_node(adapter_np);
80 if (!adapter) { 80 if (!adapter) {
81 dev_err(&pdev->dev, "Cannot find parent bus\n"); 81 dev_err(&pdev->dev, "Cannot find parent bus\n");
82 return -ENODEV; 82 return -EPROBE_DEFER;
83 } 83 }
84 mux->data.parent = i2c_adapter_id(adapter); 84 mux->data.parent = i2c_adapter_id(adapter);
85 put_device(&adapter->dev); 85 put_device(&adapter->dev);
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
116 return -ENOMEM; 116 return -ENOMEM;
117 } 117 }
118 118
119 for (i = 0; i < mux->data.n_gpios; i++) 119 for (i = 0; i < mux->data.n_gpios; i++) {
120 gpios[i] = of_get_named_gpio(np, "mux-gpios", i); 120 ret = of_get_named_gpio(np, "mux-gpios", i);
121 if (ret < 0)
122 return ret;
123 gpios[i] = ret;
124 }
121 125
122 mux->data.gpios = gpios; 126 mux->data.gpios = gpios;
123 127
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
177 if (!parent) { 181 if (!parent) {
178 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 182 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
179 mux->data.parent); 183 mux->data.parent);
180 return -ENODEV; 184 return -EPROBE_DEFER;
181 } 185 }
182 186
183 mux->parent = parent; 187 mux->parent = parent;
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 69a91732ae65..68a37157377d 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
113 adapter = of_find_i2c_adapter_by_node(adapter_np); 113 adapter = of_find_i2c_adapter_by_node(adapter_np);
114 if (!adapter) { 114 if (!adapter) {
115 dev_err(mux->dev, "Cannot find parent bus\n"); 115 dev_err(mux->dev, "Cannot find parent bus\n");
116 return -ENODEV; 116 return -EPROBE_DEFER;
117 } 117 }
118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter); 118 mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
119 put_device(&adapter->dev); 119 put_device(&adapter->dev);
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
211 if (!mux->parent) { 211 if (!mux->parent) {
212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 212 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
213 mux->pdata->parent_bus_num); 213 mux->pdata->parent_bus_num);
214 ret = -ENODEV; 214 ret = -EPROBE_DEFER;
215 goto err; 215 goto err;
216 } 216 }
217 217
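The i2c mux drivers above (gpio arbitrator, gpio mux, pinctrl mux) now return -EPROBE_DEFER rather than -ENODEV/-EINVAL when the parent adapter has not been registered yet, so the driver core retries the probe later instead of failing permanently. A hedged sketch of that pattern; the adapter lookup below is illustrative only (the real drivers resolve the parent from the device tree):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>

static int example_mux_probe(struct platform_device *pdev)
{
	struct i2c_adapter *parent;

	parent = i2c_get_adapter(0);	/* illustrative lookup only */
	if (!parent)
		return -EPROBE_DEFER;	/* parent not there yet: retry later */

	/* ... register the mux channels on top of "parent" ... */

	i2c_put_adapter(parent);
	return 0;
}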
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d0a79a4bce1c..ba6f6a91dfff 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi)
185 185
186 iio_device_unregister(indio_dev); 186 iio_device_unregister(indio_dev);
187 187
188 if (!IS_ERR(reg)) { 188 if (!IS_ERR(reg))
189 regulator_disable(reg); 189 regulator_disable(reg);
190 regulator_put(reg);
191 }
192 190
193 return 0; 191 return 0;
194} 192}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a7b30be86ae0..52605c0ea3a6 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
525 } 525 }
526 526
527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
528 if (indio_dev == NULL) 528 if (indio_dev == NULL) {
529 return -ENOMEM; 529 ret = -ENOMEM;
530 goto error_disable_clk;
531 }
530 532
531 st = iio_priv(indio_dev); 533 st = iio_priv(indio_dev);
532 534
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2710f7245c3b..2db7dcd826b9 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
477 indio_dev->currentmode = INDIO_DIRECT_MODE; 477 indio_dev->currentmode = INDIO_DIRECT_MODE;
478 if (indio_dev->setup_ops->postdisable) 478 if (indio_dev->setup_ops->postdisable)
479 indio_dev->setup_ops->postdisable(indio_dev); 479 indio_dev->setup_ops->postdisable(indio_dev);
480
481 if (indio_dev->available_scan_masks == NULL)
482 kfree(indio_dev->active_scan_mask);
480} 483}
481 484
482int iio_update_buffers(struct iio_dev *indio_dev, 485int iio_update_buffers(struct iio_dev *indio_dev,
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 8e84cd522e49..f95c6979efd8 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -852,7 +852,6 @@ static void iio_dev_release(struct device *device)
852 iio_device_unregister_trigger_consumer(indio_dev); 852 iio_device_unregister_trigger_consumer(indio_dev);
853 iio_device_unregister_eventset(indio_dev); 853 iio_device_unregister_eventset(indio_dev);
854 iio_device_unregister_sysfs(indio_dev); 854 iio_device_unregister_sysfs(indio_dev);
855 iio_device_unregister_debugfs(indio_dev);
856 855
857 ida_simple_remove(&iio_ida, indio_dev->id); 856 ida_simple_remove(&iio_ida, indio_dev->id);
858 kfree(indio_dev); 857 kfree(indio_dev);
@@ -1087,6 +1086,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
1087 1086
1088 if (indio_dev->chrdev.dev) 1087 if (indio_dev->chrdev.dev)
1089 cdev_del(&indio_dev->chrdev); 1088 cdev_del(&indio_dev->chrdev);
1089 iio_device_unregister_debugfs(indio_dev);
1090 1090
1091 iio_disable_all_buffers(indio_dev); 1091 iio_disable_all_buffers(indio_dev);
1092 1092
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e8d2849cc81d..cab3bc7494a2 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -29,9 +29,9 @@
29#define ST_MAGN_NUMBER_DATA_CHANNELS 3 29#define ST_MAGN_NUMBER_DATA_CHANNELS 3
30 30
31/* DEFAULT VALUE FOR SENSORS */ 31/* DEFAULT VALUE FOR SENSORS */
32#define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 32#define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03
33#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 33#define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07
34#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 34#define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05
35 35
36/* FULLSCALE */ 36/* FULLSCALE */
37#define ST_MAGN_FS_AVL_1300MG 1300 37#define ST_MAGN_FS_AVL_1300MG 1300
@@ -117,16 +117,16 @@
117static const struct iio_chan_spec st_magn_16bit_channels[] = { 117static const struct iio_chan_spec st_magn_16bit_channels[] = {
118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 118 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 119 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, 120 ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16,
121 ST_MAGN_DEFAULT_OUT_X_L_ADDR), 121 ST_MAGN_DEFAULT_OUT_X_H_ADDR),
122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 122 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 123 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, 124 ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16,
125 ST_MAGN_DEFAULT_OUT_Y_L_ADDR), 125 ST_MAGN_DEFAULT_OUT_Y_H_ADDR),
126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN, 126 ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), 127 BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, 128 ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16,
129 ST_MAGN_DEFAULT_OUT_Z_L_ADDR), 129 ST_MAGN_DEFAULT_OUT_Z_H_ADDR),
130 IIO_CHAN_SOFT_TIMESTAMP(3) 130 IIO_CHAN_SOFT_TIMESTAMP(3)
131}; 131};
132 132
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..b84791f03a27 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
31 libibverbs, libibcm and a hardware driver library from 31 libibverbs, libibcm and a hardware driver library from
32 <http://www.openfabrics.org/git/>. 32 <http://www.openfabrics.org/git/>.
33 33
34config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
35 bool "Experimental and unstable ABI for userspace access to flow steering verbs"
36 depends on INFINIBAND_USER_ACCESS
37 depends on STAGING
38 ---help---
39 The final ABI for userspace access to flow steering verbs
40 has not been defined. To use the current ABI, *WHICH WILL
41 CHANGE IN THE FUTURE*, say Y here.
42
43 If unsure, say N.
44
34config INFINIBAND_USER_MEM 45config INFINIBAND_USER_MEM
35 bool 46 bool
36 depends on INFINIBAND_USER_ACCESS != n 47 depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index d040b877475f..d8f9c6c272d7 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
217IB_UVERBS_DECLARE_CMD(create_xsrq); 217IB_UVERBS_DECLARE_CMD(create_xsrq);
218IB_UVERBS_DECLARE_CMD(open_xrcd); 218IB_UVERBS_DECLARE_CMD(open_xrcd);
219IB_UVERBS_DECLARE_CMD(close_xrcd); 219IB_UVERBS_DECLARE_CMD(close_xrcd);
220#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
220IB_UVERBS_DECLARE_CMD(create_flow); 221IB_UVERBS_DECLARE_CMD(create_flow);
221IB_UVERBS_DECLARE_CMD(destroy_flow); 222IB_UVERBS_DECLARE_CMD(destroy_flow);
223#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
222 224
223#endif /* UVERBS_H */ 225#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f2b81b9ee0d6..2f0f01b70e3b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; 54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
57#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
57static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; 58static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
58 60
59#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 61#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
60 do { \ 62 do { \
@@ -2599,6 +2601,7 @@ out_put:
2599 return ret ? ret : in_len; 2601 return ret ? ret : in_len;
2600} 2602}
2601 2603
2604#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
2602static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, 2605static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
2603 union ib_flow_spec *ib_spec) 2606 union ib_flow_spec *ib_spec)
2604{ 2607{
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
2824 2827
2825 return ret ? ret : in_len; 2828 return ret ? ret : in_len;
2826} 2829}
2830#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
2827 2831
2828static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 2832static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2829 struct ib_uverbs_create_xsrq *cmd, 2833 struct ib_uverbs_create_xsrq *cmd,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 75ad86c4abf8..2df31f68ea09 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, 115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, 116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, 117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
118#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
118 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, 119 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
119 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow 120 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
121#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
120}; 122};
121 123
122static void ib_uverbs_add_one(struct ib_device *device); 124static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
605 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) 607 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
606 return -ENOSYS; 608 return -ENOSYS;
607 609
610#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
608 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { 611 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
609 struct ib_uverbs_cmd_hdr_ex hdr_ex; 612 struct ib_uverbs_cmd_hdr_ex hdr_ex;
610 613
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
621 (hdr_ex.out_words + 624 (hdr_ex.out_words +
622 hdr_ex.provider_out_words) * 4); 625 hdr_ex.provider_out_words) * 4);
623 } else { 626 } else {
627#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
624 if (hdr.in_words * 4 != count) 628 if (hdr.in_words * 4 != count)
625 return -EINVAL; 629 return -EINVAL;
626 630
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
628 buf + sizeof(hdr), 632 buf + sizeof(hdr),
629 hdr.in_words * 4, 633 hdr.in_words * 4,
630 hdr.out_words * 4); 634 hdr.out_words * 4);
635#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
631 } 636 }
637#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
632} 638}
633 639
634static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) 640static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
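The InfiniBand hunks above fence the experimental userspace flow-steering commands behind CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING (which in turn depends on STAGING), so the unstable ABI is not exposed in default builds. A small, hedged sketch of the general compile-time guard pattern; the symbol and function below are illustrative, not part of the patch:

#include <linux/errno.h>

#ifdef CONFIG_EXAMPLE_EXPERIMENTAL_ABI
static long example_cmd_experimental(void)
{
	/* handling of the not-yet-stable command would go here */
	return 0;
}
#else
static inline long example_cmd_experimental(void)
{
	return -ENOSYS;		/* command not built in */
}
#endif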
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index d5d1929753e4..cedda25232be 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
141 return "C2_QP_STATE_ERROR"; 141 return "C2_QP_STATE_ERROR";
142 default: 142 default:
143 return "<invalid QP state>"; 143 return "<invalid QP state>";
144 }; 144 }
145} 145}
146 146
147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) 147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf4..f0612645de99 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
1693 1693
1694#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
1694 ibdev->ib_dev.uverbs_cmd_mask |= 1695 ibdev->ib_dev.uverbs_cmd_mask |=
1695 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | 1696 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
1696 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); 1697 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
1698#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
1697 } 1699 }
1698 1700
1699 mlx4_ib_alloc_eqs(dev, ibdev); 1701 mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3f831de9a4d8..b1a6cb3a2809 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
164static int alloc_comp_eqs(struct mlx5_ib_dev *dev) 164static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
165{ 165{
166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
167 char name[MLX5_MAX_EQ_NAME];
167 struct mlx5_eq *eq, *n; 168 struct mlx5_eq *eq, *n;
168 int ncomp_vec; 169 int ncomp_vec;
169 int nent; 170 int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
180 goto clean; 181 goto clean;
181 } 182 }
182 183
183 snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); 184 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
184 err = mlx5_create_map_eq(&dev->mdev, eq, 185 err = mlx5_create_map_eq(&dev->mdev, eq,
185 i + MLX5_EQ_VEC_COMP_BASE, nent, 0, 186 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
186 eq->name, 187 name, &dev->mdev.priv.uuari.uars[0]);
187 &dev->mdev.priv.uuari.uars[0]);
188 if (err) { 188 if (err) {
189 kfree(eq); 189 kfree(eq);
190 goto clean; 190 goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
301 props->max_srq_sge = max_rq_sg - 1; 301 props->max_srq_sge = max_rq_sg - 1;
302 props->max_fast_reg_page_list_len = (unsigned int)-1; 302 props->max_fast_reg_page_list_len = (unsigned int)-1;
303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; 303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
304 props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? 304 props->atomic_cap = IB_ATOMIC_NONE;
305 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 305 props->masked_atomic_cap = IB_ATOMIC_NONE;
306 props->masked_atomic_cap = IB_ATOMIC_HCA;
307 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); 306 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
308 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; 307 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
309 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; 308 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
1006 ibev.device = &ibdev->ib_dev; 1005 ibev.device = &ibdev->ib_dev;
1007 ibev.element.port_num = port; 1006 ibev.element.port_num = port;
1008 1007
1008 if (port < 1 || port > ibdev->num_ports) {
1009 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
1010 return;
1011 }
1012
1009 if (ibdev->ib_active) 1013 if (ibdev->ib_active)
1010 ib_dispatch_event(&ibev); 1014 ib_dispatch_event(&ibev);
1011} 1015}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f0..3453580b1eb2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
42 DEF_CACHE_SIZE = 10, 42 DEF_CACHE_SIZE = 10,
43}; 43};
44 44
45enum {
46 MLX5_UMR_ALIGN = 2048
47};
48
45static __be64 *mr_align(__be64 *ptr, int align) 49static __be64 *mr_align(__be64 *ptr, int align)
46{ 50{
47 unsigned long mask = align - 1; 51 unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
61 65
62static int add_keys(struct mlx5_ib_dev *dev, int c, int num) 66static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
63{ 67{
64 struct device *ddev = dev->ib_dev.dma_device;
65 struct mlx5_mr_cache *cache = &dev->cache; 68 struct mlx5_mr_cache *cache = &dev->cache;
66 struct mlx5_cache_ent *ent = &cache->ent[c]; 69 struct mlx5_cache_ent *ent = &cache->ent[c];
67 struct mlx5_create_mkey_mbox_in *in; 70 struct mlx5_create_mkey_mbox_in *in;
68 struct mlx5_ib_mr *mr; 71 struct mlx5_ib_mr *mr;
69 int npages = 1 << ent->order; 72 int npages = 1 << ent->order;
70 int size = sizeof(u64) * npages;
71 int err = 0; 73 int err = 0;
72 int i; 74 int i;
73 75
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
83 } 85 }
84 mr->order = ent->order; 86 mr->order = ent->order;
85 mr->umred = 1; 87 mr->umred = 1;
86 mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
87 if (!mr->pas) {
88 kfree(mr);
89 err = -ENOMEM;
90 goto out;
91 }
92 mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
93 DMA_TO_DEVICE);
94 if (dma_mapping_error(ddev, mr->dma)) {
95 kfree(mr->pas);
96 kfree(mr);
97 err = -ENOMEM;
98 goto out;
99 }
100
101 in->seg.status = 1 << 6; 88 in->seg.status = 1 << 6;
102 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); 89 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
103 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 90 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
108 sizeof(*in)); 95 sizeof(*in));
109 if (err) { 96 if (err) {
110 mlx5_ib_warn(dev, "create mkey failed %d\n", err); 97 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
111 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
112 kfree(mr->pas);
113 kfree(mr); 98 kfree(mr);
114 goto out; 99 goto out;
115 } 100 }
@@ -129,11 +114,9 @@ out:
129 114
130static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) 115static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
131{ 116{
132 struct device *ddev = dev->ib_dev.dma_device;
133 struct mlx5_mr_cache *cache = &dev->cache; 117 struct mlx5_mr_cache *cache = &dev->cache;
134 struct mlx5_cache_ent *ent = &cache->ent[c]; 118 struct mlx5_cache_ent *ent = &cache->ent[c];
135 struct mlx5_ib_mr *mr; 119 struct mlx5_ib_mr *mr;
136 int size;
137 int err; 120 int err;
138 int i; 121 int i;
139 122
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
149 ent->size--; 132 ent->size--;
150 spin_unlock(&ent->lock); 133 spin_unlock(&ent->lock);
151 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 134 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
152 if (err) { 135 if (err)
153 mlx5_ib_warn(dev, "failed destroy mkey\n"); 136 mlx5_ib_warn(dev, "failed destroy mkey\n");
154 } else { 137 else
155 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
156 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
157 kfree(mr->pas);
158 kfree(mr); 138 kfree(mr);
159 }
160 } 139 }
161} 140}
162 141
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
408 387
409static void clean_keys(struct mlx5_ib_dev *dev, int c) 388static void clean_keys(struct mlx5_ib_dev *dev, int c)
410{ 389{
411 struct device *ddev = dev->ib_dev.dma_device;
412 struct mlx5_mr_cache *cache = &dev->cache; 390 struct mlx5_mr_cache *cache = &dev->cache;
413 struct mlx5_cache_ent *ent = &cache->ent[c]; 391 struct mlx5_cache_ent *ent = &cache->ent[c];
414 struct mlx5_ib_mr *mr; 392 struct mlx5_ib_mr *mr;
415 int size;
416 int err; 393 int err;
417 394
395 cancel_delayed_work(&ent->dwork);
418 while (1) { 396 while (1) {
419 spin_lock(&ent->lock); 397 spin_lock(&ent->lock);
420 if (list_empty(&ent->head)) { 398 if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
427 ent->size--; 405 ent->size--;
428 spin_unlock(&ent->lock); 406 spin_unlock(&ent->lock);
429 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 407 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
430 if (err) { 408 if (err)
431 mlx5_ib_warn(dev, "failed destroy mkey\n"); 409 mlx5_ib_warn(dev, "failed destroy mkey\n");
432 } else { 410 else
433 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
434 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
435 kfree(mr->pas);
436 kfree(mr); 411 kfree(mr);
437 }
438 } 412 }
439} 413}
440 414
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
540 int i; 514 int i;
541 515
542 dev->cache.stopped = 1; 516 dev->cache.stopped = 1;
543 destroy_workqueue(dev->cache.wq); 517 flush_workqueue(dev->cache.wq);
544 518
545 mlx5_mr_cache_debugfs_cleanup(dev); 519 mlx5_mr_cache_debugfs_cleanup(dev);
546 520
547 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 521 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
548 clean_keys(dev, i); 522 clean_keys(dev, i);
549 523
524 destroy_workqueue(dev->cache.wq);
525
550 return 0; 526 return 0;
551} 527}
552 528
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
675 int page_shift, int order, int access_flags) 651 int page_shift, int order, int access_flags)
676{ 652{
677 struct mlx5_ib_dev *dev = to_mdev(pd->device); 653 struct mlx5_ib_dev *dev = to_mdev(pd->device);
654 struct device *ddev = dev->ib_dev.dma_device;
678 struct umr_common *umrc = &dev->umrc; 655 struct umr_common *umrc = &dev->umrc;
679 struct ib_send_wr wr, *bad; 656 struct ib_send_wr wr, *bad;
680 struct mlx5_ib_mr *mr; 657 struct mlx5_ib_mr *mr;
681 struct ib_sge sg; 658 struct ib_sge sg;
659 int size = sizeof(u64) * npages;
682 int err; 660 int err;
683 int i; 661 int i;
684 662
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
697 if (!mr) 675 if (!mr)
698 return ERR_PTR(-EAGAIN); 676 return ERR_PTR(-EAGAIN);
699 677
700 mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); 678 mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
679 if (!mr->pas) {
680 err = -ENOMEM;
681 goto error;
682 }
683
684 mlx5_ib_populate_pas(dev, umem, page_shift,
685 mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
686
687 mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
688 DMA_TO_DEVICE);
689 if (dma_mapping_error(ddev, mr->dma)) {
690 kfree(mr->pas);
691 err = -ENOMEM;
692 goto error;
693 }
701 694
702 memset(&wr, 0, sizeof(wr)); 695 memset(&wr, 0, sizeof(wr));
703 wr.wr_id = (u64)(unsigned long)mr; 696 wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
718 wait_for_completion(&mr->done); 711 wait_for_completion(&mr->done);
719 up(&umrc->sem); 712 up(&umrc->sem);
720 713
714 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
715 kfree(mr->pas);
716
721 if (mr->status != IB_WC_SUCCESS) { 717 if (mr->status != IB_WC_SUCCESS) {
722 mlx5_ib_warn(dev, "reg umr failed\n"); 718 mlx5_ib_warn(dev, "reg umr failed\n");
723 err = -EFAULT; 719 err = -EFAULT;
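The mlx5 mr.c changes stop keeping a DMA-mapped page-list buffer on every cached MR: reg_umr() now allocates the array itself, aligns it to MLX5_UMR_ALIGN (2048 bytes), maps it only for the single UMR post, and unmaps/frees it once the request completes. A hedged sketch of that over-allocate/align/map-for-one-command pattern, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define EXAMPLE_ALIGN	2048	/* stand-in for MLX5_UMR_ALIGN */

static int example_post_aligned(struct device *dev, size_t size)
{
	void *buf, *aligned;
	dma_addr_t dma;
	int err = 0;

	/* over-allocate so the payload can start on an aligned address */
	buf = kmalloc(size + EXAMPLE_ALIGN - 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	aligned = PTR_ALIGN(buf, EXAMPLE_ALIGN);
	dma = dma_map_single(dev, aligned, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		err = -ENOMEM;
		goto out_free;
	}

	/* ... post the command that consumes "dma" and wait for it ... */

	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	kfree(buf);
	return err;
}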
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd303..5659ea880741 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
203 203
204 switch (qp_type) { 204 switch (qp_type) {
205 case IB_QPT_XRC_INI: 205 case IB_QPT_XRC_INI:
206 size = sizeof(struct mlx5_wqe_xrc_seg); 206 size += sizeof(struct mlx5_wqe_xrc_seg);
207 /* fall through */ 207 /* fall through */
208 case IB_QPT_RC: 208 case IB_QPT_RC:
209 size += sizeof(struct mlx5_wqe_ctrl_seg) + 209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
211 sizeof(struct mlx5_wqe_raddr_seg); 211 sizeof(struct mlx5_wqe_raddr_seg);
212 break; 212 break;
213 213
214 case IB_QPT_XRC_TGT:
215 return 0;
216
214 case IB_QPT_UC: 217 case IB_QPT_UC:
215 size = sizeof(struct mlx5_wqe_ctrl_seg) + 218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
216 sizeof(struct mlx5_wqe_raddr_seg); 219 sizeof(struct mlx5_wqe_raddr_seg);
217 break; 220 break;
218 221
219 case IB_QPT_UD: 222 case IB_QPT_UD:
220 case IB_QPT_SMI: 223 case IB_QPT_SMI:
221 case IB_QPT_GSI: 224 case IB_QPT_GSI:
222 size = sizeof(struct mlx5_wqe_ctrl_seg) + 225 size += sizeof(struct mlx5_wqe_ctrl_seg) +
223 sizeof(struct mlx5_wqe_datagram_seg); 226 sizeof(struct mlx5_wqe_datagram_seg);
224 break; 227 break;
225 228
226 case MLX5_IB_QPT_REG_UMR: 229 case MLX5_IB_QPT_REG_UMR:
227 size = sizeof(struct mlx5_wqe_ctrl_seg) + 230 size += sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 231 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
229 sizeof(struct mlx5_mkey_seg); 232 sizeof(struct mlx5_mkey_seg);
230 break; 233 break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
270 return wqe_size; 273 return wqe_size;
271 274
272 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { 275 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
273 mlx5_ib_dbg(dev, "\n"); 276 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
277 wqe_size, dev->mdev.caps.max_sq_desc_sz);
274 return -EINVAL; 278 return -EINVAL;
275 } 279 }
276 280
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
280 284
281 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 285 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
282 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 286 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
287 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
288 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
289 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
290 return -ENOMEM;
291 }
283 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 292 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
284 qp->sq.max_gs = attr->cap.max_send_sge; 293 qp->sq.max_gs = attr->cap.max_send_sge;
285 qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); 294 qp->sq.max_post = wq_size / wqe_size;
295 attr->cap.max_send_wr = qp->sq.max_post;
286 296
287 return wq_size; 297 return wq_size;
288} 298}
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1280 MLX5_QP_OPTPAR_Q_KEY, 1290 MLX5_QP_OPTPAR_Q_KEY,
1281 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | 1291 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1282 MLX5_QP_OPTPAR_Q_KEY, 1292 MLX5_QP_OPTPAR_Q_KEY,
1293 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1294 MLX5_QP_OPTPAR_RRE |
1295 MLX5_QP_OPTPAR_RAE |
1296 MLX5_QP_OPTPAR_RWE |
1297 MLX5_QP_OPTPAR_PKEY_INDEX,
1283 }, 1298 },
1284 }, 1299 },
1285 [MLX5_QP_STATE_RTR] = { 1300 [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1314 [MLX5_QP_STATE_RTS] = { 1329 [MLX5_QP_STATE_RTS] = {
1315 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 1330 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1316 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 1331 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1332 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1333 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1334 MLX5_QP_OPTPAR_RWE |
1335 MLX5_QP_OPTPAR_RAE |
1336 MLX5_QP_OPTPAR_RRE,
1317 }, 1337 },
1318 }, 1338 },
1319}; 1339};
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1651 rseg->reserved = 0; 1671 rseg->reserved = 0;
1652} 1672}
1653 1673
1654static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
1655{
1656 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1657 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1658 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1659 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1660 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1661 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1662 } else {
1663 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1664 aseg->compare = 0;
1665 }
1666}
1667
1668static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
1669 struct ib_send_wr *wr)
1670{
1671 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1672 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1673 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1674 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1675}
1676
1677static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 1674static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1678 struct ib_send_wr *wr) 1675 struct ib_send_wr *wr)
1679{ 1676{
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2063 2060
2064 case IB_WR_ATOMIC_CMP_AND_SWP: 2061 case IB_WR_ATOMIC_CMP_AND_SWP:
2065 case IB_WR_ATOMIC_FETCH_AND_ADD: 2062 case IB_WR_ATOMIC_FETCH_AND_ADD:
2066 set_raddr_seg(seg, wr->wr.atomic.remote_addr,
2067 wr->wr.atomic.rkey);
2068 seg += sizeof(struct mlx5_wqe_raddr_seg);
2069
2070 set_atomic_seg(seg, wr);
2071 seg += sizeof(struct mlx5_wqe_atomic_seg);
2072
2073 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2074 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
2075 break;
2076
2077 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 2063 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2078 set_raddr_seg(seg, wr->wr.atomic.remote_addr, 2064 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2079 wr->wr.atomic.rkey); 2065 err = -ENOSYS;
2080 seg += sizeof(struct mlx5_wqe_raddr_seg); 2066 *bad_wr = wr;
2081 2067 goto out;
2082 set_masked_atomic_seg(seg, wr);
2083 seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
2084
2085 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2086 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
2087 break;
2088 2068
2089 case IB_WR_LOCAL_INV: 2069 case IB_WR_LOCAL_INV:
2090 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2070 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 84d297afd6a9..0aa478bc291a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
295 mlx5_vfree(in); 295 mlx5_vfree(in);
296 if (err) { 296 if (err) {
297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
298 goto err_srq; 298 goto err_usr_kern_srq;
299 } 299 }
300 300
301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); 301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
316 316
317err_core: 317err_core:
318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); 318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
319
320err_usr_kern_srq:
319 if (pd->uobject) 321 if (pd->uobject)
320 destroy_srq_user(pd, srq); 322 destroy_srq_user(pd, srq);
321 else 323 else
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7c9d35f39d75..690201738993 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", 357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
358 eqe->type, eqe->subtype, eq->eqn); 358 eqe->type, eqe->subtype, eq->eqn);
359 break; 359 break;
360 }; 360 }
361 361
362 set_eqe_hw(eqe); 362 set_eqe_hw(eqe);
363 ++eq->cons_index; 363 ++eq->cons_index;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 4ed8235d2d36..50219ab2279d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
150 return IB_QPS_SQE; 150 return IB_QPS_SQE;
151 case OCRDMA_QPS_ERR: 151 case OCRDMA_QPS_ERR:
152 return IB_QPS_ERR; 152 return IB_QPS_ERR;
153 }; 153 }
154 return IB_QPS_ERR; 154 return IB_QPS_ERR;
155} 155}
156 156
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
171 return OCRDMA_QPS_SQE; 171 return OCRDMA_QPS_SQE;
172 case IB_QPS_ERR: 172 case IB_QPS_ERR:
173 return OCRDMA_QPS_ERR; 173 return OCRDMA_QPS_ERR;
174 }; 174 }
175 return OCRDMA_QPS_ERR; 175 return OCRDMA_QPS_ERR;
176} 176}
177 177
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
1982 break; 1982 break;
1983 default: 1983 default:
1984 return -EINVAL; 1984 return -EINVAL;
1985 }; 1985 }
1986 1986
1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); 1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
1988 if (!cmd) 1988 if (!cmd)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 56e004940f18..0ce7674621ea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
531 case BE_DEV_DOWN: 531 case BE_DEV_DOWN:
532 ocrdma_close(dev); 532 ocrdma_close(dev);
533 break; 533 break;
534 }; 534 }
535} 535}
536 536
537static struct ocrdma_driver ocrdma_drv = { 537static struct ocrdma_driver ocrdma_drv = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6e982bb43c31..69f1d1221a6b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
141 /* Unsupported */ 141 /* Unsupported */
142 *ib_speed = IB_SPEED_SDR; 142 *ib_speed = IB_SPEED_SDR;
143 *ib_width = IB_WIDTH_1X; 143 *ib_width = IB_WIDTH_1X;
144 }; 144 }
145} 145}
146 146
147 147
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2331 default: 2331 default:
2332 ibwc_status = IB_WC_GENERAL_ERR; 2332 ibwc_status = IB_WC_GENERAL_ERR;
2333 break; 2333 break;
2334 }; 2334 }
2335 return ibwc_status; 2335 return ibwc_status;
2336} 2336}
2337 2337
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2370 pr_err("%s() invalid opcode received = 0x%x\n", 2370 pr_err("%s() invalid opcode received = 0x%x\n",
2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2372 break; 2372 break;
2373 }; 2373 }
2374} 2374}
2375 2375
2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, 2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b5..6df23502059a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
594 594
595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
596 596
597 if (device->use_frwr) 597 if (device && device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn); 598 isert_conn_free_frwr_pool(isert_conn);
599 599
600 if (isert_conn->conn_qp) { 600 if (isert_conn->conn_qp) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 653ac6bfc57a..6c923c7039a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1588 int resp_data_len; 1588 int resp_data_len;
1589 int resp_len; 1589 int resp_len;
1590 1590
1591 resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; 1591 resp_data_len = 4;
1592 resp_len = sizeof(*srp_rsp) + resp_data_len; 1592 resp_len = sizeof(*srp_rsp) + resp_data_len;
1593 1593
1594 srp_rsp = ioctx->ioctx.buf; 1594 srp_rsp = ioctx->ioctx.buf;
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1600 + atomic_xchg(&ch->req_lim_delta, 0)); 1600 + atomic_xchg(&ch->req_lim_delta, 0));
1601 srp_rsp->tag = tag; 1601 srp_rsp->tag = tag;
1602 1602
1603 if (rsp_code != SRP_TSK_MGMT_SUCCESS) { 1603 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1604 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1604 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1605 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); 1605 srp_rsp->data[3] = rsp_code;
1606 srp_rsp->data[3] = rsp_code;
1607 }
1608 1606
1609 return resp_len; 1607 return resp_len;
1610} 1608}
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w)
2358 transport_deregister_session(se_sess); 2356 transport_deregister_session(se_sess);
2359 ch->sess = NULL; 2357 ch->sess = NULL;
2360 2358
2359 ib_destroy_cm_id(ch->cm_id);
2360
2361 srpt_destroy_ch_ib(ch); 2361 srpt_destroy_ch_ib(ch);
2362 2362
2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2363 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w)
2368 list_del(&ch->list); 2368 list_del(&ch->list);
2369 spin_unlock_irq(&sdev->spinlock); 2369 spin_unlock_irq(&sdev->spinlock);
2370 2370
2371 ib_destroy_cm_id(ch->cm_id);
2372
2373 if (ch->release_done) 2371 if (ch->release_done)
2374 complete(ch->release_done); 2372 complete(ch->release_done);
2375 2373
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c04469928925..e75d015024a1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
1734 */ 1734 */
1735struct input_dev *input_allocate_device(void) 1735struct input_dev *input_allocate_device(void)
1736{ 1736{
1737 static atomic_t input_no = ATOMIC_INIT(0);
1737 struct input_dev *dev; 1738 struct input_dev *dev;
1738 1739
1739 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); 1740 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
1743 device_initialize(&dev->dev); 1744 device_initialize(&dev->dev);
1744 mutex_init(&dev->mutex); 1745 mutex_init(&dev->mutex);
1745 spin_lock_init(&dev->event_lock); 1746 spin_lock_init(&dev->event_lock);
1747 init_timer(&dev->timer);
1746 INIT_LIST_HEAD(&dev->h_list); 1748 INIT_LIST_HEAD(&dev->h_list);
1747 INIT_LIST_HEAD(&dev->node); 1749 INIT_LIST_HEAD(&dev->node);
1748 1750
1751 dev_set_name(&dev->dev, "input%ld",
1752 (unsigned long) atomic_inc_return(&input_no) - 1);
1753
1749 __module_get(THIS_MODULE); 1754 __module_get(THIS_MODULE);
1750 } 1755 }
1751 1756
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
2019 */ 2024 */
2020int input_register_device(struct input_dev *dev) 2025int input_register_device(struct input_dev *dev)
2021{ 2026{
2022 static atomic_t input_no = ATOMIC_INIT(0);
2023 struct input_devres *devres = NULL; 2027 struct input_devres *devres = NULL;
2024 struct input_handler *handler; 2028 struct input_handler *handler;
2025 unsigned int packet_size; 2029 unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
2059 * If delay and period are pre-set by the driver, then autorepeating 2063 * If delay and period are pre-set by the driver, then autorepeating
2060 * is handled by the driver itself and we don't do it in input.c. 2064 * is handled by the driver itself and we don't do it in input.c.
2061 */ 2065 */
2062 init_timer(&dev->timer);
2063 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { 2066 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
2064 dev->timer.data = (long) dev; 2067 dev->timer.data = (long) dev;
2065 dev->timer.function = input_repeat_key; 2068 dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
2073 if (!dev->setkeycode) 2076 if (!dev->setkeycode)
2074 dev->setkeycode = input_default_setkeycode; 2077 dev->setkeycode = input_default_setkeycode;
2075 2078
2076 dev_set_name(&dev->dev, "input%ld",
2077 (unsigned long) atomic_inc_return(&input_no) - 1);
2078
2079 error = device_add(&dev->dev); 2079 error = device_add(&dev->dev);
2080 if (error) 2080 if (error)
2081 goto err_free_vals; 2081 goto err_free_vals;
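The input core change above assigns the "inputN" name and initialises the timer already in input_allocate_device() rather than at registration time, so an allocated-but-unregistered device has a usable dev_name(). A hedged sketch of allocate-time naming, with illustrative names:

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/slab.h>

static atomic_t example_no = ATOMIC_INIT(0);

static struct device *example_allocate(void)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	device_initialize(dev);
	/* name the object at allocation time so callers see a valid
	 * dev_name() even before it is registered */
	dev_set_name(dev, "example%d", atomic_inc_return(&example_no) - 1);
	return dev;
}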
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 134c3b404a54..a2e758d27584 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
787 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 787 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
788 788
789 if (pdata) 789 if (pdata) {
790 error = pxa27x_keypad_build_keycode(keypad); 790 error = pxa27x_keypad_build_keycode(keypad);
791 else 791 } else {
792 error = pxa27x_keypad_build_keycode_from_dt(keypad); 792 error = pxa27x_keypad_build_keycode_from_dt(keypad);
793 /*
794 * Data that we get from DT resides in dynamically
795 * allocated memory so we need to update our pdata
796 * pointer.
797 */
798 pdata = keypad->pdata;
799 }
793 if (error) { 800 if (error) {
794 dev_err(&pdev->dev, "failed to build keycode\n"); 801 dev_err(&pdev->dev, "failed to build keycode\n");
795 goto failed_put_clk; 802 goto failed_put_clk;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 082684e7f390..9365535ba7f1 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
351 if (status) { 351 if (status) {
352 if (status == -ESHUTDOWN) 352 if (status == -ESHUTDOWN)
353 return; 353 return;
354 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 354 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
355 __func__, status);
356 goto out;
355 } 357 }
356 358
357 /* Special keys */ 359 /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
418 dev->ctl_data->byte[2], 420 dev->ctl_data->byte[2],
419 dev->ctl_data->byte[3]); 421 dev->ctl_data->byte[3]);
420 422
421 if (status) 423 if (status) {
422 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 424 if (status == -ESHUTDOWN)
425 return;
426 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
427 __func__, status);
428 }
423 429
424 spin_lock(&dev->ctl_submit_lock); 430 spin_lock(&dev->ctl_submit_lock);
425 431
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
427 433
428 if (likely(!dev->shutdown)) { 434 if (likely(!dev->shutdown)) {
429 435
430 if (dev->buzzer_pending) { 436 if (dev->buzzer_pending || status) {
431 dev->buzzer_pending = 0; 437 dev->buzzer_pending = 0;
432 dev->ctl_urb_pending = 1; 438 dev->ctl_urb_pending = 1;
433 cm109_submit_buzz_toggle(dev); 439 cm109_submit_buzz_toggle(dev);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7c5d72a6a26a..83658472ad25 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
106 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 107 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 108 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 109 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 78e4de42efaa..52c9ebf94729 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -223,21 +223,26 @@ static int i8042_flush(void)
223{ 223{
224 unsigned long flags; 224 unsigned long flags;
225 unsigned char data, str; 225 unsigned char data, str;
226 int i = 0; 226 int count = 0;
227 int retval = 0;
227 228
228 spin_lock_irqsave(&i8042_lock, flags); 229 spin_lock_irqsave(&i8042_lock, flags);
229 230
230 while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) { 231 while ((str = i8042_read_status()) & I8042_STR_OBF) {
231 udelay(50); 232 if (count++ < I8042_BUFFER_SIZE) {
232 data = i8042_read_data(); 233 udelay(50);
233 i++; 234 data = i8042_read_data();
234 dbg("%02x <- i8042 (flush, %s)\n", 235 dbg("%02x <- i8042 (flush, %s)\n",
235 data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); 236 data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
237 } else {
238 retval = -EIO;
239 break;
240 }
236 } 241 }
237 242
238 spin_unlock_irqrestore(&i8042_lock, flags); 243 spin_unlock_irqrestore(&i8042_lock, flags);
239 244
240 return i; 245 return retval;
241} 246}
242 247
243/* 248/*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
849 854
850static int i8042_controller_check(void) 855static int i8042_controller_check(void)
851{ 856{
852 if (i8042_flush() == I8042_BUFFER_SIZE) { 857 if (i8042_flush()) {
853 pr_err("No controller found\n"); 858 pr_err("No controller found\n");
854 return -ENODEV; 859 return -ENODEV;
855 } 860 }
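
The i8042 hunks above change the flush loop's contract: instead of counting drained bytes and letting the caller compare the count against I8042_BUFFER_SIZE, the loop itself gives up with -EIO once the limit is exceeded, and the caller only checks for a non-zero return. A rough user-space model of that pattern, with the amount of pending controller data reduced to a plain integer (all names here are invented for the sketch):

#include <stdio.h>

#define BUFFER_SIZE 16

/* Toy model of the reworked flush: drain whatever is pending, but report an
 * error once more than BUFFER_SIZE bytes had to be discarded, instead of
 * returning the raw byte count as the old code did. */
static int flush(int npending)
{
	int count = 0;

	while (npending > 0) {			/* "output buffer full" still set */
		if (count++ < BUFFER_SIZE)
			npending--;		/* read and throw away one byte */
		else
			return -1;		/* never drains: signal an error */
	}
	return 0;				/* drained, whatever the count was */
}

int main(void)
{
	printf("%d\n", flush(4));	/* 0: normal drain */
	printf("%d\n", flush(40));	/* -1: too much pending data */
	return 0;
}
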
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79b69ea47f74..e53416a4d7f3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
1031} 1031}
1032 1032
1033static enum power_supply_property wacom_battery_props[] = { 1033static enum power_supply_property wacom_battery_props[] = {
1034 POWER_SUPPLY_PROP_SCOPE,
1034 POWER_SUPPLY_PROP_CAPACITY 1035 POWER_SUPPLY_PROP_CAPACITY
1035}; 1036};
1036 1037
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
1042 int ret = 0; 1043 int ret = 0;
1043 1044
1044 switch (psp) { 1045 switch (psp) {
1046 case POWER_SUPPLY_PROP_SCOPE:
1047 val->intval = POWER_SUPPLY_SCOPE_DEVICE;
1048 break;
1045 case POWER_SUPPLY_PROP_CAPACITY: 1049 case POWER_SUPPLY_PROP_CAPACITY:
1046 val->intval = 1050 val->intval =
1047 wacom->wacom_wac.battery_capacity * 100 / 31; 1051 wacom->wacom_wac.battery_capacity * 100 / 31;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b2aa503c16b1..c59b797eeafa 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
2054static const struct wacom_features wacom_features_0x10D = 2054static const struct wacom_features wacom_features_0x10D =
2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x10E =
2058 { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2060static const struct wacom_features wacom_features_0x10F =
2061 { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2062 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x4001 = 2063static const struct wacom_features wacom_features_0x4001 =
2058 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2064 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2065 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
2248 { USB_DEVICE_WACOM(0x100) }, 2254 { USB_DEVICE_WACOM(0x100) },
2249 { USB_DEVICE_WACOM(0x101) }, 2255 { USB_DEVICE_WACOM(0x101) },
2250 { USB_DEVICE_WACOM(0x10D) }, 2256 { USB_DEVICE_WACOM(0x10D) },
2257 { USB_DEVICE_WACOM(0x10E) },
2258 { USB_DEVICE_WACOM(0x10F) },
2251 { USB_DEVICE_WACOM(0x300) }, 2259 { USB_DEVICE_WACOM(0x300) },
2252 { USB_DEVICE_WACOM(0x301) }, 2260 { USB_DEVICE_WACOM(0x301) },
2253 { USB_DEVICE_WACOM(0x304) }, 2261 { USB_DEVICE_WACOM(0x304) },
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index fe302e33f72e..c880ebaf1553 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -52,7 +52,7 @@ config AMD_IOMMU
52 select PCI_PRI 52 select PCI_PRI
53 select PCI_PASID 53 select PCI_PASID
54 select IOMMU_API 54 select IOMMU_API
55 depends on X86_64 && PCI && ACPI && X86_IO_APIC 55 depends on X86_64 && PCI && ACPI
56 ---help--- 56 ---help---
57 With this option you can enable support for AMD IOMMU hardware in 57 With this option you can enable support for AMD IOMMU hardware in
58 your system. An IOMMU is a hardware component which provides 58 your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index f417e89e1e7e..181c9ba929cd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -377,6 +377,7 @@ struct arm_smmu_cfg {
377 u32 cbar; 377 u32 cbar;
378 pgd_t *pgd; 378 pgd_t *pgd;
379}; 379};
380#define INVALID_IRPTNDX 0xff
380 381
381#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 382#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
382#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 383#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
840 if (IS_ERR_VALUE(ret)) { 841 if (IS_ERR_VALUE(ret)) {
841 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", 842 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
842 root_cfg->irptndx, irq); 843 root_cfg->irptndx, irq);
843 root_cfg->irptndx = -1; 844 root_cfg->irptndx = INVALID_IRPTNDX;
844 goto out_free_context; 845 goto out_free_context;
845 } 846 }
846 847
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
869 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); 870 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
870 arm_smmu_tlb_inv_context(root_cfg); 871 arm_smmu_tlb_inv_context(root_cfg);
871 872
872 if (root_cfg->irptndx != -1) { 873 if (root_cfg->irptndx != INVALID_IRPTNDX) {
873 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; 874 irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
874 free_irq(irq, domain); 875 free_irq(irq, domain);
875 } 876 }
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1857 goto out_put_parent; 1858 goto out_put_parent;
1858 } 1859 }
1859 1860
1860 arm_smmu_device_reset(smmu);
1861
1862 for (i = 0; i < smmu->num_global_irqs; ++i) { 1861 for (i = 0; i < smmu->num_global_irqs; ++i) {
1863 err = request_irq(smmu->irqs[i], 1862 err = request_irq(smmu->irqs[i],
1864 arm_smmu_global_fault, 1863 arm_smmu_global_fault,
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1876 spin_lock(&arm_smmu_devices_lock); 1875 spin_lock(&arm_smmu_devices_lock);
1877 list_add(&smmu->list, &arm_smmu_devices); 1876 list_add(&smmu->list, &arm_smmu_devices);
1878 spin_unlock(&arm_smmu_devices_lock); 1877 spin_unlock(&arm_smmu_devices_lock);
1878
1879 arm_smmu_device_reset(smmu);
1879 return 0; 1880 return 0;
1880 1881
1881out_free_irqs: 1882out_free_irqs:
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void)
1966 return ret; 1967 return ret;
1967 1968
1968 /* Oh, for a proper bus abstraction */ 1969 /* Oh, for a proper bus abstraction */
1969 if (!iommu_present(&platform_bus_type)); 1970 if (!iommu_present(&platform_bus_type))
1970 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1971 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1971 1972
1972 if (!iommu_present(&amba_bustype)); 1973 if (!iommu_present(&amba_bustype))
1973 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1974 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1974 1975
1975 return 0; 1976 return 0;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 71eb233b9ace..2a7f0dd6abab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -996,10 +996,11 @@ static void request_write(struct cached_dev *dc, struct search *s)
996 closure_bio_submit(bio, cl, s->d); 996 closure_bio_submit(bio, cl, s->d);
997 } else { 997 } else {
998 bch_writeback_add(dc); 998 bch_writeback_add(dc);
999 s->op.cache_bio = bio;
999 1000
1000 if (bio->bi_rw & REQ_FLUSH) { 1001 if (bio->bi_rw & REQ_FLUSH) {
1001 /* Also need to send a flush to the backing device */ 1002 /* Also need to send a flush to the backing device */
1002 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, 1003 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
1003 dc->disk.bio_split); 1004 dc->disk.bio_split);
1004 1005
1005 flush->bi_rw = WRITE_FLUSH; 1006 flush->bi_rw = WRITE_FLUSH;
@@ -1008,8 +1009,6 @@ static void request_write(struct cached_dev *dc, struct search *s)
1008 flush->bi_private = cl; 1009 flush->bi_private = cl;
1009 1010
1010 closure_bio_submit(flush, cl, s->d); 1011 closure_bio_submit(flush, cl, s->d);
1011 } else {
1012 s->op.cache_bio = bio;
1013 } 1012 }
1014 } 1013 }
1015out: 1014out:
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 4caa8e6d59d7..2d2b1b7588d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); 269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
270} 270}
271 271
272static void skip_metadata(struct pstore *ps)
273{
274 uint32_t stride = ps->exceptions_per_area + 1;
275 chunk_t next_free = ps->next_free;
276 if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
277 ps->next_free++;
278}
279
272/* 280/*
273 * Read or write a metadata area. Remembering to skip the first 281 * Read or write a metadata area. Remembering to skip the first
274 * chunk which holds the header. 282 * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
502 510
503 ps->current_area--; 511 ps->current_area--;
504 512
513 skip_metadata(ps);
514
505 return 0; 515 return 0;
506} 516}
507 517
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
616 struct dm_exception *e) 626 struct dm_exception *e)
617{ 627{
618 struct pstore *ps = get_info(store); 628 struct pstore *ps = get_info(store);
619 uint32_t stride;
620 chunk_t next_free;
621 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); 629 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
622 630
623 /* Is there enough room ? */ 631 /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
630 * Move onto the next free pending, making sure to take 638 * Move onto the next free pending, making sure to take
631 * into account the location of the metadata chunks. 639 * into account the location of the metadata chunks.
632 */ 640 */
633 stride = (ps->exceptions_per_area + 1); 641 ps->next_free++;
634 next_free = ++ps->next_free; 642 skip_metadata(ps);
635 if (sector_div(next_free, stride) == 1)
636 ps->next_free++;
637 643
638 atomic_inc(&ps->pending_count); 644 atomic_inc(&ps->pending_count);
639 return 0; 645 return 0;
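
The dm-snap change above factors the "step over metadata chunks" arithmetic into a single skip_metadata() helper, used both when replaying stored exceptions and when preparing new ones. The sketch below walks the same arithmetic with small assumed constants: HDR_CHUNKS stands in for NUM_SNAPSHOT_HDR_CHUNKS and EXCEPTIONS_PER_AREA is an arbitrary demo value, not the driver's; it only shows which chunk indices get skipped.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: each "area" of EXCEPTIONS_PER_AREA data
 * chunks is preceded by one metadata chunk, so metadata sits at every chunk
 * whose index modulo (EXCEPTIONS_PER_AREA + 1) equals HDR_CHUNKS. */
#define HDR_CHUNKS          1u
#define EXCEPTIONS_PER_AREA 4u

static uint64_t advance_next_free(uint64_t next_free)
{
	uint64_t stride = EXCEPTIONS_PER_AREA + 1;

	next_free++;				/* claim the next chunk */
	if (next_free % stride == HDR_CHUNKS)	/* landed on a metadata chunk */
		next_free++;			/* skip it, as skip_metadata() does */
	return next_free;
}

int main(void)
{
	uint64_t c = HDR_CHUNKS + 1;		/* first data chunk */

	for (int i = 0; i < 8; i++) {
		printf("%llu ", (unsigned long long)c);
		c = advance_next_free(c);
	}
	printf("\n");	/* 2 3 4 5 7 8 9 10: chunk 6 (metadata) is skipped */
	return 0;
}
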
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf4d7e1d5e1..561a65f82e26 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8111 u64 *p; 8111 u64 *p;
8112 int lo, hi; 8112 int lo, hi;
8113 int rv = 1; 8113 int rv = 1;
8114 unsigned long flags;
8114 8115
8115 if (bb->shift < 0) 8116 if (bb->shift < 0)
8116 /* badblocks are disabled */ 8117 /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8125 sectors = next - s; 8126 sectors = next - s;
8126 } 8127 }
8127 8128
8128 write_seqlock_irq(&bb->lock); 8129 write_seqlock_irqsave(&bb->lock, flags);
8129 8130
8130 p = bb->page; 8131 p = bb->page;
8131 lo = 0; 8132 lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8241 bb->changed = 1; 8242 bb->changed = 1;
8242 if (!acknowledged) 8243 if (!acknowledged)
8243 bb->unacked_exist = 1; 8244 bb->unacked_exist = 1;
8244 write_sequnlock_irq(&bb->lock); 8245 write_sequnlock_irqrestore(&bb->lock, flags);
8245 8246
8246 return rv; 8247 return rv;
8247} 8248}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d60412c7f995..aacf6bf352d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
1479 } 1479 }
1480 } 1480 }
1481 if (rdev 1481 if (rdev
1482 && rdev->recovery_offset == MaxSector
1482 && !test_bit(Faulty, &rdev->flags) 1483 && !test_bit(Faulty, &rdev->flags)
1483 && !test_and_set_bit(In_sync, &rdev->flags)) { 1484 && !test_and_set_bit(In_sync, &rdev->flags)) {
1484 count++; 1485 count++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index df7b0a06b0ea..73dc8a377522 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
1782 } 1782 }
1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1784 } else if (tmp->rdev 1784 } else if (tmp->rdev
1785 && tmp->rdev->recovery_offset == MaxSector
1785 && !test_bit(Faulty, &tmp->rdev->flags) 1786 && !test_bit(Faulty, &tmp->rdev->flags)
1786 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1787 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1787 count++; 1788 count++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f252ca1a..f8b906843926 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
779 bi->bi_io_vec[0].bv_offset = 0; 779 bi->bi_io_vec[0].bv_offset = 0;
780 bi->bi_size = STRIPE_SIZE; 780 bi->bi_size = STRIPE_SIZE;
781 /*
782 * If this is discard request, set bi_vcnt 0. We don't
783 * want to confuse SCSI because SCSI will replace payload
784 */
785 if (rw & REQ_DISCARD)
786 bi->bi_vcnt = 0;
781 if (rrdev) 787 if (rrdev)
782 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 788 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
783 789
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
816 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 822 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
817 rbi->bi_io_vec[0].bv_offset = 0; 823 rbi->bi_io_vec[0].bv_offset = 0;
818 rbi->bi_size = STRIPE_SIZE; 824 rbi->bi_size = STRIPE_SIZE;
825 /*
826 * If this is discard request, set bi_vcnt 0. We don't
827 * want to confuse SCSI because SCSI will replace payload
828 */
829 if (rw & REQ_DISCARD)
830 rbi->bi_vcnt = 0;
819 if (conf->mddev->gendisk) 831 if (conf->mddev->gendisk)
820 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 832 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
821 rbi, disk_devt(conf->mddev->gendisk), 833 rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
2910 } 2922 }
2911 /* now that discard is done we can proceed with any sync */ 2923 /* now that discard is done we can proceed with any sync */
2912 clear_bit(STRIPE_DISCARD, &sh->state); 2924 clear_bit(STRIPE_DISCARD, &sh->state);
2925 /*
2926 * SCSI discard will change some bio fields and the stripe has
2927 * no updated data, so remove it from hash list and the stripe
2928 * will be reinitialized
2929 */
2930 spin_lock_irq(&conf->device_lock);
2931 remove_hash(sh);
2932 spin_unlock_irq(&conf->device_lock);
2913 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2933 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2914 set_bit(STRIPE_HANDLE, &sh->state); 2934 set_bit(STRIPE_HANDLE, &sh->state);
2915 2935
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 2521f7e23018..e79749cfec81 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
912 { 0xd5, 0x03, 0x03 }, 912 { 0xd5, 0x03, 0x03 },
913 }; 913 };
914 914
915 /* firmware status */ 915 if (priv->warm) {
916 ret = tda10071_rd_reg(priv, 0x51, &tmp);
917 if (ret)
918 goto error;
919
920 if (!tmp) {
921 /* warm state - wake up device from sleep */ 916 /* warm state - wake up device from sleep */
922 priv->warm = 1;
923 917
924 for (i = 0; i < ARRAY_SIZE(tab); i++) { 918 for (i = 0; i < ARRAY_SIZE(tab); i++) {
925 ret = tda10071_wr_reg_mask(priv, tab[i].reg, 919 ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
937 goto error; 931 goto error;
938 } else { 932 } else {
939 /* cold state - try to download firmware */ 933 /* cold state - try to download firmware */
940 priv->warm = 0;
941 934
942 /* request the firmware, this will block and timeout */ 935 /* request the firmware, this will block and timeout */
943 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent); 936 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index bb0c99d7a4f1..b06a7e54ee0d 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
628 628
629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = { 629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
630 .type = V4L2_DV_BT_656_1120, 630 .type = V4L2_DV_BT_656_1120,
631 .bt = { 631 /* keep this initialization for compatibility with GCC < 4.4.6 */
632 .max_width = 1920, 632 .reserved = { 0 },
633 .max_height = 1200, 633 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
634 .min_pixelclock = 25000000, 634 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
635 .max_pixelclock = 170000000,
636 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
637 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 635 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
638 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 636 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
639 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 637 V4L2_DV_BT_CAP_CUSTOM)
640 },
641}; 638};
642 639
643static int ad9389b_s_dv_timings(struct v4l2_subdev *sd, 640static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7a576097471f..7c8d971f1f61 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
119 119
120static const struct v4l2_dv_timings_cap adv7511_timings_cap = { 120static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
121 .type = V4L2_DV_BT_656_1120, 121 .type = V4L2_DV_BT_656_1120,
122 .bt = { 122 /* keep this initialization for compatibility with GCC < 4.4.6 */
123 .max_width = ADV7511_MAX_WIDTH, 123 .reserved = { 0 },
124 .max_height = ADV7511_MAX_HEIGHT, 124 V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
125 .min_pixelclock = ADV7511_MIN_PIXELCLOCK, 125 ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
126 .max_pixelclock = ADV7511_MAX_PIXELCLOCK, 126 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
127 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
128 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 127 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
129 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 128 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
130 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 129 V4L2_DV_BT_CAP_CUSTOM)
131 },
132}; 130};
133 131
134static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd) 132static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1126 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1); 1124 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
1127 if (state->i2c_edid == NULL) { 1125 if (state->i2c_edid == NULL) {
1128 v4l2_err(sd, "failed to register edid i2c client\n"); 1126 v4l2_err(sd, "failed to register edid i2c client\n");
1127 err = -ENOMEM;
1129 goto err_entity; 1128 goto err_entity;
1130 } 1129 }
1131 1130
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1133 state->work_queue = create_singlethread_workqueue(sd->name); 1132 state->work_queue = create_singlethread_workqueue(sd->name);
1134 if (state->work_queue == NULL) { 1133 if (state->work_queue == NULL) {
1135 v4l2_err(sd, "could not create workqueue\n"); 1134 v4l2_err(sd, "could not create workqueue\n");
1135 err = -ENOMEM;
1136 goto err_unreg_cec; 1136 goto err_unreg_cec;
1137 } 1137 }
1138 1138
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index d1748901337c..22f729d66a96 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
546 546
547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = { 547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
548 .type = V4L2_DV_BT_656_1120, 548 .type = V4L2_DV_BT_656_1120,
549 .bt = { 549 /* keep this initialization for compatibility with GCC < 4.4.6 */
550 .max_width = 1920, 550 .reserved = { 0 },
551 .max_height = 1200, 551 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
552 .min_pixelclock = 25000000, 552 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
553 .max_pixelclock = 170000000,
554 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
555 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 553 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
556 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 554 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
557 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 555 V4L2_DV_BT_CAP_CUSTOM)
558 },
559}; 556};
560 557
561static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = { 558static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
562 .type = V4L2_DV_BT_656_1120, 559 .type = V4L2_DV_BT_656_1120,
563 .bt = { 560 /* keep this initialization for compatibility with GCC < 4.4.6 */
564 .max_width = 1920, 561 .reserved = { 0 },
565 .max_height = 1200, 562 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
566 .min_pixelclock = 25000000, 563 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
567 .max_pixelclock = 225000000,
568 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
569 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 564 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
570 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 565 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
571 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 566 V4L2_DV_BT_CAP_CUSTOM)
572 },
573}; 567};
574 568
575static inline const struct v4l2_dv_timings_cap * 569static inline const struct v4l2_dv_timings_cap *
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index a58a8f663ffb..d9f65d7e3e58 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -46,14 +46,10 @@ struct ths8200_state {
46 46
47static const struct v4l2_dv_timings_cap ths8200_timings_cap = { 47static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
48 .type = V4L2_DV_BT_656_1120, 48 .type = V4L2_DV_BT_656_1120,
49 .bt = { 49 /* keep this initialization for compatibility with GCC < 4.4.6 */
50 .max_width = 1920, 50 .reserved = { 0 },
51 .max_height = 1080, 51 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
52 .min_pixelclock = 25000000, 52 V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
53 .max_pixelclock = 148500000,
54 .standards = V4L2_DV_BT_STD_CEA861,
55 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
56 },
57}; 53};
58 54
59static inline struct ths8200_state *to_state(struct v4l2_subdev *sd) 55static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e12bbd8c3f0b..fb60da85bc2c 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
1455 1455
1456 /* stop video capture */ 1456 /* stop video capture */
1457 if (res_check(fh, RESOURCE_VIDEO)) { 1457 if (res_check(fh, RESOURCE_VIDEO)) {
1458 pm_qos_remove_request(&dev->qos_request);
1458 videobuf_streamoff(&fh->cap); 1459 videobuf_streamoff(&fh->cap);
1459 res_free(dev,fh,RESOURCE_VIDEO); 1460 res_free(dev,fh,RESOURCE_VIDEO);
1460 } 1461 }
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 15d23968d1de..9b88a4601007 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
1423 jpeg->vfd_decoder->release = video_device_release; 1423 jpeg->vfd_decoder->release = video_device_release;
1424 jpeg->vfd_decoder->lock = &jpeg->lock; 1424 jpeg->vfd_decoder->lock = &jpeg->lock;
1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; 1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
1426 jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
1426 1427
1427 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); 1428 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
1428 if (ret) { 1429 if (ret) {
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 7a9c5e9329f2..4f30341dc2ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1, 776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0); 777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
778 778
779 for (i = 0; ARRAY_SIZE(vou_fmt); i++) 779 for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
780 if (vou_fmt[i].pfmt == pix->pixelformat) 780 if (vou_fmt[i].pfmt == pix->pixelformat)
781 return 0; 781 return 0;
782 782
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 8f9f6211c52e..f975b7008692 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
267 struct idmac_video_param *video = &ichan->params.video; 267 struct idmac_video_param *video = &ichan->params.video;
268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt; 268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
269 unsigned long flags;
270 dma_cookie_t cookie; 269 dma_cookie_t cookie;
271 size_t new_size; 270 size_t new_size;
272 271
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
328 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); 327 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
329#endif 328#endif
330 329
331 spin_lock_irqsave(&mx3_cam->lock, flags); 330 spin_lock_irq(&mx3_cam->lock);
332 list_add_tail(&buf->queue, &mx3_cam->capture); 331 list_add_tail(&buf->queue, &mx3_cam->capture);
333 332
334 if (!mx3_cam->active) 333 if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
351 if (mx3_cam->active == buf) 350 if (mx3_cam->active == buf)
352 mx3_cam->active = NULL; 351 mx3_cam->active = NULL;
353 352
354 spin_unlock_irqrestore(&mx3_cam->lock, flags); 353 spin_unlock_irq(&mx3_cam->lock);
355error: 354error:
356 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 355 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
357} 356}
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index ad9309da4a91..6c96e4898777 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include "e4000_priv.h" 21#include "e4000_priv.h"
22#include <linux/math64.h>
22 23
23/* write multiple registers */ 24/* write multiple registers */
24static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) 25static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
233 * or more. 234 * or more.
234 */ 235 */
235 f_vco = c->frequency * e4000_pll_lut[i].mul; 236 f_vco = c->frequency * e4000_pll_lut[i].mul;
236 sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock; 237 sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
237 buf[0] = f_vco / priv->cfg->clock; 238 buf[0] = f_vco / priv->cfg->clock;
238 buf[1] = (sigma_delta >> 0) & 0xff; 239 buf[1] = (sigma_delta >> 0) & 0xff;
239 buf[2] = (sigma_delta >> 8) & 0xff; 240 buf[2] = (sigma_delta >> 8) & 0xff;
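
The e4000 hunk above switches the sigma-delta calculation to div_u64() because the intermediate 0x10000 * (f_vco % clock) no longer fits in 32 bits for realistic reference clocks, so on 32-bit machines the old expression wrapped before the division. A small standalone demonstration of that overflow; the 28.8 MHz clock value is assumed only for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock = 28800000;	/* example reference clock */
	uint32_t rem   = 28799999;	/* worst-case f_vco % clock */

	/* Truncated to 32 bits to mimic the old code on a 32-bit machine:
	 * the product wraps around before the division happens. */
	uint32_t bad  = (uint32_t)(0x10000UL * rem) / clock;

	/* 64-bit arithmetic, the equivalent of the div_u64() fix. */
	uint32_t good = (uint32_t)((0x10000ULL * rem) / clock);

	printf("32-bit: %u, 64-bit: %u\n", bad, good);	/* wildly different */
	return 0;
}
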
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c43c8d32be40..be77482c3070 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") 111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
112 } 112 }
113 }, 113 },
114 {
115 .ident = "T12Rg-H",
116 .matches = {
117 DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
118 DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
119 }
120 },
114 {} 121 {}
115}; 122};
116 123
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 81695d48c13e..c3bb2502225b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
2090 .bInterfaceSubClass = 1, 2090 .bInterfaceSubClass = 1,
2091 .bInterfaceProtocol = 0, 2091 .bInterfaceProtocol = 0,
2092 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 2092 .driver_info = UVC_QUIRK_PROBE_MINMAX },
2093 /* Microsoft Lifecam NX-3000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO,
2096 .idVendor = 0x045e,
2097 .idProduct = 0x0721,
2098 .bInterfaceClass = USB_CLASS_VIDEO,
2099 .bInterfaceSubClass = 1,
2100 .bInterfaceProtocol = 0,
2101 .driver_info = UVC_QUIRK_PROBE_DEF },
2093 /* Microsoft Lifecam VX-7000 */ 2102 /* Microsoft Lifecam VX-7000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2103 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO, 2104 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
2174 .bInterfaceSubClass = 1, 2183 .bInterfaceSubClass = 1,
2175 .bInterfaceProtocol = 0, 2184 .bInterfaceProtocol = 0,
2176 .driver_info = UVC_QUIRK_PROBE_DEF }, 2185 .driver_info = UVC_QUIRK_PROBE_DEF },
2186 /* Dell SP2008WFP Monitor */
2187 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2188 | USB_DEVICE_ID_MATCH_INT_INFO,
2189 .idVendor = 0x05a9,
2190 .idProduct = 0x2641,
2191 .bInterfaceClass = USB_CLASS_VIDEO,
2192 .bInterfaceSubClass = 1,
2193 .bInterfaceProtocol = 0,
2194 .driver_info = UVC_QUIRK_PROBE_DEF },
2177 /* Dell Alienware X51 */ 2195 /* Dell Alienware X51 */
2178 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2196 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2179 | USB_DEVICE_ID_MATCH_INT_INFO, 2197 | USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 594c75eab5a5..de0e87f0b2c3 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
353 353
354 if (b->m.planes[plane].bytesused > length) 354 if (b->m.planes[plane].bytesused > length)
355 return -EINVAL; 355 return -EINVAL;
356 if (b->m.planes[plane].data_offset >= 356
357 if (b->m.planes[plane].data_offset > 0 &&
358 b->m.planes[plane].data_offset >=
357 b->m.planes[plane].bytesused) 359 b->m.planes[plane].bytesused)
358 return -EINVAL; 360 return -EINVAL;
359 } 361 }
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fd56f2563201..646f08f4f504 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
424} 424}
425 425
426static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
427 struct vm_area_struct *vma, unsigned long *res)
428{
429 unsigned long pfn, start_pfn, prev_pfn;
430 unsigned int i;
431 int ret;
432
433 if (!vma_is_io(vma))
434 return -EFAULT;
435
436 ret = follow_pfn(vma, start, &pfn);
437 if (ret)
438 return ret;
439
440 start_pfn = pfn;
441 start += PAGE_SIZE;
442
443 for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
444 prev_pfn = pfn;
445 ret = follow_pfn(vma, start, &pfn);
446
447 if (ret) {
448 pr_err("no page for address %lu\n", start);
449 return ret;
450 }
451 if (pfn != prev_pfn + 1)
452 return -EINVAL;
453 }
454
455 *res = start_pfn;
456 return 0;
457}
458
426static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 459static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
427 int n_pages, struct vm_area_struct *vma, int write) 460 int n_pages, struct vm_area_struct *vma, int write)
428{ 461{
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
433 unsigned long pfn; 466 unsigned long pfn;
434 int ret = follow_pfn(vma, start, &pfn); 467 int ret = follow_pfn(vma, start, &pfn);
435 468
469 if (!pfn_valid(pfn))
470 return -EINVAL;
471
436 if (ret) { 472 if (ret) {
437 pr_err("no page for address %lu\n", start); 473 pr_err("no page for address %lu\n", start);
438 return ret; 474 return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
468 struct vb2_dc_buf *buf = buf_priv; 504 struct vb2_dc_buf *buf = buf_priv;
469 struct sg_table *sgt = buf->dma_sgt; 505 struct sg_table *sgt = buf->dma_sgt;
470 506
471 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 507 if (sgt) {
472 if (!vma_is_io(buf->vma)) 508 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
473 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 509 if (!vma_is_io(buf->vma))
510 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
474 511
475 sg_free_table(sgt); 512 sg_free_table(sgt);
476 kfree(sgt); 513 kfree(sgt);
514 }
477 vb2_put_vma(buf->vma); 515 vb2_put_vma(buf->vma);
478 kfree(buf); 516 kfree(buf);
479} 517}
480 518
519/*
520 * For some kind of reserved memory there might be no struct page available,
521 * so all that can be done to support such 'pages' is to try to convert
522 * pfn to dma address or at the last resort just assume that
523 * dma address == physical address (like it has been assumed in earlier version
524 * of videobuf2-dma-contig
525 */
526
527#ifdef __arch_pfn_to_dma
528static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
529{
530 return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
531}
532#elif defined(__pfn_to_bus)
533static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
534{
535 return (dma_addr_t)__pfn_to_bus(pfn);
536}
537#elif defined(__pfn_to_phys)
538static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
539{
540 return (dma_addr_t)__pfn_to_phys(pfn);
541}
542#else
543static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
544{
545 /* really, we cannot do anything better at this point */
546 return (dma_addr_t)(pfn) << PAGE_SHIFT;
547}
548#endif
549
481static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 550static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
482 unsigned long size, int write) 551 unsigned long size, int write)
483{ 552{
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
548 /* extract page list from userspace mapping */ 617 /* extract page list from userspace mapping */
549 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); 618 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
550 if (ret) { 619 if (ret) {
620 unsigned long pfn;
621 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
622 buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
623 buf->size = size;
624 kfree(pages);
625 return buf;
626 }
627
551 pr_err("failed to get user pages\n"); 628 pr_err("failed to get user pages\n");
552 goto fail_vma; 629 goto fail_vma;
553 } 630 }
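
The videobuf2-dma-contig additions above handle USERPTR buffers that have no struct page backing: the pfns behind the mapping are walked, accepted only if they form one physically contiguous run, and then translated to a DMA address with whatever pfn-to-bus conversion the architecture provides. The contiguity walk is the core idea; below is a self-contained sketch of it over a plain pfn array instead of repeated follow_pfn() calls (names are invented for the sketch):

#include <stdio.h>

/* Accept the buffer only if its page frame numbers form one contiguous run,
 * and report the first frame of that run. */
static int contiguous_start(const unsigned long *pfn, int n_pages,
			    unsigned long *res)
{
	for (int i = 1; i < n_pages; i++)
		if (pfn[i] != pfn[i - 1] + 1)
			return -1;		/* gap: unusable for contiguous DMA */
	*res = pfn[0];
	return 0;
}

int main(void)
{
	unsigned long ok[]  = { 0x1000, 0x1001, 0x1002 };
	unsigned long bad[] = { 0x1000, 0x1001, 0x1003 };
	unsigned long start;

	printf("%d\n", contiguous_start(ok, 3, &start));	/* 0  */
	printf("%d\n", contiguous_start(bad, 3, &start));	/* -1 */
	return 0;
}
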
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 87ed3fb5149a..f344659dceac 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
113}; 113};
114 114
115static const struct of_device_id sh_mobile_sdhi_of_match[] = { 115static const struct of_device_id sh_mobile_sdhi_of_match[] = {
116 { .compatible = "renesas,shmobile-sdhi" }, 116 { .compatible = "renesas,sdhi-shmobile" },
117 { .compatible = "renesas,sh7372-sdhi" }, 117 { .compatible = "renesas,sdhi-sh7372" },
118 { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 118 { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
119 { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 119 { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
120 { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 120 { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
121 { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 121 { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], },
122 { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 122 { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], },
123 { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, 123 { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], },
124 {}, 124 {},
125}; 125};
126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); 126MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 26b14f9fcac6..6bc9618af094 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash)
168 */ 168 */
169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) 169static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
170{ 170{
171 int status;
172 bool need_wren = false;
173
171 switch (JEDEC_MFR(jedec_id)) { 174 switch (JEDEC_MFR(jedec_id)) {
172 case CFI_MFR_MACRONIX:
173 case CFI_MFR_ST: /* Micron, actually */ 175 case CFI_MFR_ST: /* Micron, actually */
176 /* Some Micron need WREN command; all will accept it */
177 need_wren = true;
178 case CFI_MFR_MACRONIX:
174 case 0xEF /* winbond */: 179 case 0xEF /* winbond */:
180 if (need_wren)
181 write_enable(flash);
182
175 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; 183 flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
176 return spi_write(flash->spi, flash->command, 1); 184 status = spi_write(flash->spi, flash->command, 1);
185
186 if (need_wren)
187 write_disable(flash);
188
189 return status;
177 default: 190 default:
178 /* Spansion style */ 191 /* Spansion style */
179 flash->command[0] = OPCODE_BRWR; 192 flash->command[0] = OPCODE_BRWR;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 59ab0692f0b9..a9830ff8e3f3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
349 349
350int common_nfc_set_geometry(struct gpmi_nand_data *this) 350int common_nfc_set_geometry(struct gpmi_nand_data *this)
351{ 351{
352 return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); 352 return legacy_set_geometry(this);
353} 353}
354 354
355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 7ed4841327f2..d340b2f198c6 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2869 2869
2870 len = le16_to_cpu(p->ext_param_page_length) * 16; 2870 len = le16_to_cpu(p->ext_param_page_length) * 16;
2871 ep = kmalloc(len, GFP_KERNEL); 2871 ep = kmalloc(len, GFP_KERNEL);
2872 if (!ep) { 2872 if (!ep)
2873 ret = -ENOMEM; 2873 return -ENOMEM;
2874 goto ext_out;
2875 }
2876 2874
2877 /* Send our own NAND_CMD_PARAM. */ 2875 /* Send our own NAND_CMD_PARAM. */
2878 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2876 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
@@ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
2920 } 2918 }
2921 2919
2922 pr_info("ONFI extended param page detected.\n"); 2920 pr_info("ONFI extended param page detected.\n");
2923 return 0; 2921 ret = 0;
2924 2922
2925ext_out: 2923ext_out:
2926 kfree(ep); 2924 kfree(ep);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dd03dfdfb0d6..c28d4e29af1a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 for (cs = 0; cs < pdata->num_cs; cs++) { 1320 for (cs = 0; cs < pdata->num_cs; cs++) {
1321 struct mtd_info *mtd = info->host[cs]->mtd; 1321 struct mtd_info *mtd = info->host[cs]->mtd;
1322 1322
1323 mtd->name = pdev->name; 1323 /*
1324 * The mtd name matches the one used in 'mtdparts' kernel
1325 * parameter. This name cannot be changed or otherwise
1326 * user's mtd partitions configuration would get broken.
1327 */
1328 mtd->name = "pxa3xx_nand-0";
1324 info->cs = cs; 1329 info->cs = cs;
1325 ret = pxa3xx_nand_scan(mtd); 1330 ret = pxa3xx_nand_scan(mtd);
1326 if (ret) { 1331 if (ret) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55bbb8b8200c..e883bfe2e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1724 struct bonding *bond = netdev_priv(bond_dev); 1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent; 1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr; 1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1727 netdev_features_t old_features = bond_dev->features; 1728 netdev_features_t old_features = bond_dev->features;
1728 1729
1729 /* slave is not a slave or master is not master of this slave */ 1730 /* slave is not a slave or master is not master of this slave */
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev,
1855 * bond_change_active_slave(..., NULL) 1856 * bond_change_active_slave(..., NULL)
1856 */ 1857 */
1857 if (!USES_PRIMARY(bond->params.mode)) { 1858 if (!USES_PRIMARY(bond->params.mode)) {
1858 /* unset promiscuity level from slave */ 1859 /* unset promiscuity level from slave
1859 if (bond_dev->flags & IFF_PROMISC) 1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
1863 * when this slave was attached, so we cache at the start of the
1864 * function and use it here. Same goes for ALLMULTI below
1865 */
1866 if (old_flags & IFF_PROMISC)
1860 dev_set_promiscuity(slave_dev, -1); 1867 dev_set_promiscuity(slave_dev, -1);
1861 1868
1862 /* unset allmulti level from slave */ 1869 /* unset allmulti level from slave */
1863 if (bond_dev->flags & IFF_ALLMULTI) 1870 if (old_flags & IFF_ALLMULTI)
1864 dev_set_allmulti(slave_dev, -1); 1871 dev_set_allmulti(slave_dev, -1);
1865 1872
1866 bond_hw_addr_flush(bond_dev, slave_dev); 1873 bond_hw_addr_flush(bond_dev, slave_dev);
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3b1ff6148702..693d8ffe4653 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
1405 1405
1406static const struct platform_device_id at91_can_id_table[] = { 1406static const struct platform_device_id at91_can_id_table[] = {
1407 { 1407 {
1408 .name = "at91_can", 1408 .name = "at91sam9x5_can",
1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, 1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1410 }, { 1410 }, {
1411 .name = "at91sam9x5_can", 1411 .name = "at91_can",
1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, 1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1413 }, { 1413 }, {
1414 /* sentinel */ 1414 /* sentinel */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
705 size_t size; 705 size_t size;
706 706
707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ 707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
708 size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ 708 size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
710 size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ 710 size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
711 size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ 711 size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ 712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
713 size += sizeof(struct can_berr_counter); 713 size += nla_total_size(sizeof(struct can_berr_counter));
714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
715 size += sizeof(struct can_bittiming_const); 715 size += nla_total_size(sizeof(struct can_bittiming_const));
716 716
717 return size; 717 return size;
718} 718}
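
The can_get_size() fix above wraps every payload size in nla_total_size(), since a netlink attribute occupies a header plus the padded payload, not just sizeof() of the struct, and the old sums under-estimated the message size. A tiny sketch of that accounting with locally defined constants; the 4-byte header and alignment values mirror the usual netlink ones but are stated here as an assumption of the sketch, not quoted from the driver:

#include <stdio.h>

#define HDRLEN  4u	/* per-attribute header, assumed */
#define ALIGNTO 4u	/* attribute alignment, assumed */

/* Local equivalent of "header + payload, rounded up to the alignment". */
static unsigned int attr_total_size(unsigned int payload)
{
	unsigned int attr = HDRLEN + payload;

	return (attr + ALIGNTO - 1) & ~(ALIGNTO - 1);
}

int main(void)
{
	/* A 6-byte payload reports 6 via sizeof(), but the attribute
	 * actually occupies 12 bytes in the netlink message. */
	printf("%u vs %u\n", 6u, attr_total_size(6));
	return 0;
}
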
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 71c677e651d7..8f5ce747feb5 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -62,7 +62,7 @@
62#define FLEXCAN_MCR_BCC BIT(16) 62#define FLEXCAN_MCR_BCC BIT(16)
63#define FLEXCAN_MCR_LPRIO_EN BIT(13) 63#define FLEXCAN_MCR_LPRIO_EN BIT(13)
64#define FLEXCAN_MCR_AEN BIT(12) 64#define FLEXCAN_MCR_AEN BIT(12)
65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) 65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
66#define FLEXCAN_MCR_IDAM_A (0 << 8) 66#define FLEXCAN_MCR_IDAM_A (0 << 8)
67#define FLEXCAN_MCR_IDAM_B (1 << 8) 67#define FLEXCAN_MCR_IDAM_B (1 << 8)
68#define FLEXCAN_MCR_IDAM_C (2 << 8) 68#define FLEXCAN_MCR_IDAM_C (2 << 8)
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
702{ 702{
703 struct flexcan_priv *priv = netdev_priv(dev); 703 struct flexcan_priv *priv = netdev_priv(dev);
704 struct flexcan_regs __iomem *regs = priv->base; 704 struct flexcan_regs __iomem *regs = priv->base;
705 unsigned int i;
706 int err; 705 int err;
707 u32 reg_mcr, reg_ctrl; 706 u32 reg_mcr, reg_ctrl;
708 707
@@ -736,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
736 * 735 *
737 */ 736 */
738 reg_mcr = flexcan_read(&regs->mcr); 737 reg_mcr = flexcan_read(&regs->mcr);
738 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | 739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | 740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
741 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS; 741 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
742 FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
742 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 743 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
743 flexcan_write(reg_mcr, &regs->mcr); 744 flexcan_write(reg_mcr, &regs->mcr);
744 745
@@ -772,16 +773,9 @@ static int flexcan_chip_start(struct net_device *dev)
772 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 773 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
773 flexcan_write(reg_ctrl, &regs->ctrl); 774 flexcan_write(reg_ctrl, &regs->ctrl);
774 775
775 for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { 776 /* Abort any pending TX, mark Mailbox as INACTIVE */
776 flexcan_write(0, &regs->cantxfg[i].can_ctrl); 777 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
777 flexcan_write(0, &regs->cantxfg[i].can_id); 778 &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
778 flexcan_write(0, &regs->cantxfg[i].data[0]);
779 flexcan_write(0, &regs->cantxfg[i].data[1]);
780
781 /* put MB into rx queue */
782 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
783 &regs->cantxfg[i].can_ctrl);
784 }
785 779
786 /* acceptance mask/acceptance code (accept everything) */ 780 /* acceptance mask/acceptance code (accept everything) */
787 flexcan_write(0x0, &regs->rxgmask); 781 flexcan_write(0x0, &regs->rxgmask);
@@ -991,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
991} 985}
992 986
993static const struct of_device_id flexcan_of_match[] = { 987static const struct of_device_id flexcan_of_match[] = {
994 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
995 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
996 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 988 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
989 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
990 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
997 { /* sentinel */ }, 991 { /* sentinel */ },
998}; 992};
999MODULE_DEVICE_TABLE(of, flexcan_of_match); 993MODULE_DEVICE_TABLE(of, flexcan_of_match);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 874188ba06f7..25377e547f9b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76/* maximum rx buffer len: extended CAN frame with timestamp */ 76/* maximum rx buffer len: extended CAN frame with timestamp */
77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) 77#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
78 78
79#define SLC_CMD_LEN 1
80#define SLC_SFF_ID_LEN 3
81#define SLC_EFF_ID_LEN 8
82
79struct slcan { 83struct slcan {
80 int magic; 84 int magic;
81 85
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
142{ 146{
143 struct sk_buff *skb; 147 struct sk_buff *skb;
144 struct can_frame cf; 148 struct can_frame cf;
145 int i, dlc_pos, tmp; 149 int i, tmp;
146 unsigned long ultmp; 150 u32 tmpid;
147 char cmd = sl->rbuff[0]; 151 char *cmd = sl->rbuff;
148 152
149 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) 153 cf.can_id = 0;
154
155 switch (*cmd) {
156 case 'r':
157 cf.can_id = CAN_RTR_FLAG;
158 /* fallthrough */
159 case 't':
160 /* store dlc ASCII value and terminate SFF CAN ID string */
161 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
162 sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
163 /* point to payload data behind the dlc */
164 cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
165 break;
166 case 'R':
167 cf.can_id = CAN_RTR_FLAG;
168 /* fallthrough */
169 case 'T':
170 cf.can_id |= CAN_EFF_FLAG;
171 /* store dlc ASCII value and terminate EFF CAN ID string */
172 cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
173 sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
174 /* point to payload data behind the dlc */
175 cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
176 break;
177 default:
150 return; 178 return;
179 }
151 180
152 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ 181 if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
153 dlc_pos = 4; /* dlc position tiiid */
154 else
155 dlc_pos = 9; /* dlc position Tiiiiiiiid */
156
157 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
158 return; 182 return;
159 183
160 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ 184 cf.can_id |= tmpid;
161 185
162 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ 186 /* get can_dlc from sanitized ASCII value */
163 187 if (cf.can_dlc >= '0' && cf.can_dlc < '9')
164 if (kstrtoul(sl->rbuff+1, 16, &ultmp)) 188 cf.can_dlc -= '0';
189 else
165 return; 190 return;
166 191
167 cf.can_id = ultmp;
168
169 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
170 cf.can_id |= CAN_EFF_FLAG;
171
172 if ((cmd | 0x20) == 'r') /* RTR frame */
173 cf.can_id |= CAN_RTR_FLAG;
174
175 *(u64 *) (&cf.data) = 0; /* clear payload */ 192 *(u64 *) (&cf.data) = 0; /* clear payload */
176 193
177 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { 194 /* RTR frames may have a dlc > 0 but they never have any data bytes */
178 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 195 if (!(cf.can_id & CAN_RTR_FLAG)) {
179 if (tmp < 0) 196 for (i = 0; i < cf.can_dlc; i++) {
180 return; 197 tmp = hex_to_bin(*cmd++);
181 cf.data[i] = (tmp << 4); 198 if (tmp < 0)
182 tmp = hex_to_bin(sl->rbuff[dlc_pos++]); 199 return;
183 if (tmp < 0) 200 cf.data[i] = (tmp << 4);
184 return; 201 tmp = hex_to_bin(*cmd++);
185 cf.data[i] |= tmp; 202 if (tmp < 0)
203 return;
204 cf.data[i] |= tmp;
205 }
186 } 206 }
187 207
188 skb = dev_alloc_skb(sizeof(struct can_frame) + 208 skb = dev_alloc_skb(sizeof(struct can_frame) +
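
The rewritten slc_bump() above drops the old dlc_pos arithmetic in favour of an
explicit switch on the command character plus fixed SLC_CMD_LEN/SLC_SFF_ID_LEN/
SLC_EFF_ID_LEN offsets, and skips the data bytes entirely for RTR frames.  A
self-contained sketch of the same parse in plain C; strtoul() and a local
hexval() stand in for the kernel's kstrtou32()/hex_to_bin(), and struct frame
for struct can_frame:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>
	#include <ctype.h>

	#define SLC_CMD_LEN	1
	#define SLC_SFF_ID_LEN	3
	#define SLC_EFF_ID_LEN	8

	struct frame {				/* stand-in for struct can_frame */
		uint32_t id;
		int	 rtr, eff, dlc;
		uint8_t	 data[8];
	};

	static int hexval(unsigned char c)
	{
		if (!isxdigit(c))
			return -1;
		return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
	}

	/* Parse one slcan PDU such as "t1238DEADBEEFCAFEBABE" or "R1F334455F0". */
	static int slc_parse(const char *buf, struct frame *f)
	{
		char idstr[SLC_EFF_ID_LEN + 1];
		int idlen, i, hi, lo;

		memset(f, 0, sizeof(*f));
		switch (buf[0]) {
		case 'r':
			f->rtr = 1;		/* fall through */
		case 't':
			idlen = SLC_SFF_ID_LEN;
			break;
		case 'R':
			f->rtr = 1;		/* fall through */
		case 'T':
			f->eff = 1;
			idlen = SLC_EFF_ID_LEN;
			break;
		default:
			return -1;
		}

		memcpy(idstr, buf + SLC_CMD_LEN, idlen);
		idstr[idlen] = '\0';
		f->id = (uint32_t)strtoul(idstr, NULL, 16);

		f->dlc = buf[SLC_CMD_LEN + idlen] - '0';
		if (f->dlc < 0 || f->dlc > 8)
			return -1;

		buf += SLC_CMD_LEN + idlen + 1;	/* payload starts after the dlc */
		if (f->rtr)			/* RTR frames carry no data bytes */
			return 0;

		for (i = 0; i < f->dlc; i++) {
			hi = hexval(buf[2 * i]);
			lo = hexval(buf[2 * i + 1]);
			if (hi < 0 || lo < 0)
				return -1;
			f->data[i] = hi << 4 | lo;
		}
		return 0;
	}

	int main(void)
	{
		struct frame f;

		if (slc_parse("t1238DEADBEEFCAFEBABE", &f) == 0)
			printf("id=0x%03X dlc=%d data[0]=0x%02X\n",
			       (unsigned)f.id, f.dlc, f.data[0]);
		return 0;
	}
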
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
209/* parse tty input stream */ 229/* parse tty input stream */
210static void slcan_unesc(struct slcan *sl, unsigned char s) 230static void slcan_unesc(struct slcan *sl, unsigned char s)
211{ 231{
212
213 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ 232 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
214 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && 233 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
215 (sl->rcount > 4)) { 234 (sl->rcount > 4)) {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
236/* Encapsulate one can_frame and stuff into a TTY queue. */ 255/* Encapsulate one can_frame and stuff into a TTY queue. */
237static void slc_encaps(struct slcan *sl, struct can_frame *cf) 256static void slc_encaps(struct slcan *sl, struct can_frame *cf)
238{ 257{
239 int actual, idx, i; 258 int actual, i;
240 char cmd; 259 unsigned char *pos;
260 unsigned char *endpos;
261 canid_t id = cf->can_id;
262
263 pos = sl->xbuff;
241 264
242 if (cf->can_id & CAN_RTR_FLAG) 265 if (cf->can_id & CAN_RTR_FLAG)
243 cmd = 'R'; /* becomes 'r' in standard frame format */ 266 *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
244 else 267 else
245 cmd = 'T'; /* becomes 't' in standard frame format */ 268 *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
246 269
247 if (cf->can_id & CAN_EFF_FLAG) 270 /* determine number of chars for the CAN-identifier */
248 sprintf(sl->xbuff, "%c%08X%d", cmd, 271 if (cf->can_id & CAN_EFF_FLAG) {
249 cf->can_id & CAN_EFF_MASK, cf->can_dlc); 272 id &= CAN_EFF_MASK;
250 else 273 endpos = pos + SLC_EFF_ID_LEN;
251 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, 274 } else {
252 cf->can_id & CAN_SFF_MASK, cf->can_dlc); 275 *pos |= 0x20; /* convert R/T to lower case for SFF */
276 id &= CAN_SFF_MASK;
277 endpos = pos + SLC_SFF_ID_LEN;
278 }
253 279
254 idx = strlen(sl->xbuff); 280 /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
281 pos++;
282 while (endpos >= pos) {
283 *endpos-- = hex_asc_upper[id & 0xf];
284 id >>= 4;
285 }
286
287 pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
255 288
256 for (i = 0; i < cf->can_dlc; i++) 289 *pos++ = cf->can_dlc + '0';
257 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); 290
291 /* RTR frames may have a dlc > 0 but they never have any data bytes */
292 if (!(cf->can_id & CAN_RTR_FLAG)) {
293 for (i = 0; i < cf->can_dlc; i++)
294 pos = hex_byte_pack_upper(pos, cf->data[i]);
295 }
258 296
259 strcat(sl->xbuff, "\r"); /* add terminating character */ 297 *pos++ = '\r';
260 298
261 /* Order of next two lines is *very* important. 299 /* Order of next two lines is *very* important.
262 * When we are sending a little amount of data, 300 * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
267 * 14 Oct 1994 Dmitry Gorodchanin. 305 * 14 Oct 1994 Dmitry Gorodchanin.
268 */ 306 */
269 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); 307 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
270 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); 308 actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
271 sl->xleft = strlen(sl->xbuff) - actual; 309 sl->xleft = (pos - sl->xbuff) - actual;
272 sl->xhead = sl->xbuff + actual; 310 sl->xhead = sl->xbuff + actual;
273 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
274} 312}
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
286 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 324 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
287 return; 325 return;
288 326
327 spin_lock(&sl->lock);
289 if (sl->xleft <= 0) { 328 if (sl->xleft <= 0) {
290 /* Now serial buffer is almost free & we can start 329 /* Now serial buffer is almost free & we can start
291 * transmission of another packet */ 330 * transmission of another packet */
292 sl->dev->stats.tx_packets++; 331 sl->dev->stats.tx_packets++;
293 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 332 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
333 spin_unlock(&sl->lock);
294 netif_wake_queue(sl->dev); 334 netif_wake_queue(sl->dev);
295 return; 335 return;
296 } 336 }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
298 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 338 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
299 sl->xleft -= actual; 339 sl->xleft -= actual;
300 sl->xhead += actual; 340 sl->xhead += actual;
341 spin_unlock(&sl->lock);
301} 342}
302 343
303/* Send a can_frame to a TTY queue. */ 344/* Send a can_frame to a TTY queue. */
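
On the transmit side, slc_encaps() now assembles the PDU by hand - command
character, fixed-width upper-case hex identifier, ASCII dlc, hex payload,
terminating '\r' - and hands the tty exactly pos - sl->xbuff bytes, so the
sprintf()/strlen()/strcat() round trips are gone; slcan_write_wakeup() also
takes sl->lock so xleft/xhead cannot race with the transmit path.  A user-space
sketch of the encoder; hex_asc_upper is modelled with a local table and struct
frame again stands in for struct can_frame:

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define SLC_SFF_ID_LEN	3
	#define SLC_EFF_ID_LEN	8
	#define CAN_SFF_MASK	0x000007ffu
	#define CAN_EFF_MASK	0x1fffffffu

	static const char hex_asc_upper[] = "0123456789ABCDEF";

	struct frame {
		uint32_t id;
		int	 rtr, eff, dlc;
		uint8_t	 data[8];
	};

	/* Encode one frame into buf; returns the number of bytes written. */
	static size_t slc_encode(const struct frame *f, char *buf)
	{
		char *pos = buf, *endpos;
		uint32_t id = f->id;
		int i, idlen;

		*pos = f->rtr ? 'R' : 'T';
		if (f->eff) {
			id &= CAN_EFF_MASK;
			idlen = SLC_EFF_ID_LEN;
		} else {
			*pos |= 0x20;		/* lower case for standard frames */
			id &= CAN_SFF_MASK;
			idlen = SLC_SFF_ID_LEN;
		}

		/* build the 3 (SFF) or 8 (EFF) digit identifier right to left */
		endpos = pos + idlen;
		pos++;
		while (endpos >= pos) {
			*endpos-- = hex_asc_upper[id & 0xf];
			id >>= 4;
		}
		pos += idlen;

		*pos++ = f->dlc + '0';

		if (!f->rtr)			/* RTR frames carry no data bytes */
			for (i = 0; i < f->dlc; i++) {
				*pos++ = hex_asc_upper[f->data[i] >> 4];
				*pos++ = hex_asc_upper[f->data[i] & 0xf];
			}

		*pos++ = '\r';
		return pos - buf;
	}

	int main(void)
	{
		struct frame f = { .id = 0x123, .dlc = 2, .data = { 0xDE, 0xAD } };
		char buf[32];
		size_t n = slc_encode(&f, buf);

		printf("%zu bytes: %.*s\n", n, (int)n - 1, buf);	/* t1232DEAD */
		return 0;
	}
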
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index a0f647f92bf5..0b7a4c3b01a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
463 if (i < PCAN_USB_MAX_TX_URBS) { 463 if (i < PCAN_USB_MAX_TX_URBS) {
464 if (i == 0) { 464 if (i == 0) {
465 netdev_err(netdev, "couldn't setup any tx URB\n"); 465 netdev_err(netdev, "couldn't setup any tx URB\n");
466 return err; 466 goto err_tx;
467 } 467 }
468 468
469 netdev_warn(netdev, "tx performance may be slow\n"); 469 netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
472 if (dev->adapter->dev_start) { 472 if (dev->adapter->dev_start) {
473 err = dev->adapter->dev_start(dev); 473 err = dev->adapter->dev_start(dev);
474 if (err) 474 if (err)
475 goto failed; 475 goto err_adapter;
476 } 476 }
477 477
478 dev->state |= PCAN_USB_STATE_STARTED; 478 dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
481 if (dev->adapter->dev_set_bus) { 481 if (dev->adapter->dev_set_bus) {
482 err = dev->adapter->dev_set_bus(dev, 1); 482 err = dev->adapter->dev_set_bus(dev, 1);
483 if (err) 483 if (err)
484 goto failed; 484 goto err_adapter;
485 } 485 }
486 486
487 dev->can.state = CAN_STATE_ERROR_ACTIVE; 487 dev->can.state = CAN_STATE_ERROR_ACTIVE;
488 488
489 return 0; 489 return 0;
490 490
491failed: 491err_adapter:
492 if (err == -ENODEV) 492 if (err == -ENODEV)
493 netif_device_detach(dev->netdev); 493 netif_device_detach(dev->netdev);
494 494
495 netdev_warn(netdev, "couldn't submit control: %d\n", err); 495 netdev_warn(netdev, "couldn't submit control: %d\n", err);
496 496
497 for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
498 usb_free_urb(dev->tx_contexts[i].urb);
499 dev->tx_contexts[i].urb = NULL;
500 }
501err_tx:
502 usb_kill_anchored_urbs(&dev->rx_submitted);
503
497 return err; 504 return err;
498} 505}
499 506
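
The peak_usb_start() change above adds proper unwinding: if no tx URB could be
set up, or dev_start()/dev_set_bus() fails, the function now frees the tx
contexts it allocated and kills the already-submitted rx URBs instead of
leaking them.  A generic sketch of that goto-based error path (the resources
are simple mallocs here, purely illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	/* Kernel-style unwinding: undo in reverse allocation order, jumping to
	 * the label that matches how far setup got before failing. */
	static int start_device(void)
	{
		void *rx, *tx;
		int err = -1;

		rx = malloc(64);	/* stands in for submitting the rx URBs */
		if (!rx)
			return -1;

		tx = malloc(64);	/* stands in for the tx context URBs */
		if (!tx)
			goto err_rx;

		if (err)		/* pretend dev_start() failed */
			goto err_tx;

		return 0;

	err_tx:
		free(tx);		/* usb_free_urb() on each tx context */
	err_rx:
		free(rx);		/* usb_kill_anchored_urbs(&rx_submitted) */
		return err;
	}

	int main(void)
	{
		printf("start_device() = %d\n", start_device());
		return 0;
	}
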
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98bd..c5e375ddd6c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
1197/* TM (timers) host DB constants */ 1197/* TM (timers) host DB constants */
1198#define TM_ILT_PAGE_SZ_HW 0 1198#define TM_ILT_PAGE_SZ_HW 0
1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
1200/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ 1200#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
1201#define TM_CONN_NUM 1024 1201 BNX2X_VF_CIDS + \
1202 CNIC_ISCSI_CID_MAX)
1202#define TM_ILT_SZ (8 * TM_CONN_NUM) 1203#define TM_ILT_SZ (8 * TM_CONN_NUM)
1203#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) 1204#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
1204 1205
@@ -1527,7 +1528,6 @@ struct bnx2x {
1527#define PCI_32BIT_FLAG (1 << 1) 1528#define PCI_32BIT_FLAG (1 << 1)
1528#define ONE_PORT_FLAG (1 << 2) 1529#define ONE_PORT_FLAG (1 << 2)
1529#define NO_WOL_FLAG (1 << 3) 1530#define NO_WOL_FLAG (1 << 3)
1530#define USING_DAC_FLAG (1 << 4)
1531#define USING_MSIX_FLAG (1 << 5) 1531#define USING_MSIX_FLAG (1 << 5)
1532#define USING_MSI_FLAG (1 << 6) 1532#define USING_MSI_FLAG (1 << 6)
1533#define DISABLE_MSI_FLAG (1 << 7) 1533#define DISABLE_MSI_FLAG (1 << 7)
@@ -1621,7 +1621,7 @@ struct bnx2x {
1621 u16 rx_ticks_int; 1621 u16 rx_ticks_int;
1622 u16 rx_ticks; 1622 u16 rx_ticks;
1623/* Maximal coalescing timeout in us */ 1623/* Maximal coalescing timeout in us */
1624#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) 1624#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
1625 1625
1626 u32 lin_cnt; 1626 u32 lin_cnt;
1627 1627
@@ -2072,7 +2072,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
2072 2072
2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2074 u8 src_type, u8 dst_type); 2074 u8 src_type, u8 dst_type);
2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); 2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2076 u32 *comp);
2076 2077
2077/* FLR related routines */ 2078/* FLR related routines */
2078u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); 2079u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2498,4 +2499,8 @@ enum bnx2x_pci_bus_speed {
2498}; 2499};
2499 2500
2500void bnx2x_set_local_cmng(struct bnx2x *bp); 2501void bnx2x_set_local_cmng(struct bnx2x *bp);
2502
2503#define MCPR_SCRATCH_BASE(bp) \
2504 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2505
2501#endif /* bnx2x.h */ 2506#endif /* bnx2x.h */
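
In bnx2x.h the timers-block connection count is no longer a hard-coded 1024 but
is derived from the actual CID layout (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS +
CNIC_ISCSI_CID_MAX), and the ILT line count follows from it.  A worked example
of that sizing arithmetic with made-up counts; the real constants are
chip-dependent and defined elsewhere in the driver:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Illustrative values only, not the driver's real constants. */
		unsigned int first_vf_cid = 368, vf_cids = 4096, iscsi_cid_max = 256;

		unsigned int tm_conn_num = first_vf_cid + vf_cids + iscsi_cid_max;
		unsigned int tm_ilt_sz	 = 8 * tm_conn_num;	/* TM_ILT_SZ */
		unsigned int tm_ilt_page = 4096 << 0;		/* TM_ILT_PAGE_SZ */

		printf("TM_CONN_NUM=%u TM_ILT_SZ=%u TM_ILT_LINES=%u\n",
		       tm_conn_num, tm_ilt_sz,
		       DIV_ROUND_UP(tm_ilt_sz, tm_ilt_page));
		return 0;
	}
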
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 61726af1de6e..4ab4c89c60cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
681 } 681 }
682 } 682 }
683#endif 683#endif
684 skb_record_rx_queue(skb, fp->rx_queue);
684 napi_gro_receive(&fp->napi, skb); 685 napi_gro_receive(&fp->napi, skb);
685} 686}
686 687
@@ -2481,8 +2482,7 @@ load_error_cnic2:
2481load_error_cnic1: 2482load_error_cnic1:
2482 bnx2x_napi_disable_cnic(bp); 2483 bnx2x_napi_disable_cnic(bp);
2483 /* Update the number of queues without the cnic queues */ 2484 /* Update the number of queues without the cnic queues */
2484 rc = bnx2x_set_real_num_queues(bp, 0); 2485 if (bnx2x_set_real_num_queues(bp, 0))
2485 if (rc)
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); 2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0: 2487load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n"); 2488 BNX2X_ERR("CNIC-related load failed\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 324de5f05332..e8efa1c93ffe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -891,17 +891,8 @@ static void bnx2x_get_regs(struct net_device *dev,
891 * will re-enable parity attentions right after the dump. 891 * will re-enable parity attentions right after the dump.
892 */ 892 */
893 893
894 /* Disable parity on path 0 */
895 bnx2x_pretend_func(bp, 0);
896 bnx2x_disable_blocks_parity(bp); 894 bnx2x_disable_blocks_parity(bp);
897 895
898 /* Disable parity on path 1 */
899 bnx2x_pretend_func(bp, 1);
900 bnx2x_disable_blocks_parity(bp);
901
902 /* Return to current function */
903 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
904
905 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 896 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
906 dump_hdr.preset = DUMP_ALL_PRESETS; 897 dump_hdr.preset = DUMP_ALL_PRESETS;
907 dump_hdr.version = BNX2X_DUMP_VERSION; 898 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +919,9 @@ static void bnx2x_get_regs(struct net_device *dev,
928 /* Actually read the registers */ 919 /* Actually read the registers */
929 __bnx2x_get_regs(bp, p); 920 __bnx2x_get_regs(bp, p);
930 921
931 /* Re-enable parity attentions on path 0 */ 922 /* Re-enable parity attentions */
932 bnx2x_pretend_func(bp, 0);
933 bnx2x_clear_blocks_parity(bp); 923 bnx2x_clear_blocks_parity(bp);
934 bnx2x_enable_blocks_parity(bp); 924 bnx2x_enable_blocks_parity(bp);
935
936 /* Re-enable parity attentions on path 1 */
937 bnx2x_pretend_func(bp, 1);
938 bnx2x_clear_blocks_parity(bp);
939 bnx2x_enable_blocks_parity(bp);
940
941 /* Return to current function */
942 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
943} 925}
944 926
945static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) 927static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +975,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
993 * will re-enable parity attentions right after the dump. 975 * will re-enable parity attentions right after the dump.
994 */ 976 */
995 977
996 /* Disable parity on path 0 */
997 bnx2x_pretend_func(bp, 0);
998 bnx2x_disable_blocks_parity(bp); 978 bnx2x_disable_blocks_parity(bp);
999 979
1000 /* Disable parity on path 1 */
1001 bnx2x_pretend_func(bp, 1);
1002 bnx2x_disable_blocks_parity(bp);
1003
1004 /* Return to current function */
1005 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1006
1007 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 980 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
1008 dump_hdr.preset = bp->dump_preset_idx; 981 dump_hdr.preset = bp->dump_preset_idx;
1009 dump_hdr.version = BNX2X_DUMP_VERSION; 982 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1005,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
1032 /* Actually read the registers */ 1005 /* Actually read the registers */
1033 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); 1006 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
1034 1007
1035 /* Re-enable parity attentions on path 0 */ 1008 /* Re-enable parity attentions */
1036 bnx2x_pretend_func(bp, 0);
1037 bnx2x_clear_blocks_parity(bp); 1009 bnx2x_clear_blocks_parity(bp);
1038 bnx2x_enable_blocks_parity(bp); 1010 bnx2x_enable_blocks_parity(bp);
1039 1011
1040 /* Re-enable parity attentions on path 1 */
1041 bnx2x_pretend_func(bp, 1);
1042 bnx2x_clear_blocks_parity(bp);
1043 bnx2x_enable_blocks_parity(bp);
1044
1045 /* Return to current function */
1046 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1047
1048 return 0; 1012 return 0;
1049} 1013}
1050 1014
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486a..c2dfea7968f4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -640,23 +640,35 @@ static const struct {
640 * [30] MCP Latched ump_tx_parity 640 * [30] MCP Latched ump_tx_parity
641 * [31] MCP Latched scpad_parity 641 * [31] MCP Latched scpad_parity
642 */ 642 */
643#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 643#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
647
648#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
649 (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
647 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 650 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
648 651
649/* Below registers control the MCP parity attention output. When 652/* Below registers control the MCP parity attention output. When
650 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are 653 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
651 * enabled, when cleared - disabled. 654 * enabled, when cleared - disabled.
652 */ 655 */
653static const u32 mcp_attn_ctl_regs[] = { 656static const struct {
654 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 657 u32 addr;
655 MISC_REG_AEU_ENABLE4_NIG_0, 658 u32 bits;
656 MISC_REG_AEU_ENABLE4_PXP_0, 659} mcp_attn_ctl_regs[] = {
657 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 660 { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
658 MISC_REG_AEU_ENABLE4_NIG_1, 661 MISC_AEU_ENABLE_MCP_PRTY_BITS },
659 MISC_REG_AEU_ENABLE4_PXP_1 662 { MISC_REG_AEU_ENABLE4_NIG_0,
663 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
664 { MISC_REG_AEU_ENABLE4_PXP_0,
665 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
666 { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
667 MISC_AEU_ENABLE_MCP_PRTY_BITS },
668 { MISC_REG_AEU_ENABLE4_NIG_1,
669 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
670 { MISC_REG_AEU_ENABLE4_PXP_1,
671 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
660}; 672};
661 673
662static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) 674static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
665 u32 reg_val; 677 u32 reg_val;
666 678
667 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { 679 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
668 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); 680 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
669 681
670 if (enable) 682 if (enable)
671 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; 683 reg_val |= mcp_attn_ctl_regs[i].bits;
672 else 684 else
673 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; 685 reg_val &= ~mcp_attn_ctl_regs[i].bits;
674 686
675 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); 687 REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
676 } 688 }
677} 689}
678 690
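
The mcp_attn_ctl_regs[] rework above pairs every AEU enable register with
exactly the parity bits that may be toggled in it, so the NIG and PXP copies no
longer get the latched-scratchpad bit that only belongs to the per-function
enables.  A generic sketch of that table-driven read-modify-write; the register
file and bit values below are stand-ins, not the chip's:

	#include <stdio.h>
	#include <stdint.h>

	/* Fake register file standing in for REG_RD()/REG_WR(). */
	static uint32_t regfile[4];
	static uint32_t reg_rd(unsigned a)             { return regfile[a]; }
	static void     reg_wr(unsigned a, uint32_t v) { regfile[a] = v;    }

	#define PRTY_SUB_BITS	0x70000000u			/* ROM/UMP_RX/UMP_TX */
	#define PRTY_ALL_BITS	(PRTY_SUB_BITS | 0x80000000u)	/* + SCPAD */

	static const struct { unsigned addr; uint32_t bits; } attn_ctl[] = {
		{ 0, PRTY_ALL_BITS },	/* per-function enable: all four bits */
		{ 1, PRTY_SUB_BITS },	/* NIG copy: no SCPAD bit */
		{ 2, PRTY_SUB_BITS },	/* PXP copy: no SCPAD bit */
	};

	static void set_mcp_parity(int enable)
	{
		for (unsigned i = 0; i < sizeof(attn_ctl) / sizeof(attn_ctl[0]); i++) {
			uint32_t v = reg_rd(attn_ctl[i].addr);

			if (enable)
				v |= attn_ctl[i].bits;
			else
				v &= ~attn_ctl[i].bits;
			reg_wr(attn_ctl[i].addr, v);
		}
	}

	int main(void)
	{
		set_mcp_parity(1);
		printf("func=0x%08x nig=0x%08x pxp=0x%08x\n",
		       (unsigned)regfile[0], (unsigned)regfile[1], (unsigned)regfile[2]);
		return 0;
	}
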
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d60a2ea3da19..51468227bf3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
175#define EDC_MODE_LINEAR 0x0022 175#define EDC_MODE_LINEAR 0x0022
176#define EDC_MODE_LIMITING 0x0044 176#define EDC_MODE_LIMITING 0x0044
177#define EDC_MODE_PASSIVE_DAC 0x0055 177#define EDC_MODE_PASSIVE_DAC 0x0055
178#define EDC_MODE_ACTIVE_DAC 0x0066
178 179
179/* ETS defines*/ 180/* ETS defines*/
180#define DCBX_INVALID_COS (0xFF) 181#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3684 bnx2x_update_link_attr(params, vars->link_attr_sync); 3685 bnx2x_update_link_attr(params, vars->link_attr_sync);
3685} 3686}
3686 3687
3688static void bnx2x_disable_kr2(struct link_params *params,
3689 struct link_vars *vars,
3690 struct bnx2x_phy *phy)
3691{
3692 struct bnx2x *bp = params->bp;
3693 int i;
3694 static struct bnx2x_reg_set reg_set[] = {
3695 /* Step 1 - Program the TX/RX alignment markers */
3696 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
3697 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
3698 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
3699 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
3700 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
3701 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
3702 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
3703 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
3704 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
3705 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
3706 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
3707 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
3708 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
3709 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
3710 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
3711 };
3712 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
3713
3714 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3715 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3716 reg_set[i].val);
3717 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3718 bnx2x_update_link_attr(params, vars->link_attr_sync);
3719
3720 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3721}
3722
3687static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3723static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3688 struct link_params *params) 3724 struct link_params *params)
3689{ 3725{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3715 struct link_params *params, 3751 struct link_params *params,
3716 struct link_vars *vars) { 3752 struct link_vars *vars) {
3717 u16 lane, i, cl72_ctrl, an_adv = 0; 3753 u16 lane, i, cl72_ctrl, an_adv = 0;
3718 u16 ucode_ver;
3719 struct bnx2x *bp = params->bp; 3754 struct bnx2x *bp = params->bp;
3720 static struct bnx2x_reg_set reg_set[] = { 3755 static struct bnx2x_reg_set reg_set[] = {
3721 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, 3756 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3806 3841
3807 /* Advertise pause */ 3842 /* Advertise pause */
3808 bnx2x_ext_phy_set_pause(params, phy, vars); 3843 bnx2x_ext_phy_set_pause(params, phy, vars);
3809 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 3844 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3810 */
3811 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3812 MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
3813 if (ucode_ver < 0xd108) {
3814 DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
3815 ucode_ver);
3816 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3817 }
3818 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3819 MDIO_WC_REG_DIGITAL5_MISC7, 0x100); 3846 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
3820 3847
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3838 bnx2x_set_aer_mmd(params, phy); 3865 bnx2x_set_aer_mmd(params, phy);
3839 3866
3840 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3867 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3868 } else {
3869 bnx2x_disable_kr2(params, vars, phy);
3841 } 3870 }
3842 3871
3843 /* Enable Autoneg: only on the main lane */ 3872 /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4347 struct bnx2x *bp = params->bp; 4376 struct bnx2x *bp = params->bp;
4348 u32 serdes_net_if; 4377 u32 serdes_net_if;
4349 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; 4378 u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
4350 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4351 4379
4352 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; 4380 vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
4353 4381
4354 if (!vars->turn_to_run_wc_rt) 4382 if (!vars->turn_to_run_wc_rt)
4355 return; 4383 return;
4356 4384
4357 /* Return if there is no link partner */
4358 if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
4359 DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
4360 return;
4361 }
4362
4363 if (vars->rx_tx_asic_rst) { 4385 if (vars->rx_tx_asic_rst) {
4386 u16 lane = bnx2x_get_warpcore_lane(phy, params);
4364 serdes_net_if = (REG_RD(bp, params->shmem_base + 4387 serdes_net_if = (REG_RD(bp, params->shmem_base +
4365 offsetof(struct shmem_region, dev_info. 4388 offsetof(struct shmem_region, dev_info.
4366 port_hw_config[params->port].default_cfg)) & 4389 port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4375 /*10G KR*/ 4398 /*10G KR*/
4376 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4399 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
4377 4400
4378 DP(NETIF_MSG_LINK,
4379 "gp_status1 0x%x\n", gp_status1);
4380
4381 if (lnkup_kr || lnkup) { 4401 if (lnkup_kr || lnkup) {
4382 vars->rx_tx_asic_rst = 0; 4402 vars->rx_tx_asic_rst = 0;
4383 DP(NETIF_MSG_LINK,
4384 "link up, rx_tx_asic_rst 0x%x\n",
4385 vars->rx_tx_asic_rst);
4386 } else { 4403 } else {
4387 /* Reset the lane to see if link comes up.*/ 4404 /* Reset the lane to see if link comes up.*/
4388 bnx2x_warpcore_reset_lane(bp, phy, 1); 4405 bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4507 * enabled transmitter to avoid current leakage in case 4524 * enabled transmitter to avoid current leakage in case
4508 * no module is connected 4525 * no module is connected
4509 */ 4526 */
4510 if (bnx2x_is_sfp_module_plugged(phy, params)) 4527 if ((params->loopback_mode == LOOPBACK_NONE) ||
4511 bnx2x_sfp_module_detection(phy, params); 4528 (params->loopback_mode == LOOPBACK_EXT)) {
4512 else 4529 if (bnx2x_is_sfp_module_plugged(phy, params))
4513 bnx2x_sfp_e3_set_transmitter(params, phy, 1); 4530 bnx2x_sfp_module_detection(phy, params);
4531 else
4532 bnx2x_sfp_e3_set_transmitter(params,
4533 phy, 1);
4534 }
4514 4535
4515 bnx2x_warpcore_config_sfi(phy, params); 4536 bnx2x_warpcore_config_sfi(phy, params);
4516 break; 4537 break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5757 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5778 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5758 duplex); 5779 duplex);
5759 5780
5781 /* In case of KR link down, start up the recovering procedure */
5782 if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
5783 (!(phy->flags & FLAGS_WC_DUAL_MODE)))
5784 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
5785
5760 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", 5786 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
5761 vars->duplex, vars->flow_ctrl, vars->link_status); 5787 vars->duplex, vars->flow_ctrl, vars->link_status);
5762 return rc; 5788 return rc;
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params,
6507 params->phy[INT_PHY].config_init(phy, params, vars); 6533 params->phy[INT_PHY].config_init(phy, params, vars);
6508 } 6534 }
6509 6535
6536 /* Re-read this value in case it was changed inside config_init due to
6537 * limitations of optic module
6538 */
6539 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6540
6510 /* Init external phy*/ 6541 /* Init external phy*/
6511 if (non_ext_phy) { 6542 if (non_ext_phy) {
6512 if (params->phy[INT_PHY].supported & 6543 if (params->phy[INT_PHY].supported &
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8080 if (copper_module_type & 8111 if (copper_module_type &
8081 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8112 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
8082 DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); 8113 DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
8083 check_limiting_mode = 1; 8114 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8115 *edc_mode = EDC_MODE_ACTIVE_DAC;
8116 else
8117 check_limiting_mode = 1;
8084 } else if (copper_module_type & 8118 } else if (copper_module_type &
8085 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8119 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8086 DP(NETIF_MSG_LINK, 8120 DP(NETIF_MSG_LINK,
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
8555 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; 8589 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
8556 break; 8590 break;
8557 case EDC_MODE_PASSIVE_DAC: 8591 case EDC_MODE_PASSIVE_DAC:
8592 case EDC_MODE_ACTIVE_DAC:
8558 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; 8593 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
8559 break; 8594 break;
8560 default: 8595 default:
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9730 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, 9765 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
9731 an_1000_val); 9766 an_1000_val);
9732 9767
9733 /* set 100 speed advertisement */ 9768 /* Set 10/100 speed advertisement */
9734 if ((phy->req_line_speed == SPEED_AUTO_NEG) && 9769 if (phy->req_line_speed == SPEED_AUTO_NEG) {
9735 (phy->speed_cap_mask & 9770 if (phy->speed_cap_mask &
9736 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 9771 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
9737 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { 9772 /* Enable autoneg and restart autoneg for legacy speeds
9738 an_10_100_val |= (1<<7); 9773 */
9739 /* Enable autoneg and restart autoneg for legacy speeds */ 9774 autoneg_val |= (1<<9 | 1<<12);
9740 autoneg_val |= (1<<9 | 1<<12);
9741
9742 if (phy->req_duplex == DUPLEX_FULL)
9743 an_10_100_val |= (1<<8); 9775 an_10_100_val |= (1<<8);
9744 DP(NETIF_MSG_LINK, "Advertising 100M\n"); 9776 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
9745 } 9777 }
9746 /* set 10 speed advertisement */ 9778
9747 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 9779 if (phy->speed_cap_mask &
9748 (phy->speed_cap_mask & 9780 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
9749 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | 9781 /* Enable autoneg and restart autoneg for legacy speeds
9750 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && 9782 */
9751 (phy->supported & 9783 autoneg_val |= (1<<9 | 1<<12);
9752 (SUPPORTED_10baseT_Half | 9784 an_10_100_val |= (1<<7);
9753 SUPPORTED_10baseT_Full)))) { 9785 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
9754 an_10_100_val |= (1<<5); 9786 }
9755 autoneg_val |= (1<<9 | 1<<12); 9787
9756 if (phy->req_duplex == DUPLEX_FULL) 9788 if ((phy->speed_cap_mask &
9789 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
9790 (phy->supported & SUPPORTED_10baseT_Full)) {
9757 an_10_100_val |= (1<<6); 9791 an_10_100_val |= (1<<6);
9758 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 9792 autoneg_val |= (1<<9 | 1<<12);
9793 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
9794 }
9795
9796 if ((phy->speed_cap_mask &
9797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
9798 (phy->supported & SUPPORTED_10baseT_Half)) {
9799 an_10_100_val |= (1<<5);
9800 autoneg_val |= (1<<9 | 1<<12);
9801 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
9802 }
9759 } 9803 }
9760 9804
9761 /* Only 10/100 are allowed to work in FORCE mode */ 9805 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13432 } 13476 }
13433 } 13477 }
13434} 13478}
13435static void bnx2x_disable_kr2(struct link_params *params,
13436 struct link_vars *vars,
13437 struct bnx2x_phy *phy)
13438{
13439 struct bnx2x *bp = params->bp;
13440 int i;
13441 static struct bnx2x_reg_set reg_set[] = {
13442 /* Step 1 - Program the TX/RX alignment markers */
13443 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13444 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13445 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13446 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13447 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13448 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13449 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13450 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13451 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13452 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13453 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13454 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13455 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13456 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13457 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13458 };
13459 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13460
13461 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
13462 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13463 reg_set[i].val);
13464 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13465 bnx2x_update_link_attr(params, vars->link_attr_sync);
13466
13467 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13468 /* Restart AN on leading lane */
13469 bnx2x_warpcore_restart_AN_KR(phy, params);
13470}
13471
13472static void bnx2x_kr2_recovery(struct link_params *params, 13479static void bnx2x_kr2_recovery(struct link_params *params,
13473 struct link_vars *vars, 13480 struct link_vars *vars,
13474 struct bnx2x_phy *phy) 13481 struct bnx2x_phy *phy)
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13546 /* Disable KR2 on both lanes */ 13553 /* Disable KR2 on both lanes */
13547 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); 13554 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
13548 bnx2x_disable_kr2(params, vars, phy); 13555 bnx2x_disable_kr2(params, vars, phy);
13556 /* Restart AN on leading lane */
13557 bnx2x_warpcore_restart_AN_KR(phy, params);
13549 return; 13558 return;
13550 } 13559 }
13551} 13560}
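
Among the bnx2x_link.c changes above, bnx2x_848xx_cmn_config_init() now builds
the legacy 10/100 advertisement per capability: 100M-FD, 100M-HD, 10M-FD and
10M-HD each set their own bit and request an autoneg restart, rather than
keying everything off req_duplex.  A small sketch of composing such an
advertisement word from capability flags; the bit positions follow the hunk,
while the CAP_* flag names are placeholders:

	#include <stdio.h>
	#include <stdint.h>

	/* Placeholder capability flags, one per legacy speed/duplex. */
	#define CAP_100M_FULL	(1u << 0)
	#define CAP_100M_HALF	(1u << 1)
	#define CAP_10M_FULL	(1u << 2)
	#define CAP_10M_HALF	(1u << 3)

	#define AN_RESTART	(1 << 9 | 1 << 12)	/* enable + restart autoneg */

	static void build_legacy_adv(uint32_t caps,
				     uint16_t *an_10_100, uint16_t *autoneg)
	{
		*an_10_100 = 0;
		*autoneg = 0;

		if (caps & CAP_100M_FULL) { *an_10_100 |= 1 << 8; *autoneg |= AN_RESTART; }
		if (caps & CAP_100M_HALF) { *an_10_100 |= 1 << 7; *autoneg |= AN_RESTART; }
		if (caps & CAP_10M_FULL)  { *an_10_100 |= 1 << 6; *autoneg |= AN_RESTART; }
		if (caps & CAP_10M_HALF)  { *an_10_100 |= 1 << 5; *autoneg |= AN_RESTART; }
	}

	int main(void)
	{
		uint16_t adv, an;

		build_legacy_adv(CAP_100M_FULL | CAP_10M_HALF, &adv, &an);
		printf("an_10_100=0x%04x autoneg=0x%04x\n", adv, an);	/* 0x0120 0x1200 */
		return 0;
	}
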
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a6704b555042..b42f89ce02ef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
503} 503}
504 504
505/* issue a dmae command over the init-channel and wait for completion */ 505/* issue a dmae command over the init-channel and wait for completion */
506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
507 u32 *comp)
507{ 508{
508 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
510 int rc = 0; 510 int rc = 0;
511 511
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
518 spin_lock_bh(&bp->dmae_lock); 518 spin_lock_bh(&bp->dmae_lock);
519 519
520 /* reset completion */ 520 /* reset completion */
521 *wb_comp = 0; 521 *comp = 0;
522 522
523 /* post the command on the channel used for initializations */ 523 /* post the command on the channel used for initializations */
524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
525 525
526 /* wait for completion */ 526 /* wait for completion */
527 udelay(5); 527 udelay(5);
528 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 528 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
529 529
530 if (!cnt || 530 if (!cnt ||
531 (bp->recovery_state != BNX2X_RECOVERY_DONE && 531 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
537 cnt--; 537 cnt--;
538 udelay(50); 538 udelay(50);
539 } 539 }
540 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 540 if (*comp & DMAE_PCI_ERR_FLAG) {
541 BNX2X_ERR("DMAE PCI error!\n"); 541 BNX2X_ERR("DMAE PCI error!\n");
542 rc = DMAE_PCI_ERROR; 542 rc = DMAE_PCI_ERROR;
543 } 543 }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
574 dmae.len = len32; 574 dmae.len = len32;
575 575
576 /* issue the command and wait for completion */ 576 /* issue the command and wait for completion */
577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
578 if (rc) { 578 if (rc) {
579 BNX2X_ERR("DMAE returned failure %d\n", rc); 579 BNX2X_ERR("DMAE returned failure %d\n", rc);
580 bnx2x_panic(); 580 bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
611 dmae.len = len32; 611 dmae.len = len32;
612 612
613 /* issue the command and wait for completion */ 613 /* issue the command and wait for completion */
614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
615 if (rc) { 615 if (rc) {
616 BNX2X_ERR("DMAE returned failure %d\n", rc); 616 BNX2X_ERR("DMAE returned failure %d\n", rc);
617 bnx2x_panic(); 617 bnx2x_panic();
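
bnx2x_issue_dmae_with_comp() now takes its completion word as a parameter
instead of always using bnx2x_sp(bp, wb_comp), so callers can point it at any
DMA-able location while the polling loop itself stays the same.  A
self-contained sketch of that poll-until-done-or-timeout pattern; the constants
are illustrative and the "hardware" is a helper that flips the word after a few
polls:

	#include <stdio.h>
	#include <stdint.h>

	#define DMAE_COMP_VAL		0xd0aeu		/* illustrative value */
	#define DMAE_PCI_ERR_FLAG	0x80000000u

	/* Stand-in for the device: completion "lands" after three polls. */
	static void fake_hw_tick(uint32_t *comp, int *ticks)
	{
		if (++(*ticks) == 3)
			*comp = DMAE_COMP_VAL;
	}

	/* Poll a caller-supplied completion word, as the reworked helper does. */
	static int issue_with_comp(uint32_t *comp)
	{
		int cnt = 4000, ticks = 0;

		*comp = 0;				/* reset completion */
		/* ... post the command on the init channel here ... */

		while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
			if (!cnt--)
				return -1;		/* timeout */
			fake_hw_tick(comp, &ticks);	/* udelay(50) + hw progress */
		}
		if (*comp & DMAE_PCI_ERR_FLAG)
			return -2;			/* PCI error */
		return 0;
	}

	int main(void)
	{
		uint32_t my_comp;			/* caller-owned completion word */
		int rc = issue_with_comp(&my_comp);

		printf("rc=%d comp=0x%x\n", rc, (unsigned)my_comp);
		return 0;
	}
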
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
751 return rc; 751 return rc;
752} 752}
753 753
754#define MCPR_TRACE_BUFFER_SIZE (0x800)
755#define SCRATCH_BUFFER_SIZE(bp) \
756 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
757
754void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 758void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
755{ 759{
756 u32 addr, val; 760 u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
775 trace_shmem_base = bp->common.shmem_base; 779 trace_shmem_base = bp->common.shmem_base;
776 else 780 else
777 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 781 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
778 addr = trace_shmem_base - 0x800; 782
783 /* sanity */
784 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
785 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
786 SCRATCH_BUFFER_SIZE(bp)) {
787 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
788 trace_shmem_base);
789 return;
790 }
791
792 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
779 793
780 /* validate TRCB signature */ 794 /* validate TRCB signature */
781 mark = REG_RD(bp, addr); 795 mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
787 /* read cyclic buffer pointer */ 801 /* read cyclic buffer pointer */
788 addr += 4; 802 addr += 4;
789 mark = REG_RD(bp, addr); 803 mark = REG_RD(bp, addr);
790 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 804 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
791 + ((mark + 0x3) & ~0x3) - 0x08000000; 805 if (mark >= trace_shmem_base || mark < addr + 4) {
806 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
807 return;
808 }
792 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 809 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
793 810
794 printk("%s", lvl); 811 printk("%s", lvl);
795 812
796 /* dump buffer after the mark */ 813 /* dump buffer after the mark */
797 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { 814 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
798 for (word = 0; word < 8; word++) 815 for (word = 0; word < 8; word++)
799 data[word] = htonl(REG_RD(bp, offset + 4*word)); 816 data[word] = htonl(REG_RD(bp, offset + 4*word));
800 data[8] = 0x0; 817 data[8] = 0x0;
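
bnx2x_fw_dump_lvl() above now validates both the shmem-derived trace buffer
base and the cyclic mark it reads back from the chip before walking the buffer,
so a corrupted value can no longer send REG_RD() scanning arbitrary addresses.
The checks reduce to two interval tests; a sketch with made-up addresses (only
the 0x800-byte trace window size comes from the hunk):

	#include <stdio.h>
	#include <stdint.h>

	#define TRACE_BUFFER_SIZE	0x800

	/* The 0x800-byte trace buffer preceding shmem_base must lie entirely
	 * inside [scratch_base, scratch_base + scratch_size). */
	static int trace_window_ok(uint32_t shmem_base, uint32_t scratch_base,
				   uint32_t scratch_size)
	{
		return shmem_base >= scratch_base + TRACE_BUFFER_SIZE &&
		       shmem_base <  scratch_base + scratch_size;
	}

	/* The mark read back from the chip must point inside the trace buffer. */
	static int mark_ok(uint32_t mark, uint32_t lo, uint32_t hi)
	{
		return mark >= lo && mark < hi;
	}

	int main(void)
	{
		uint32_t scratch = 0xA0000, size = 0x28000;	/* made-up window */
		uint32_t shmem	 = 0xA4000;			/* made-up shmem base */
		uint32_t start	 = shmem - TRACE_BUFFER_SIZE;

		printf("window ok: %d, mark 0xA3F00 ok: %d, mark 0xFFFF0 ok: %d\n",
		       trace_window_ok(shmem, scratch, size),
		       mark_ok(0xA3F00, start + 8, shmem),
		       mark_ok(0xFFFF0, start + 8, shmem));
		return 0;
	}
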
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
4280 pr_cont("%s%s", idx ? ", " : "", blk); 4297 pr_cont("%s%s", idx ? ", " : "", blk);
4281} 4298}
4282 4299
4283static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4300static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4284 int par_num, bool print) 4301 int *par_num, bool print)
4285{ 4302{
4286 int i = 0; 4303 u32 cur_bit;
4287 u32 cur_bit = 0; 4304 bool res;
4305 int i;
4306
4307 res = false;
4308
4288 for (i = 0; sig; i++) { 4309 for (i = 0; sig; i++) {
4289 cur_bit = ((u32)0x1 << i); 4310 cur_bit = (0x1UL << i);
4290 if (sig & cur_bit) { 4311 if (sig & cur_bit) {
4291 switch (cur_bit) { 4312 res |= true; /* Each bit is real error! */
4292 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4313
4293 if (print) { 4314 if (print) {
4294 _print_next_block(par_num++, "BRB"); 4315 switch (cur_bit) {
4316 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4317 _print_next_block((*par_num)++, "BRB");
4295 _print_parity(bp, 4318 _print_parity(bp,
4296 BRB1_REG_BRB1_PRTY_STS); 4319 BRB1_REG_BRB1_PRTY_STS);
4297 } 4320 break;
4298 break; 4321 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4299 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4322 _print_next_block((*par_num)++,
4300 if (print) { 4323 "PARSER");
4301 _print_next_block(par_num++, "PARSER");
4302 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4324 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4303 } 4325 break;
4304 break; 4326 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4305 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4327 _print_next_block((*par_num)++, "TSDM");
4306 if (print) {
4307 _print_next_block(par_num++, "TSDM");
4308 _print_parity(bp, 4328 _print_parity(bp,
4309 TSDM_REG_TSDM_PRTY_STS); 4329 TSDM_REG_TSDM_PRTY_STS);
4310 } 4330 break;
4311 break; 4331 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4312 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4332 _print_next_block((*par_num)++,
4313 if (print) {
4314 _print_next_block(par_num++,
4315 "SEARCHER"); 4333 "SEARCHER");
4316 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4334 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4317 } 4335 break;
4318 break; 4336 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4319 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4337 _print_next_block((*par_num)++, "TCM");
4320 if (print) { 4338 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4321 _print_next_block(par_num++, "TCM"); 4339 break;
4322 _print_parity(bp, 4340 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4323 TCM_REG_TCM_PRTY_STS); 4341 _print_next_block((*par_num)++,
4324 } 4342 "TSEMI");
4325 break;
4326 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4327 if (print) {
4328 _print_next_block(par_num++, "TSEMI");
4329 _print_parity(bp, 4343 _print_parity(bp,
4330 TSEM_REG_TSEM_PRTY_STS_0); 4344 TSEM_REG_TSEM_PRTY_STS_0);
4331 _print_parity(bp, 4345 _print_parity(bp,
4332 TSEM_REG_TSEM_PRTY_STS_1); 4346 TSEM_REG_TSEM_PRTY_STS_1);
4333 } 4347 break;
4334 break; 4348 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4335 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4349 _print_next_block((*par_num)++, "XPB");
4336 if (print) {
4337 _print_next_block(par_num++, "XPB");
4338 _print_parity(bp, GRCBASE_XPB + 4350 _print_parity(bp, GRCBASE_XPB +
4339 PB_REG_PB_PRTY_STS); 4351 PB_REG_PB_PRTY_STS);
4352 break;
4340 } 4353 }
4341 break;
4342 } 4354 }
4343 4355
4344 /* Clear the bit */ 4356 /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4346 } 4358 }
4347 } 4359 }
4348 4360
4349 return par_num; 4361 return res;
4350} 4362}
4351 4363
4352static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4364static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4353 int par_num, bool *global, 4365 int *par_num, bool *global,
4354 bool print) 4366 bool print)
4355{ 4367{
4356 int i = 0; 4368 u32 cur_bit;
4357 u32 cur_bit = 0; 4369 bool res;
4370 int i;
4371
4372 res = false;
4373
4358 for (i = 0; sig; i++) { 4374 for (i = 0; sig; i++) {
4359 cur_bit = ((u32)0x1 << i); 4375 cur_bit = (0x1UL << i);
4360 if (sig & cur_bit) { 4376 if (sig & cur_bit) {
4377 res |= true; /* Each bit is real error! */
4361 switch (cur_bit) { 4378 switch (cur_bit) {
4362 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4379 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4363 if (print) { 4380 if (print) {
4364 _print_next_block(par_num++, "PBF"); 4381 _print_next_block((*par_num)++, "PBF");
4365 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4382 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4366 } 4383 }
4367 break; 4384 break;
4368 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4385 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4369 if (print) { 4386 if (print) {
4370 _print_next_block(par_num++, "QM"); 4387 _print_next_block((*par_num)++, "QM");
4371 _print_parity(bp, QM_REG_QM_PRTY_STS); 4388 _print_parity(bp, QM_REG_QM_PRTY_STS);
4372 } 4389 }
4373 break; 4390 break;
4374 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4391 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4375 if (print) { 4392 if (print) {
4376 _print_next_block(par_num++, "TM"); 4393 _print_next_block((*par_num)++, "TM");
4377 _print_parity(bp, TM_REG_TM_PRTY_STS); 4394 _print_parity(bp, TM_REG_TM_PRTY_STS);
4378 } 4395 }
4379 break; 4396 break;
4380 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4397 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4381 if (print) { 4398 if (print) {
4382 _print_next_block(par_num++, "XSDM"); 4399 _print_next_block((*par_num)++, "XSDM");
4383 _print_parity(bp, 4400 _print_parity(bp,
4384 XSDM_REG_XSDM_PRTY_STS); 4401 XSDM_REG_XSDM_PRTY_STS);
4385 } 4402 }
4386 break; 4403 break;
4387 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4404 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4388 if (print) { 4405 if (print) {
4389 _print_next_block(par_num++, "XCM"); 4406 _print_next_block((*par_num)++, "XCM");
4390 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4407 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4391 } 4408 }
4392 break; 4409 break;
4393 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4410 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4394 if (print) { 4411 if (print) {
4395 _print_next_block(par_num++, "XSEMI"); 4412 _print_next_block((*par_num)++,
4413 "XSEMI");
4396 _print_parity(bp, 4414 _print_parity(bp,
4397 XSEM_REG_XSEM_PRTY_STS_0); 4415 XSEM_REG_XSEM_PRTY_STS_0);
4398 _print_parity(bp, 4416 _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4401 break; 4419 break;
4402 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4420 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4403 if (print) { 4421 if (print) {
4404 _print_next_block(par_num++, 4422 _print_next_block((*par_num)++,
4405 "DOORBELLQ"); 4423 "DOORBELLQ");
4406 _print_parity(bp, 4424 _print_parity(bp,
4407 DORQ_REG_DORQ_PRTY_STS); 4425 DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4409 break; 4427 break;
4410 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4428 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4411 if (print) { 4429 if (print) {
4412 _print_next_block(par_num++, "NIG"); 4430 _print_next_block((*par_num)++, "NIG");
4413 if (CHIP_IS_E1x(bp)) { 4431 if (CHIP_IS_E1x(bp)) {
4414 _print_parity(bp, 4432 _print_parity(bp,
4415 NIG_REG_NIG_PRTY_STS); 4433 NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4423 break; 4441 break;
4424 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4442 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4425 if (print) 4443 if (print)
4426 _print_next_block(par_num++, 4444 _print_next_block((*par_num)++,
4427 "VAUX PCI CORE"); 4445 "VAUX PCI CORE");
4428 *global = true; 4446 *global = true;
4429 break; 4447 break;
4430 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4448 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4431 if (print) { 4449 if (print) {
4432 _print_next_block(par_num++, "DEBUG"); 4450 _print_next_block((*par_num)++,
4451 "DEBUG");
4433 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4452 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4434 } 4453 }
4435 break; 4454 break;
4436 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4455 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4437 if (print) { 4456 if (print) {
4438 _print_next_block(par_num++, "USDM"); 4457 _print_next_block((*par_num)++, "USDM");
4439 _print_parity(bp, 4458 _print_parity(bp,
4440 USDM_REG_USDM_PRTY_STS); 4459 USDM_REG_USDM_PRTY_STS);
4441 } 4460 }
4442 break; 4461 break;
4443 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4462 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4444 if (print) { 4463 if (print) {
4445 _print_next_block(par_num++, "UCM"); 4464 _print_next_block((*par_num)++, "UCM");
4446 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4465 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4447 } 4466 }
4448 break; 4467 break;
4449 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4468 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4450 if (print) { 4469 if (print) {
4451 _print_next_block(par_num++, "USEMI"); 4470 _print_next_block((*par_num)++,
4471 "USEMI");
4452 _print_parity(bp, 4472 _print_parity(bp,
4453 USEM_REG_USEM_PRTY_STS_0); 4473 USEM_REG_USEM_PRTY_STS_0);
4454 _print_parity(bp, 4474 _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4457 break; 4477 break;
4458 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4478 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4459 if (print) { 4479 if (print) {
4460 _print_next_block(par_num++, "UPB"); 4480 _print_next_block((*par_num)++, "UPB");
4461 _print_parity(bp, GRCBASE_UPB + 4481 _print_parity(bp, GRCBASE_UPB +
4462 PB_REG_PB_PRTY_STS); 4482 PB_REG_PB_PRTY_STS);
4463 } 4483 }
4464 break; 4484 break;
4465 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4485 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4466 if (print) { 4486 if (print) {
4467 _print_next_block(par_num++, "CSDM"); 4487 _print_next_block((*par_num)++, "CSDM");
4468 _print_parity(bp, 4488 _print_parity(bp,
4469 CSDM_REG_CSDM_PRTY_STS); 4489 CSDM_REG_CSDM_PRTY_STS);
4470 } 4490 }
4471 break; 4491 break;
4472 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4492 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4473 if (print) { 4493 if (print) {
4474 _print_next_block(par_num++, "CCM"); 4494 _print_next_block((*par_num)++, "CCM");
4475 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4495 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4476 } 4496 }
4477 break; 4497 break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4482 } 4502 }
4483 } 4503 }
4484 4504
4485 return par_num; 4505 return res;
4486} 4506}
4487 4507
4488static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4508static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4489 int par_num, bool print) 4509 int *par_num, bool print)
4490{ 4510{
4491 int i = 0; 4511 u32 cur_bit;
4492 u32 cur_bit = 0; 4512 bool res;
4513 int i;
4514
4515 res = false;
4516
4493 for (i = 0; sig; i++) { 4517 for (i = 0; sig; i++) {
4494 cur_bit = ((u32)0x1 << i); 4518 cur_bit = (0x1UL << i);
4495 if (sig & cur_bit) { 4519 if (sig & cur_bit) {
4496 switch (cur_bit) { 4520 res |= true; /* Each bit is real error! */
4497 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4521 if (print) {
4498 if (print) { 4522 switch (cur_bit) {
4499 _print_next_block(par_num++, "CSEMI"); 4523 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4524 _print_next_block((*par_num)++,
4525 "CSEMI");
4500 _print_parity(bp, 4526 _print_parity(bp,
4501 CSEM_REG_CSEM_PRTY_STS_0); 4527 CSEM_REG_CSEM_PRTY_STS_0);
4502 _print_parity(bp, 4528 _print_parity(bp,
4503 CSEM_REG_CSEM_PRTY_STS_1); 4529 CSEM_REG_CSEM_PRTY_STS_1);
4504 } 4530 break;
4505 break; 4531 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4506 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4532 _print_next_block((*par_num)++, "PXP");
4507 if (print) {
4508 _print_next_block(par_num++, "PXP");
4509 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4533 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4510 _print_parity(bp, 4534 _print_parity(bp,
4511 PXP2_REG_PXP2_PRTY_STS_0); 4535 PXP2_REG_PXP2_PRTY_STS_0);
4512 _print_parity(bp, 4536 _print_parity(bp,
4513 PXP2_REG_PXP2_PRTY_STS_1); 4537 PXP2_REG_PXP2_PRTY_STS_1);
4514 } 4538 break;
4515 break; 4539 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4516 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4540 _print_next_block((*par_num)++,
4517 if (print) 4541 "PXPPCICLOCKCLIENT");
4518 _print_next_block(par_num++, 4542 break;
4519 "PXPPCICLOCKCLIENT"); 4543 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4520 break; 4544 _print_next_block((*par_num)++, "CFC");
4521 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4522 if (print) {
4523 _print_next_block(par_num++, "CFC");
4524 _print_parity(bp, 4545 _print_parity(bp,
4525 CFC_REG_CFC_PRTY_STS); 4546 CFC_REG_CFC_PRTY_STS);
4526 } 4547 break;
4527 break; 4548 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4528 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4549 _print_next_block((*par_num)++, "CDU");
4529 if (print) {
4530 _print_next_block(par_num++, "CDU");
4531 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4550 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4532 } 4551 break;
4533 break; 4552 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4534 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4553 _print_next_block((*par_num)++, "DMAE");
4535 if (print) {
4536 _print_next_block(par_num++, "DMAE");
4537 _print_parity(bp, 4554 _print_parity(bp,
4538 DMAE_REG_DMAE_PRTY_STS); 4555 DMAE_REG_DMAE_PRTY_STS);
4539 } 4556 break;
4540 break; 4557 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4541 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4558 _print_next_block((*par_num)++, "IGU");
4542 if (print) {
4543 _print_next_block(par_num++, "IGU");
4544 if (CHIP_IS_E1x(bp)) 4559 if (CHIP_IS_E1x(bp))
4545 _print_parity(bp, 4560 _print_parity(bp,
4546 HC_REG_HC_PRTY_STS); 4561 HC_REG_HC_PRTY_STS);
4547 else 4562 else
4548 _print_parity(bp, 4563 _print_parity(bp,
4549 IGU_REG_IGU_PRTY_STS); 4564 IGU_REG_IGU_PRTY_STS);
4550 } 4565 break;
4551 break; 4566 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4552 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4567 _print_next_block((*par_num)++, "MISC");
4553 if (print) {
4554 _print_next_block(par_num++, "MISC");
4555 _print_parity(bp, 4568 _print_parity(bp,
4556 MISC_REG_MISC_PRTY_STS); 4569 MISC_REG_MISC_PRTY_STS);
4570 break;
4557 } 4571 }
4558 break;
4559 } 4572 }
4560 4573
4561 /* Clear the bit */ 4574 /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4563 } 4576 }
4564 } 4577 }
4565 4578
4566 return par_num; 4579 return res;
4567} 4580}
4568 4581
4569static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4582static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4570 bool *global, bool print) 4583 int *par_num, bool *global,
4584 bool print)
4571{ 4585{
4572 int i = 0; 4586 bool res = false;
4573 u32 cur_bit = 0; 4587 u32 cur_bit;
4588 int i;
4589
4574 for (i = 0; sig; i++) { 4590 for (i = 0; sig; i++) {
4575 cur_bit = ((u32)0x1 << i); 4591 cur_bit = (0x1UL << i);
4576 if (sig & cur_bit) { 4592 if (sig & cur_bit) {
4577 switch (cur_bit) { 4593 switch (cur_bit) {
4578 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4594 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4579 if (print) 4595 if (print)
4580 _print_next_block(par_num++, "MCP ROM"); 4596 _print_next_block((*par_num)++,
4597 "MCP ROM");
4581 *global = true; 4598 *global = true;
4599 res |= true;
4582 break; 4600 break;
4583 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4601 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4584 if (print) 4602 if (print)
4585 _print_next_block(par_num++, 4603 _print_next_block((*par_num)++,
4586 "MCP UMP RX"); 4604 "MCP UMP RX");
4587 *global = true; 4605 *global = true;
4606 res |= true;
4588 break; 4607 break;
4589 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4608 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4590 if (print) 4609 if (print)
4591 _print_next_block(par_num++, 4610 _print_next_block((*par_num)++,
4592 "MCP UMP TX"); 4611 "MCP UMP TX");
4593 *global = true; 4612 *global = true;
4613 res |= true;
4594 break; 4614 break;
4595 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4615 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4596 if (print) 4616 if (print)
4597 _print_next_block(par_num++, 4617 _print_next_block((*par_num)++,
4598 "MCP SCPAD"); 4618 "MCP SCPAD");
4599 *global = true; 4619 /* clear latched SCPAD PARITY from MCP */
4620 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4621 1UL << 10);
4600 break; 4622 break;
4601 } 4623 }
4602 4624
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4605 } 4627 }
4606 } 4628 }
4607 4629
4608 return par_num; 4630 return res;
4609} 4631}
4610 4632
4611static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4633static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4612 int par_num, bool print) 4634 int *par_num, bool print)
4613{ 4635{
4614 int i = 0; 4636 u32 cur_bit;
4615 u32 cur_bit = 0; 4637 bool res;
4638 int i;
4639
4640 res = false;
4641
4616 for (i = 0; sig; i++) { 4642 for (i = 0; sig; i++) {
4617 cur_bit = ((u32)0x1 << i); 4643 cur_bit = (0x1UL << i);
4618 if (sig & cur_bit) { 4644 if (sig & cur_bit) {
4619 switch (cur_bit) { 4645 res |= true; /* Each bit is real error! */
4620 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4646 if (print) {
4621 if (print) { 4647 switch (cur_bit) {
4622 _print_next_block(par_num++, "PGLUE_B"); 4648 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4649 _print_next_block((*par_num)++,
4650 "PGLUE_B");
4623 _print_parity(bp, 4651 _print_parity(bp,
4624 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4652 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4625 } 4653 break;
4626 break; 4654 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4627 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4655 _print_next_block((*par_num)++, "ATC");
4628 if (print) {
4629 _print_next_block(par_num++, "ATC");
4630 _print_parity(bp, 4656 _print_parity(bp,
4631 ATC_REG_ATC_PRTY_STS); 4657 ATC_REG_ATC_PRTY_STS);
4658 break;
4632 } 4659 }
4633 break;
4634 } 4660 }
4635
4636 /* Clear the bit */ 4661 /* Clear the bit */
4637 sig &= ~cur_bit; 4662 sig &= ~cur_bit;
4638 } 4663 }
4639 } 4664 }
4640 4665
4641 return par_num; 4666 return res;
4642} 4667}
4643 4668
4644static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4669static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4645 u32 *sig) 4670 u32 *sig)
4646{ 4671{
4672 bool res = false;
4673
4647 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4674 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4648 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4675 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4649 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4676 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4660 if (print) 4687 if (print)
4661 netdev_err(bp->dev, 4688 netdev_err(bp->dev,
4662 "Parity errors detected in blocks: "); 4689 "Parity errors detected in blocks: ");
4663 par_num = bnx2x_check_blocks_with_parity0(bp, 4690 res |= bnx2x_check_blocks_with_parity0(bp,
4664 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4691 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4665 par_num = bnx2x_check_blocks_with_parity1(bp, 4692 res |= bnx2x_check_blocks_with_parity1(bp,
4666 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4693 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4667 par_num = bnx2x_check_blocks_with_parity2(bp, 4694 res |= bnx2x_check_blocks_with_parity2(bp,
4668 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4695 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4669 par_num = bnx2x_check_blocks_with_parity3( 4696 res |= bnx2x_check_blocks_with_parity3(bp,
4670 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4697 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4671 par_num = bnx2x_check_blocks_with_parity4(bp, 4698 res |= bnx2x_check_blocks_with_parity4(bp,
4672 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4699 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4673 4700
4674 if (print) 4701 if (print)
4675 pr_cont("\n"); 4702 pr_cont("\n");
4703 }
4676 4704
4677 return true; 4705 return res;
4678 } else
4679 return false;
4680} 4706}
4681 4707
4682/** 4708/**
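
The hunks above change the bnx2x_check_blocks_with_parity* helpers from returning an updated counter to returning a bool and taking the counter through an int * parameter, with bnx2x_parity_attn() OR-ing the per-group results together. A minimal standalone sketch of that calling convention, using a placeholder check routine instead of the real AEU block decoding:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for one bnx2x_check_blocks_with_parityN() helper: reports
 * whether any error bit was seen and bumps the shared block counter
 * through the pointer instead of returning the counter itself. */
static bool check_blocks(unsigned int sig, int *par_num)
{
	bool res = false;
	int i;

	for (i = 0; sig; i++) {
		unsigned int cur_bit = 1U << i;

		if (sig & cur_bit) {
			res = true;			/* each set bit is a real error */
			printf("block %d\n", (*par_num)++);
			sig &= ~cur_bit;		/* clear the bit */
		}
	}
	return res;
}

int main(void)
{
	int par_num = 0;
	bool res = false;

	/* accumulate results the way the reworked bnx2x_parity_attn() does */
	res |= check_blocks(0x5, &par_num);
	res |= check_blocks(0x0, &par_num);
	printf("parity detected: %d, blocks: %d\n", res, par_num);
	return 0;
}
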
@@ -4703,6 +4729,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4703 attn.sig[3] = REG_RD(bp, 4729 attn.sig[3] = REG_RD(bp,
4704 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4730 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705 port*4); 4731 port*4);
4732 /* Since MCP attentions can't be disabled inside the block, we need to
4733 * read AEU registers to see whether they're currently disabled
4734 */
4735 attn.sig[3] &= ((REG_RD(bp,
4736 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4737 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4738 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4739 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4706 4740
4707 if (!CHIP_IS_E1x(bp)) 4741 if (!CHIP_IS_E1x(bp))
4708 attn.sig[4] = REG_RD(bp, 4742 attn.sig[4] = REG_RD(bp,
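
The added comment explains why attn.sig[3] is masked: MCP attentions cannot be disabled at the source, so the driver reads the AEU enable register and drops MCP parity bits that are currently disabled while leaving every other bit untouched. A small sketch of that gating expression, with a made-up bit mask standing in for MISC_AEU_ENABLE_MCP_PRTY_BITS:

#include <stdio.h>

#define MCP_PRTY_BITS	0x00000f00u	/* hypothetical stand-in mask */

/* Keep non-MCP bits untouched; keep MCP bits only where the enable
 * register says they are armed - the shape of the new sig[3] &= ... */
static unsigned int gate_mcp_bits(unsigned int sig, unsigned int enable)
{
	return sig & ((enable & MCP_PRTY_BITS) | ~MCP_PRTY_BITS);
}

int main(void)
{
	/* MCP bits disabled: they drop out, the low bit survives */
	printf("0x%08x\n", gate_mcp_bits(0x00000f01u, 0x0u));
	/* MCP bits enabled: the signal passes through unchanged */
	printf("0x%08x\n", gate_mcp_bits(0x00000f01u, MCP_PRTY_BITS));
	return 0;
}
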
@@ -5447,26 +5481,24 @@ static void bnx2x_timer(unsigned long data)
5447 if (IS_PF(bp) && 5481 if (IS_PF(bp) &&
5448 !BP_NOMCP(bp)) { 5482 !BP_NOMCP(bp)) {
5449 int mb_idx = BP_FW_MB_IDX(bp); 5483 int mb_idx = BP_FW_MB_IDX(bp);
5450 u32 drv_pulse; 5484 u16 drv_pulse;
5451 u32 mcp_pulse; 5485 u16 mcp_pulse;
5452 5486
5453 ++bp->fw_drv_pulse_wr_seq; 5487 ++bp->fw_drv_pulse_wr_seq;
5454 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5488 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5455 /* TBD - add SYSTEM_TIME */
5456 drv_pulse = bp->fw_drv_pulse_wr_seq; 5489 drv_pulse = bp->fw_drv_pulse_wr_seq;
5457 bnx2x_drv_pulse(bp); 5490 bnx2x_drv_pulse(bp);
5458 5491
5459 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & 5492 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5460 MCP_PULSE_SEQ_MASK); 5493 MCP_PULSE_SEQ_MASK);
5461 /* The delta between driver pulse and mcp response 5494 /* The delta between driver pulse and mcp response
5462 * should be 1 (before mcp response) or 0 (after mcp response) 5495 * should not get too big. If the MFW is more than 5 pulses
5496 * behind, we should worry about it enough to generate an error
5497 * log.
5463 */ 5498 */
5464 if ((drv_pulse != mcp_pulse) && 5499 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5465 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 5500 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5466 /* someone lost a heartbeat... */
5467 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5468 drv_pulse, mcp_pulse); 5501 drv_pulse, mcp_pulse);
5469 }
5470 } 5502 }
5471 5503
5472 if (bp->state == BNX2X_STATE_OPEN) 5504 if (bp->state == BNX2X_STATE_OPEN)
@@ -7120,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7120 int port = BP_PORT(bp); 7152 int port = BP_PORT(bp);
7121 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7153 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7122 u32 low, high; 7154 u32 low, high;
7123 u32 val; 7155 u32 val, reg;
7124 7156
7125 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7157 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7126 7158
@@ -7265,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7265 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7297 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7266 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7298 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7267 7299
7300 /* SCPAD_PARITY should NOT trigger close the gates */
7301 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7302 REG_WR(bp, reg,
7303 REG_RD(bp, reg) &
7304 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7305
7306 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7307 REG_WR(bp, reg,
7308 REG_RD(bp, reg) &
7309 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7310
7268 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7311 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7269 7312
7270 if (!CHIP_IS_E1x(bp)) { 7313 if (!CHIP_IS_E1x(bp)) {
@@ -11679,9 +11722,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11679static int bnx2x_open(struct net_device *dev) 11722static int bnx2x_open(struct net_device *dev)
11680{ 11723{
11681 struct bnx2x *bp = netdev_priv(dev); 11724 struct bnx2x *bp = netdev_priv(dev);
11682 bool global = false;
11683 int other_engine = BP_PATH(bp) ? 0 : 1;
11684 bool other_load_status, load_status;
11685 int rc; 11725 int rc;
11686 11726
11687 bp->stats_init = true; 11727 bp->stats_init = true;
@@ -11697,6 +11737,10 @@ static int bnx2x_open(struct net_device *dev)
11697 * Parity recovery is only relevant for PF driver. 11737 * Parity recovery is only relevant for PF driver.
11698 */ 11738 */
11699 if (IS_PF(bp)) { 11739 if (IS_PF(bp)) {
11740 int other_engine = BP_PATH(bp) ? 0 : 1;
11741 bool other_load_status, load_status;
11742 bool global = false;
11743
11700 other_load_status = bnx2x_get_load_status(bp, other_engine); 11744 other_load_status = bnx2x_get_load_status(bp, other_engine);
11701 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 11745 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11702 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 11746 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -12074,7 +12118,6 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12074 struct device *dev = &bp->pdev->dev; 12118 struct device *dev = &bp->pdev->dev;
12075 12119
12076 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 12120 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
12077 bp->flags |= USING_DAC_FLAG;
12078 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 12121 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
12079 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 12122 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
12080 return -EIO; 12123 return -EIO;
@@ -12242,8 +12285,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12242 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12285 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12243 12286
12244 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 12287 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12245 if (bp->flags & USING_DAC_FLAG) 12288 dev->features |= NETIF_F_HIGHDMA;
12246 dev->features |= NETIF_F_HIGHDMA;
12247 12289
12248 /* Add Loopback capability to the device */ 12290 /* Add Loopback capability to the device */
12249 dev->hw_features |= NETIF_F_LOOPBACK; 12291 dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12606,24 +12648,24 @@ static int set_max_cos_est(int chip_id)
12606 return BNX2X_MULTI_TX_COS_E1X; 12648 return BNX2X_MULTI_TX_COS_E1X;
12607 case BCM57712: 12649 case BCM57712:
12608 case BCM57712_MF: 12650 case BCM57712_MF:
12609 case BCM57712_VF:
12610 return BNX2X_MULTI_TX_COS_E2_E3A0; 12651 return BNX2X_MULTI_TX_COS_E2_E3A0;
12611 case BCM57800: 12652 case BCM57800:
12612 case BCM57800_MF: 12653 case BCM57800_MF:
12613 case BCM57800_VF:
12614 case BCM57810: 12654 case BCM57810:
12615 case BCM57810_MF: 12655 case BCM57810_MF:
12616 case BCM57840_4_10: 12656 case BCM57840_4_10:
12617 case BCM57840_2_20: 12657 case BCM57840_2_20:
12618 case BCM57840_O: 12658 case BCM57840_O:
12619 case BCM57840_MFO: 12659 case BCM57840_MFO:
12620 case BCM57810_VF:
12621 case BCM57840_MF: 12660 case BCM57840_MF:
12622 case BCM57840_VF:
12623 case BCM57811: 12661 case BCM57811:
12624 case BCM57811_MF: 12662 case BCM57811_MF:
12625 case BCM57811_VF:
12626 return BNX2X_MULTI_TX_COS_E3B0; 12663 return BNX2X_MULTI_TX_COS_E3B0;
12664 case BCM57712_VF:
12665 case BCM57800_VF:
12666 case BCM57810_VF:
12667 case BCM57840_VF:
12668 case BCM57811_VF:
12627 return 1; 12669 return 1;
12628 default: 12670 default:
12629 pr_err("Unknown board_type (%d), aborting\n", chip_id); 12671 pr_err("Unknown board_type (%d), aborting\n", chip_id);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2604b6204abe..bf08ad68b405 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
470 bnx2x_vfop_qdtor, cmd->done); 470 bnx2x_vfop_qdtor, cmd->done);
471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
472 cmd->block); 472 cmd->block);
473 } else {
474 BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
475 return -ENOMEM;
473 } 476 }
474 DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
475 vf->abs_vfid, vfop->rc);
476 return -ENOMEM;
477} 477}
478 478
479static void 479static void
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1819 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1820 if (fid & IGU_FID_ENCODE_IS_PF) 1820 if (fid & IGU_FID_ENCODE_IS_PF)
1821 current_pf = fid & IGU_FID_PF_NUM_MASK; 1821 current_pf = fid & IGU_FID_PF_NUM_MASK;
1822 else if (current_pf == BP_ABS_FUNC(bp)) 1822 else if (current_pf == BP_FUNC(bp))
1823 bnx2x_vf_set_igu_info(bp, sb_id, 1823 bnx2x_vf_set_igu_info(bp, sb_id,
1824 (fid & IGU_FID_VF_NUM_MASK)); 1824 (fid & IGU_FID_VF_NUM_MASK));
1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1825 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3180 /* set local queue arrays */ 3180 /* set local queue arrays */
3181 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3181 vf->vfqs = &bp->vfdb->vfqs[qcount];
3182 qcount += vf_sb_count(vf); 3182 qcount += vf_sb_count(vf);
3183 bnx2x_iov_static_resc(bp, vf);
3183 } 3184 }
3184 3185
3185 /* prepare msix vectors in VF configuration space */ 3186 /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3187 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3188 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3188 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3189 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3189 num_vf_queues); 3190 num_vf_queues);
3191 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3192 vf_idx, num_vf_queues);
3190 } 3193 }
3191 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3194 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3192 3195
@@ -3387,14 +3390,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3387 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3390 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3388 if (rc) { 3391 if (rc) {
3389 BNX2X_ERR("failed to delete eth macs\n"); 3392 BNX2X_ERR("failed to delete eth macs\n");
3390 return -EINVAL; 3393 rc = -EINVAL;
3394 goto out;
3391 } 3395 }
3392 3396
3393 /* remove existing uc list macs */ 3397 /* remove existing uc list macs */
3394 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3398 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3395 if (rc) { 3399 if (rc) {
3396 BNX2X_ERR("failed to delete uc_list macs\n"); 3400 BNX2X_ERR("failed to delete uc_list macs\n");
3397 return -EINVAL; 3401 rc = -EINVAL;
3402 goto out;
3398 } 3403 }
3399 3404
3400 /* configure the new mac to device */ 3405 /* configure the new mac to device */
@@ -3402,6 +3407,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3402 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3407 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3403 BNX2X_ETH_MAC, &ramrod_flags); 3408 BNX2X_ETH_MAC, &ramrod_flags);
3404 3409
3410out:
3405 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3411 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3406 } 3412 }
3407 3413
@@ -3464,7 +3470,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3464 &ramrod_flags); 3470 &ramrod_flags);
3465 if (rc) { 3471 if (rc) {
3466 BNX2X_ERR("failed to delete vlans\n"); 3472 BNX2X_ERR("failed to delete vlans\n");
3467 return -EINVAL; 3473 rc = -EINVAL;
3474 goto out;
3468 } 3475 }
3469 3476
3470 /* send queue update ramrod to configure default vlan and silent 3477 /* send queue update ramrod to configure default vlan and silent
@@ -3498,7 +3505,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3498 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3505 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3499 if (rc) { 3506 if (rc) {
3500 BNX2X_ERR("failed to configure vlan\n"); 3507 BNX2X_ERR("failed to configure vlan\n");
3501 return -EINVAL; 3508 rc = -EINVAL;
3509 goto out;
3502 } 3510 }
3503 3511
3504 /* configure default vlan to vf queue and set silent 3512 /* configure default vlan to vf queue and set silent
@@ -3516,18 +3524,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3516 rc = bnx2x_queue_state_change(bp, &q_params); 3524 rc = bnx2x_queue_state_change(bp, &q_params);
3517 if (rc) { 3525 if (rc) {
3518 BNX2X_ERR("Failed to configure default VLAN\n"); 3526 BNX2X_ERR("Failed to configure default VLAN\n");
3519 return rc; 3527 goto out;
3520 } 3528 }
3521 3529
3522 /* clear the flag indicating that this VF needs its vlan 3530 /* clear the flag indicating that this VF needs its vlan
3523 * (will only be set if the HV configured th Vlan before vf was 3531 * (will only be set if the HV configured the Vlan before vf was
3524 * and we were called because the VF came up later 3532 * up and we were called because the VF came up later
3525 */ 3533 */
3534out:
3526 vf->cfg_flags &= ~VF_CFG_VLAN; 3535 vf->cfg_flags &= ~VF_CFG_VLAN;
3527
3528 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3536 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3529 } 3537 }
3530 return 0; 3538 return rc;
3531} 3539}
3532 3540
3533/* crc is the first field in the bulletin board. Compute the crc over the 3541/* crc is the first field in the bulletin board. Compute the crc over the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 86436c77af03..3b75070411aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
196 196
197 } else if (bp->func_stx) { 197 } else if (bp->func_stx) {
198 *stats_comp = 0; 198 *stats_comp = 0;
199 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 199 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
200 } 200 }
201} 201}
202 202
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 6cfb88732452..28757dfacf0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -980,7 +980,7 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
980 dmae.len = len32; 980 dmae.len = len32;
981 981
982 /* issue the command and wait for completion */ 982 /* issue the command and wait for completion */
983 return bnx2x_issue_dmae_with_comp(bp, &dmae); 983 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
984} 984}
985 985
986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) 986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1765 switch (mbx->first_tlv.tl.type) { 1765 switch (mbx->first_tlv.tl.type) {
1766 case CHANNEL_TLV_ACQUIRE: 1766 case CHANNEL_TLV_ACQUIRE:
1767 bnx2x_vf_mbx_acquire(bp, vf, mbx); 1767 bnx2x_vf_mbx_acquire(bp, vf, mbx);
1768 break; 1768 return;
1769 case CHANNEL_TLV_INIT: 1769 case CHANNEL_TLV_INIT:
1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx); 1770 bnx2x_vf_mbx_init_vf(bp, vf, mbx);
1771 break; 1771 return;
1772 case CHANNEL_TLV_SETUP_Q: 1772 case CHANNEL_TLV_SETUP_Q:
1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx); 1773 bnx2x_vf_mbx_setup_q(bp, vf, mbx);
1774 break; 1774 return;
1775 case CHANNEL_TLV_SET_Q_FILTERS: 1775 case CHANNEL_TLV_SET_Q_FILTERS:
1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); 1776 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
1777 break; 1777 return;
1778 case CHANNEL_TLV_TEARDOWN_Q: 1778 case CHANNEL_TLV_TEARDOWN_Q:
1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); 1779 bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
1780 break; 1780 return;
1781 case CHANNEL_TLV_CLOSE: 1781 case CHANNEL_TLV_CLOSE:
1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx); 1782 bnx2x_vf_mbx_close_vf(bp, vf, mbx);
1783 break; 1783 return;
1784 case CHANNEL_TLV_RELEASE: 1784 case CHANNEL_TLV_RELEASE:
1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx); 1785 bnx2x_vf_mbx_release_vf(bp, vf, mbx);
1786 break; 1786 return;
1787 case CHANNEL_TLV_UPDATE_RSS: 1787 case CHANNEL_TLV_UPDATE_RSS:
1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 1788 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1789 break; 1789 return;
1790 } 1790 }
1791 1791
1792 } else { 1792 } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1802 for (i = 0; i < 20; i++) 1802 for (i = 0; i < 20; i++)
1803 DP_CONT(BNX2X_MSG_IOV, "%x ", 1803 DP_CONT(BNX2X_MSG_IOV, "%x ",
1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); 1804 mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
1805 }
1805 1806
1806 /* test whether we can respond to the VF (do we have an address 1807 /* can we respond to VF (do we have an address for it?) */
1807 * for it?) 1808 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
1808 */ 1809 /* mbx_resp uses the op_rc of the VF */
1809 if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { 1810 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1810 /* mbx_resp uses the op_rc of the VF */
1811 vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
1812 1811
1813 /* notify the VF that we do not support this request */ 1812 /* notify the VF that we do not support this request */
1814 bnx2x_vf_mbx_resp(bp, vf); 1813 bnx2x_vf_mbx_resp(bp, vf);
1815 } else { 1814 } else {
1816 /* can't send a response since this VF is unknown to us 1815 /* can't send a response since this VF is unknown to us
1817 * just ack the FW to release the mailbox and unlock 1816 * just ack the FW to release the mailbox and unlock
1818 * the channel. 1817 * the channel.
1819 */ 1818 */
1820 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); 1819 storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
1821 mmiowb(); 1820 /* Firmware ack should be written before unlocking channel */
1822 bnx2x_unlock_vf_pf_channel(bp, vf, 1821 mmiowb();
1823 mbx->first_tlv.tl.type); 1822 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
1824 }
1825 } 1823 }
1826} 1824}
1827 1825
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e1..48f52882a22b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ 106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
107 107
108#define XGMAC_ADDR_AE 0x80000000 108#define XGMAC_ADDR_AE 0x80000000
109#define XGMAC_MAX_FILTER_ADDR 31
110 109
111/* PMT Control and Status */ 110/* PMT Control and Status */
112#define XGMAC_PMT_POINTER_RESET 0x80000000 111#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
384 struct device *device; 383 struct device *device;
385 struct napi_struct napi; 384 struct napi_struct napi;
386 385
386 int max_macs;
387 struct xgmac_extra_stats xstats; 387 struct xgmac_extra_stats xstats;
388 388
389 spinlock_t stats_lock; 389 spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", 1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1292 netdev_mc_count(dev), netdev_uc_count(dev)); 1292 netdev_mc_count(dev), netdev_uc_count(dev));
1293 1293
1294 if (dev->flags & IFF_PROMISC) { 1294 if (dev->flags & IFF_PROMISC)
1295 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); 1295 value |= XGMAC_FRAME_FILTER_PR;
1296 return;
1297 }
1298 1296
1299 memset(hash_filter, 0, sizeof(hash_filter)); 1297 memset(hash_filter, 0, sizeof(hash_filter));
1300 1298
1301 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { 1299 if (netdev_uc_count(dev) > priv->max_macs) {
1302 use_hash = true; 1300 use_hash = true;
1303 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; 1301 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1304 } 1302 }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1321 goto out; 1319 goto out;
1322 } 1320 }
1323 1321
1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1322 if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
1325 use_hash = true; 1323 use_hash = true;
1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1324 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else { 1325 } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1342 } 1340 }
1343 1341
1344out: 1342out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) 1343 for (i = reg; i <= priv->max_macs; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, reg); 1344 xgmac_set_mac_addr(ioaddr, NULL, i);
1347 for (i = 0; i < XGMAC_NUM_HASH; i++) 1345 for (i = 0; i < XGMAC_NUM_HASH; i++)
1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1346 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1349 1347
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
1761 uid = readl(priv->base + XGMAC_VERSION); 1759 uid = readl(priv->base + XGMAC_VERSION);
1762 netdev_info(ndev, "h/w version is 0x%x\n", uid); 1760 netdev_info(ndev, "h/w version is 0x%x\n", uid);
1763 1761
1762 /* Figure out how many valid mac address filter registers we have */
1763 writel(1, priv->base + XGMAC_ADDR_HIGH(31));
1764 if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
1765 priv->max_macs = 31;
1766 else
1767 priv->max_macs = 7;
1768
1764 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1769 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1765 ndev->irq = platform_get_irq(pdev, 0); 1770 ndev->irq = platform_get_irq(pdev, 0);
1766 if (ndev->irq == -ENXIO) { 1771 if (ndev->irq == -ENXIO) {
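
The xgmac probe change discovers how many MAC filter registers the block actually implements by writing a known value to the highest candidate slot and checking whether it reads back, instead of hard-coding XGMAC_MAX_FILTER_ADDR. A rough sketch of that write-then-readback detection against a simulated register file (the real driver of course does this through MMIO):

#include <stdio.h>

/* Simulated register file: only the first 8 "address high" slots exist,
 * so a write to slot 31 does not stick. */
static unsigned int regs[8];

static void reg_write(int idx, unsigned int val)
{
	if (idx < 8)
		regs[idx] = val;
}

static unsigned int reg_read(int idx)
{
	return idx < 8 ? regs[idx] : 0;
}

int main(void)
{
	int max_macs;

	/* Probe: write a known value to the highest possible filter
	 * register and see whether it reads back. */
	reg_write(31, 1);
	max_macs = (reg_read(31) == 1) ? 31 : 7;

	printf("max_macs = %d\n", max_macs);	/* 7 on this simulated part */
	return 0;
}
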
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5f5896e522d2..a7a941b1a655 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
158 158
159/* DM9000 network board routine ---------------------------- */ 159/* DM9000 network board routine ---------------------------- */
160 160
161static void
162dm9000_reset(board_info_t * db)
163{
164 dev_dbg(db->dev, "resetting device\n");
165
166 /* RESET device */
167 writeb(DM9000_NCR, db->io_addr);
168 udelay(200);
169 writeb(NCR_RST, db->io_data);
170 udelay(200);
171}
172
173/* 161/*
174 * Read a byte from I/O port 162 * Read a byte from I/O port
175 */ 163 */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
191 writeb(value, db->io_data); 179 writeb(value, db->io_data);
192} 180}
193 181
182static void
183dm9000_reset(board_info_t *db)
184{
185 dev_dbg(db->dev, "resetting device\n");
186
187 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
188 * The essential point is that we have to do a double reset, and the
189 * instruction is to set LBK into MAC internal loopback mode.
190 */
191 iow(db, DM9000_NCR, 0x03);
192 udelay(100); /* Application note says at least 20 us */
193 if (ior(db, DM9000_NCR) & 1)
194 dev_err(db->dev, "dm9000 did not respond to first reset\n");
195
196 iow(db, DM9000_NCR, 0);
197 iow(db, DM9000_NCR, 0x03);
198 udelay(100);
199 if (ior(db, DM9000_NCR) & 1)
200 dev_err(db->dev, "dm9000 did not respond to second reset\n");
201}
202
194/* routines for sending block to chip */ 203/* routines for sending block to chip */
195 204
196static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) 205static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
744static void dm9000_show_carrier(board_info_t *db, 753static void dm9000_show_carrier(board_info_t *db,
745 unsigned carrier, unsigned nsr) 754 unsigned carrier, unsigned nsr)
746{ 755{
756 int lpa;
747 struct net_device *ndev = db->ndev; 757 struct net_device *ndev = db->ndev;
758 struct mii_if_info *mii = &db->mii;
748 unsigned ncr = dm9000_read_locked(db, DM9000_NCR); 759 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
749 760
750 if (carrier) 761 if (carrier) {
751 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", 762 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
763 dev_info(db->dev,
764 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
752 ndev->name, (nsr & NSR_SPEED) ? 10 : 100, 765 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
753 (ncr & NCR_FDX) ? "full" : "half"); 766 (ncr & NCR_FDX) ? "full" : "half", lpa);
754 else 767 } else {
755 dev_info(db->dev, "%s: link down\n", ndev->name); 768 dev_info(db->dev, "%s: link down\n", ndev->name);
769 }
756} 770}
757 771
758static void 772static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
890 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 904 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
891 905
892 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 906 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
907 iow(db, DM9000_GPR, 0);
893 908
894 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 909 /* If we are dealing with DM9000B, some extra steps are required: a
895 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ 910 * manual phy reset, and setting init params.
911 */
912 if (db->type == TYPE_DM9000B) {
913 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
914 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
915 }
896 916
897 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 917 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
898 918
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ace5050dba38..db020230bd0b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
91#define BE_MAX_EQD 96u 92#define BE_MAX_EQD 96u
92#define BE_MAX_TX_FRAG_COUNT 30 93#define BE_MAX_TX_FRAG_COUNT 30
93 94
@@ -333,6 +334,7 @@ enum vf_state {
333 334
334#define BE_FLAGS_LINK_STATUS_INIT 1 335#define BE_FLAGS_LINK_STATUS_INIT 1
335#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 336#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
337#define BE_FLAGS_VLAN_PROMISC (1 << 4)
336#define BE_FLAGS_NAPI_ENABLED (1 << 9) 338#define BE_FLAGS_NAPI_ENABLED (1 << 9)
337#define BE_UC_PMAC_COUNT 30 339#define BE_UC_PMAC_COUNT 30
338#define BE_VF_UC_PMAC_COUNT 2 340#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1ab5dab11eff..c08fd32bb8e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
180 dev_err(&adapter->pdev->dev, 180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n", 181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status); 182 opcode, subsystem, compl_status, extd_status);
183
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
185 return extd_status;
183 } 186 }
184 } 187 }
185done: 188done:
@@ -1195,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1195 1198
1196 if (lancer_chip(adapter)) { 1199 if (lancer_chip(adapter)) {
1197 req->hdr.version = 1; 1200 req->hdr.version = 1;
1198 req->if_id = cpu_to_le16(adapter->if_handle);
1199 } else if (BEx_chip(adapter)) { 1201 } else if (BEx_chip(adapter)) {
1200 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1202 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1201 req->hdr.version = 2; 1203 req->hdr.version = 2;
@@ -1203,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1203 req->hdr.version = 2; 1205 req->hdr.version = 2;
1204 } 1206 }
1205 1207
1208 if (req->hdr.version > 0)
1209 req->if_id = cpu_to_le16(adapter->if_handle);
1206 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1210 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1207 req->ulp_num = BE_ULP1_NUM; 1211 req->ulp_num = BE_ULP1_NUM;
1208 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1212 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1812,6 +1816,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1812 } else if (flags & IFF_ALLMULTI) { 1816 } else if (flags & IFF_ALLMULTI) {
1813 req->if_flags_mask = req->if_flags = 1817 req->if_flags_mask = req->if_flags =
1814 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1818 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1819 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1820 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1821
1822 if (value == ON)
1823 req->if_flags =
1824 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1815 } else { 1825 } else {
1816 struct netdev_hw_addr *ha; 1826 struct netdev_hw_addr *ha;
1817 int i = 0; 1827 int i = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d026226db88c..108ca8abf0af 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
60 MCC_STATUS_NOT_SUPPORTED = 66 60 MCC_STATUS_NOT_SUPPORTED = 66
61}; 61};
62 62
63#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
64
63#define CQE_STATUS_COMPL_MASK 0xFFFF 65#define CQE_STATUS_COMPL_MASK 0xFFFF
64#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 66#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
65#define CQE_STATUS_EXTD_MASK 0xFFFF 67#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc {
1791 u8 acpi_params; 1793 u8 acpi_params;
1792 u8 wol_param; 1794 u8 wol_param;
1793 u16 rsvd7; 1795 u16 rsvd7;
1794 u32 rsvd8[3]; 1796 u32 rsvd8[7];
1795} __packed; 1797} __packed;
1796 1798
1797struct be_cmd_req_get_func_config { 1799struct be_cmd_req_get_func_config {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 100b528b9bd0..2c38cc402119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
855 unsigned int eth_hdr_len; 855 unsigned int eth_hdr_len;
856 struct iphdr *ip; 856 struct iphdr *ip;
857 857
858 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less 858 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
859 * may cause a transmit stall on that port. So the work-around is to 859 * may cause a transmit stall on that port. So the work-around is to
860 * pad such packets to a 36-byte length. 860 * pad short packets (<= 32 bytes) to a 36-byte length.
861 */ 861 */
862 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { 862 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
863 if (skb_padto(skb, 36)) 863 if (skb_padto(skb, 36))
864 goto tx_drop; 864 goto tx_drop;
865 skb->len = 36; 865 skb->len = 36;
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
1013 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1013 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1014 vids, num, 1, 0); 1014 vids, num, 1, 0);
1015 1015
1016 /* Set to VLAN promisc mode as setting VLAN filter failed */
1017 if (status) { 1016 if (status) {
1018 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); 1017 /* Set to VLAN promisc mode as setting VLAN filter failed */
1019 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); 1018 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1020 goto set_vlan_promisc; 1019 goto set_vlan_promisc;
1020 dev_err(&adapter->pdev->dev,
1021 "Setting HW VLAN filtering failed.\n");
1022 } else {
1023 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1024 /* hw VLAN filtering re-enabled. */
1025 status = be_cmd_rx_filter(adapter,
1026 BE_FLAGS_VLAN_PROMISC, OFF);
1027 if (!status) {
1028 dev_info(&adapter->pdev->dev,
1029 "Disabling VLAN Promiscuous mode.\n");
1030 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1031 dev_info(&adapter->pdev->dev,
1032 "Re-Enabling HW VLAN filtering\n");
1033 }
1034 }
1021 } 1035 }
1022 1036
1023 return status; 1037 return status;
1024 1038
1025set_vlan_promisc: 1039set_vlan_promisc:
1026 status = be_cmd_vlan_config(adapter, adapter->if_handle, 1040 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1027 NULL, 0, 1, 1); 1041
1042 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1043 if (!status) {
1044 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1045 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1046 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1047 } else
1048 dev_err(&adapter->pdev->dev,
1049 "Failed to enable VLAN Promiscuous mode.\n");
1028 return status; 1050 return status;
1029} 1051}
1030 1052
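
The reworked be_vid_config() no longer treats every programming failure as filter exhaustion: only the insufficient-resources status falls back to VLAN promiscuous mode, and once a later filter update succeeds the driver turns promiscuous mode back off. A schematic sketch of that fallback/recovery flow, with invented status codes and a fake filter limit:

#include <stdbool.h>
#include <stdio.h>

#define ERR_NO_RESOURCES	0x16	/* stand-in "out of filters" status */

static bool vlan_promisc;

/* Pretend HW call: fails once the filter table is full. */
static int hw_set_vlan_filters(int num)
{
	return num > 64 ? ERR_NO_RESOURCES : 0;
}

/* Mirrors the reworked flow: fall back to VLAN promiscuous mode only on
 * an out-of-resources error, and drop it once programming succeeds. */
static int vid_config(int num_vids)
{
	int status = hw_set_vlan_filters(num_vids);

	if (status == ERR_NO_RESOURCES) {
		printf("Exhausted VLAN HW filters, enabling VLAN promisc\n");
		vlan_promisc = true;
		return 0;
	}
	if (!status && vlan_promisc) {
		printf("Re-enabling HW VLAN filtering\n");
		vlan_promisc = false;
	}
	return status;
}

int main(void)
{
	vid_config(100);	/* overflows: switch to promisc */
	vid_config(10);		/* fits again: back to HW filtering */
	return 0;
}
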
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1033 struct be_adapter *adapter = netdev_priv(netdev); 1055 struct be_adapter *adapter = netdev_priv(netdev);
1034 int status = 0; 1056 int status = 0;
1035 1057
1036 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1037 status = -EINVAL;
1038 goto ret;
1039 }
1040 1058
1041 /* Packets with VID 0 are always received by Lancer by default */ 1059 /* Packets with VID 0 are always received by Lancer by default */
1042 if (lancer_chip(adapter) && vid == 0) 1060 if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1059 struct be_adapter *adapter = netdev_priv(netdev); 1077 struct be_adapter *adapter = netdev_priv(netdev);
1060 int status = 0; 1078 int status = 0;
1061 1079
1062 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063 status = -EINVAL;
1064 goto ret;
1065 }
1066
1067 /* Packets with VID 0 are always received by Lancer by default */ 1080 /* Packets with VID 0 are always received by Lancer by default */
1068 if (lancer_chip(adapter) && vid == 0) 1081 if (lancer_chip(adapter) && vid == 0)
1069 goto ret; 1082 goto ret;
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1188 1201
1189 vi->vf = vf; 1202 vi->vf = vf;
1190 vi->tx_rate = vf_cfg->tx_rate; 1203 vi->tx_rate = vf_cfg->tx_rate;
1191 vi->vlan = vf_cfg->vlan_tag; 1204 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1192 vi->qos = 0; 1205 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1193 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1206 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1194 1207
1195 return 0; 1208 return 0;
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
1199 int vf, u16 vlan, u8 qos) 1212 int vf, u16 vlan, u8 qos)
1200{ 1213{
1201 struct be_adapter *adapter = netdev_priv(netdev); 1214 struct be_adapter *adapter = netdev_priv(netdev);
1215 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1202 int status = 0; 1216 int status = 0;
1203 1217
1204 if (!sriov_enabled(adapter)) 1218 if (!sriov_enabled(adapter))
1205 return -EPERM; 1219 return -EPERM;
1206 1220
1207 if (vf >= adapter->num_vfs || vlan > 4095) 1221 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1208 return -EINVAL; 1222 return -EINVAL;
1209 1223
1210 if (vlan) { 1224 if (vlan || qos) {
1211 if (adapter->vf_cfg[vf].vlan_tag != vlan) { 1225 vlan |= qos << VLAN_PRIO_SHIFT;
1226 if (vf_cfg->vlan_tag != vlan) {
1212 /* If this is new value, program it. Else skip. */ 1227 /* If this is new value, program it. Else skip. */
1213 adapter->vf_cfg[vf].vlan_tag = vlan; 1228 vf_cfg->vlan_tag = vlan;
1214 1229 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1215 status = be_cmd_set_hsw_config(adapter, vlan, 1230 vf_cfg->if_handle, 0);
1216 vf + 1, adapter->vf_cfg[vf].if_handle, 0);
1217 } 1231 }
1218 } else { 1232 } else {
1219 /* Reset Transparent Vlan Tagging. */ 1233 /* Reset Transparent Vlan Tagging. */
1220 adapter->vf_cfg[vf].vlan_tag = 0; 1234 vf_cfg->vlan_tag = 0;
1221 vlan = adapter->vf_cfg[vf].def_vid; 1235 vlan = vf_cfg->def_vid;
1222 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1236 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1223 adapter->vf_cfg[vf].if_handle, 0); 1237 vf_cfg->if_handle, 0);
1224 } 1238 }
1225 1239
1226 1240
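
be_set_vf_vlan() now validates qos and folds it into the tag with qos << VLAN_PRIO_SHIFT, and be_get_vf_config() splits the stored tag back into VID and priority with VLAN_VID_MASK and VLAN_PRIO_SHIFT. A few lines showing that packing, using the standard 802.1Q layout (priority in bits 15:13, VID in bits 11:0):

#include <stdio.h>

#define VLAN_PRIO_SHIFT	13
#define VLAN_VID_MASK	0x0fff

int main(void)
{
	unsigned short vid = 100, qos = 5, tag;

	/* pack priority and VLAN id into one tag */
	tag = (qos << VLAN_PRIO_SHIFT) | vid;

	/* and split it back out, as the get_vf_config side does */
	printf("tag=0x%04x vid=%u qos=%u\n",
	       tag, tag & VLAN_VID_MASK, tag >> VLAN_PRIO_SHIFT);
	return 0;
}
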
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter,
2963 2977
2964 if (adapter->function_mode & FLEX10_MODE) 2978 if (adapter->function_mode & FLEX10_MODE)
2965 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 2979 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2980 else if (adapter->function_mode & UMC_ENABLED)
2981 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
2966 else 2982 else
2967 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 2983 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2968 res->max_mcast_mac = BE_MAX_MC; 2984 res->max_mcast_mac = BE_MAX_MC;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4eaadeb572f..9fbe4dda7a0e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,6 +88,7 @@
88 88
89#include <asm/io.h> 89#include <asm/io.h>
90#include <asm/reg.h> 90#include <asm/reg.h>
91#include <asm/mpc85xx.h>
91#include <asm/irq.h> 92#include <asm/irq.h>
92#include <asm/uaccess.h> 93#include <asm/uaccess.h>
93#include <linux/module.h> 94#include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
939 } 940 }
940} 941}
941 942
942static void gfar_detect_errata(struct gfar_private *priv) 943static void __gfar_detect_errata_83xx(struct gfar_private *priv)
943{ 944{
944 struct device *dev = &priv->ofdev->dev;
945 unsigned int pvr = mfspr(SPRN_PVR); 945 unsigned int pvr = mfspr(SPRN_PVR);
946 unsigned int svr = mfspr(SPRN_SVR); 946 unsigned int svr = mfspr(SPRN_SVR);
947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ 947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
958 priv->errata |= GFAR_ERRATA_76; 958 priv->errata |= GFAR_ERRATA_76;
959 959
960 /* MPC8313 and MPC837x all rev */ 960 /* MPC8313 Rev < 2.0 */
961 if ((pvr == 0x80850010 && mod == 0x80b0) || 961 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
962 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 962 priv->errata |= GFAR_ERRATA_12;
963 priv->errata |= GFAR_ERRATA_A002; 963}
964 964
965 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 965static void __gfar_detect_errata_85xx(struct gfar_private *priv)
966 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 966{
967 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 967 unsigned int svr = mfspr(SPRN_SVR);
968
969 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
968 priv->errata |= GFAR_ERRATA_12; 970 priv->errata |= GFAR_ERRATA_12;
971 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
972 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
973 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
974}
975
976static void gfar_detect_errata(struct gfar_private *priv)
977{
978 struct device *dev = &priv->ofdev->dev;
979
980 /* no plans to fix */
981 priv->errata |= GFAR_ERRATA_A002;
982
983 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
984 __gfar_detect_errata_85xx(priv);
985 else /* non-mpc85xx parts, i.e. e300 core based */
986 __gfar_detect_errata_83xx(priv);
969 987
970 if (priv->errata) 988 if (priv->errata)
971 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 989 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1599 /* Normaly TSEC should not hang on GRS commands, so we should 1617 /* Normaly TSEC should not hang on GRS commands, so we should
1600 * actually wait for IEVENT_GRSC flag. 1618 * actually wait for IEVENT_GRSC flag.
1601 */ 1619 */
1602 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1620 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1603 return 0; 1621 return 0;
1604 1622
1605 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are 1623 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908ae..e006a09ba899 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
452 err = -ENODEV; 452 err = -ENODEV;
453 453
454 etsects->caps = ptp_gianfar_caps; 454 etsects->caps = ptp_gianfar_caps;
455 etsects->cksel = DEFAULT_CKSEL; 455
456 if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
457 etsects->cksel = DEFAULT_CKSEL;
456 458
457 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || 459 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
458 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || 460 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0c524fa9f811..cfef7fc32cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
701 701
702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 702 details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
703 if (cmd_details) { 703 if (cmd_details) {
704 memcpy(details, cmd_details, 704 *details = *cmd_details;
705 sizeof(struct i40e_asq_cmd_details));
706 705
707 /* If the cmd_details are defined copy the cookie. The 706 /* If the cmd_details are defined copy the cookie. The
708 * cpu_to_le32 is not needed here because the data is ignored 707 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
760 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 759 desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
761 760
762 /* if the desc is available copy the temp desc to the right place */ 761 /* if the desc is available copy the temp desc to the right place */
763 memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); 762 *desc_on_ring = *desc;
764 763
765 /* if buff is not NULL assume indirect command */ 764 /* if buff is not NULL assume indirect command */
766 if (buff != NULL) { 765 if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
807 806
808 /* if ready, copy the desc back to temp */ 807 /* if ready, copy the desc back to temp */
809 if (i40e_asq_done(hw)) { 808 if (i40e_asq_done(hw)) {
810 memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); 809 *desc = *desc_on_ring;
811 if (buff != NULL) 810 if (buff != NULL)
812 memcpy(buff, dma_buff->va, buff_size); 811 memcpy(buff, dma_buff->va, buff_size);
813 retval = le16_to_cpu(desc->retval); 812 retval = le16_to_cpu(desc->retval);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c21df7bc3b1d..1e4ea134975a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
507 507
508 /* save link status information */ 508 /* save link status information */
509 if (link) 509 if (link)
510 memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); 510 *link = *hw_link_info;
511 511
512 /* flag cleared so helper functions don't call AQ again */ 512 /* flag cleared so helper functions don't call AQ again */
513 hw->phy.get_link_info = false; 513 hw->phy.get_link_info = false;
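
Several i40e hunks above replace memcpy() between objects of the same struct type with plain structure assignment; the bytes copied are the same, but the compiler now checks the types and the intent is clearer. A trivial illustration with a made-up descriptor struct:

#include <stdio.h>

struct desc {			/* made-up stand-in for i40e_aq_desc */
	unsigned short opcode;
	unsigned short retval;
	unsigned int cookie;
};

int main(void)
{
	struct desc tmp = { 0x1234, 0, 0xdeadbeef };
	struct desc on_ring;

	/* was: memcpy(&on_ring, &tmp, sizeof(struct desc)); */
	on_ring = tmp;

	printf("%#x %#x\n", on_ring.opcode, on_ring.cookie);
	return 0;
}
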
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 601d482694ea..221aa4795017 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 mem->size = ALIGN(size, alignment); 101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL); 103 &mem->pa, GFP_KERNEL);
104 if (mem->va) 104 if (!mem->va)
105 return 0; 105 return -ENOMEM;
106 106
107 return -ENOMEM; 107 return 0;
108} 108}
109 109
110/** 110/**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
136 mem->size = size; 136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL); 137 mem->va = kzalloc(size, GFP_KERNEL);
138 138
139 if (mem->va) 139 if (!mem->va)
140 return 0; 140 return -ENOMEM;
141 141
142 return -ENOMEM; 142 return 0;
143} 143}
144 144
145/** 145/**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id) 174 u16 needed, u16 id)
175{ 175{
176 int ret = -ENOMEM; 176 int ret = -ENOMEM;
177 int i = 0; 177 int i, j;
178 int j = 0;
179 178
180 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { 179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
181 dev_info(&pf->pdev->dev, 180 dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
186 185
187 /* start the linear search with an imperfect hint */ 186 /* start the linear search with an imperfect hint */
188 i = pile->search_hint; 187 i = pile->search_hint;
189 while (i < pile->num_entries && ret < 0) { 188 while (i < pile->num_entries) {
190 /* skip already allocated entries */ 189 /* skip already allocated entries */
191 if (pile->list[i] & I40E_PILE_VALID_BIT) { 190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
192 i++; 191 i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
205 pile->list[i+j] = id | I40E_PILE_VALID_BIT; 204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
206 ret = i; 205 ret = i;
207 pile->search_hint = i + j; 206 pile->search_hint = i + j;
207 break;
208 } else { 208 } else {
209 /* not enough, so skip over it and continue looking */ 209 /* not enough, so skip over it and continue looking */
210 i += j; 210 i += j;
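
i40e_get_lump() keeps its hinted first-fit search, but the success path now breaks out of the loop explicitly once a large-enough run is claimed rather than relying on the ret < 0 loop condition. A compact standalone model of that search over a small table (the table size and valid bit are invented for the example):

#include <stdio.h>

#define VALID_BIT	0x8000u
#define NUM_ENTRIES	16

static unsigned int list[NUM_ENTRIES];

/* First-fit search patterned after i40e_get_lump(): start at a hint,
 * skip entries that already carry the valid bit, and claim a contiguous
 * run of 'needed' free entries. */
static int get_lump(int hint, int needed, unsigned int id)
{
	int i = hint, j;

	while (i < NUM_ENTRIES) {
		if (list[i] & VALID_BIT) {	/* already allocated */
			i++;
			continue;
		}
		for (j = 0; (i + j) < NUM_ENTRIES && j < needed; j++)
			if (list[i + j] & VALID_BIT)
				break;
		if (j == needed) {		/* found enough in a row */
			for (j = 0; j < needed; j++)
				list[i + j] = id | VALID_BIT;
			return i;
		}
		i += j;				/* not enough, keep looking */
	}
	return -1;
}

int main(void)
{
	list[0] = 1 | VALID_BIT;		/* pretend entry 0 is taken */
	printf("lump at %d\n", get_lump(0, 3, 2));	/* expect 1 */
	return 0;
}
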
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1388 bool add_happened = false; 1388 bool add_happened = false;
1389 int filter_list_len = 0; 1389 int filter_list_len = 0;
1390 u32 changed_flags = 0; 1390 u32 changed_flags = 0;
1391 i40e_status ret = 0; 1391 i40e_status aq_ret = 0;
1392 struct i40e_pf *pf; 1392 struct i40e_pf *pf;
1393 int num_add = 0; 1393 int num_add = 0;
1394 int num_del = 0; 1394 int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1449 1449
1450 /* flush a full buffer */ 1450 /* flush a full buffer */
1451 if (num_del == filter_list_len) { 1451 if (num_del == filter_list_len) {
1452 ret = i40e_aq_remove_macvlan(&pf->hw, 1452 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1453 vsi->seid, del_list, num_del, 1453 vsi->seid, del_list, num_del,
1454 NULL); 1454 NULL);
1455 num_del = 0; 1455 num_del = 0;
1456 memset(del_list, 0, sizeof(*del_list)); 1456 memset(del_list, 0, sizeof(*del_list));
1457 1457
1458 if (ret) 1458 if (aq_ret)
1459 dev_info(&pf->pdev->dev, 1459 dev_info(&pf->pdev->dev,
1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1460 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1461 ret, 1461 aq_ret,
1462 pf->hw.aq.asq_last_status); 1462 pf->hw.aq.asq_last_status);
1463 } 1463 }
1464 } 1464 }
1465 if (num_del) { 1465 if (num_del) {
1466 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1466 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1467 del_list, num_del, NULL); 1467 del_list, num_del, NULL);
1468 num_del = 0; 1468 num_del = 0;
1469 1469
1470 if (ret) 1470 if (aq_ret)
1471 dev_info(&pf->pdev->dev, 1471 dev_info(&pf->pdev->dev,
1472 "ignoring delete macvlan error, err %d, aq_err %d\n", 1472 "ignoring delete macvlan error, err %d, aq_err %d\n",
1473 ret, pf->hw.aq.asq_last_status); 1473 aq_ret, pf->hw.aq.asq_last_status);
1474 } 1474 }
1475 1475
1476 kfree(del_list); 1476 kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1515 1515
1516 /* flush a full buffer */ 1516 /* flush a full buffer */
1517 if (num_add == filter_list_len) { 1517 if (num_add == filter_list_len) {
1518 ret = i40e_aq_add_macvlan(&pf->hw, 1518 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1519 vsi->seid, 1519 add_list, num_add,
1520 add_list, 1520 NULL);
1521 num_add,
1522 NULL);
1523 num_add = 0; 1521 num_add = 0;
1524 1522
1525 if (ret) 1523 if (aq_ret)
1526 break; 1524 break;
1527 memset(add_list, 0, sizeof(*add_list)); 1525 memset(add_list, 0, sizeof(*add_list));
1528 } 1526 }
1529 } 1527 }
1530 if (num_add) { 1528 if (num_add) {
1531 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 1529 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1532 add_list, num_add, NULL); 1530 add_list, num_add, NULL);
1533 num_add = 0; 1531 num_add = 0;
1534 } 1532 }
1535 kfree(add_list); 1533 kfree(add_list);
1536 add_list = NULL; 1534 add_list = NULL;
1537 1535
1538 if (add_happened && (!ret)) { 1536 if (add_happened && (!aq_ret)) {
1539 /* do nothing */; 1537 /* do nothing */;
1540 } else if (add_happened && (ret)) { 1538 } else if (add_happened && (aq_ret)) {
1541 dev_info(&pf->pdev->dev, 1539 dev_info(&pf->pdev->dev,
1542 "add filter failed, err %d, aq_err %d\n", 1540 "add filter failed, err %d, aq_err %d\n",
1543 ret, pf->hw.aq.asq_last_status); 1541 aq_ret, pf->hw.aq.asq_last_status);
1544 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 1542 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1545 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1543 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1546 &vsi->state)) { 1544 &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1556 if (changed_flags & IFF_ALLMULTI) { 1554 if (changed_flags & IFF_ALLMULTI) {
1557 bool cur_multipromisc; 1555 bool cur_multipromisc;
1558 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); 1556 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1559 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, 1557 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1560 vsi->seid, 1558 vsi->seid,
1561 cur_multipromisc, 1559 cur_multipromisc,
1562 NULL); 1560 NULL);
1563 if (ret) 1561 if (aq_ret)
1564 dev_info(&pf->pdev->dev, 1562 dev_info(&pf->pdev->dev,
1565 "set multi promisc failed, err %d, aq_err %d\n", 1563 "set multi promisc failed, err %d, aq_err %d\n",
1566 ret, pf->hw.aq.asq_last_status); 1564 aq_ret, pf->hw.aq.asq_last_status);
1567 } 1565 }
1568 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 1566 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1569 bool cur_promisc; 1567 bool cur_promisc;
1570 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || 1568 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1571 test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 1569 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1572 &vsi->state)); 1570 &vsi->state));
1573 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, 1571 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1574 vsi->seid, 1572 vsi->seid,
1575 cur_promisc, 1573 cur_promisc, NULL);
1576 NULL); 1574 if (aq_ret)
1577 if (ret)
1578 dev_info(&pf->pdev->dev, 1575 dev_info(&pf->pdev->dev,
1579 "set uni promisc failed, err %d, aq_err %d\n", 1576 "set uni promisc failed, err %d, aq_err %d\n",
1580 ret, pf->hw.aq.asq_last_status); 1577 aq_ret, pf->hw.aq.asq_last_status);
1581 } 1578 }
1582 1579
1583 clear_bit(__I40E_CONFIG_BUSY, &vsi->state); 1580 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1790 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan 1787 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1791 * @vsi: the vsi being configured 1788 * @vsi: the vsi being configured
1792 * @vid: vlan id to be removed (0 = untagged only , -1 = any) 1789 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
1790 *
1791 * Return: 0 on success or negative otherwise
1793 **/ 1792 **/
1794int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) 1793int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1795{ 1794{
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1863 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 1862 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1864 * @netdev: network interface to be adjusted 1863 * @netdev: network interface to be adjusted
1865 * @vid: vlan id to be added 1864 * @vid: vlan id to be added
1865 *
1866 * net_device_ops implementation for adding vlan ids
1866 **/ 1867 **/
1867static int i40e_vlan_rx_add_vid(struct net_device *netdev, 1868static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1868 __always_unused __be16 proto, u16 vid) 1869 __always_unused __be16 proto, u16 vid)
1869{ 1870{
1870 struct i40e_netdev_priv *np = netdev_priv(netdev); 1871 struct i40e_netdev_priv *np = netdev_priv(netdev);
1871 struct i40e_vsi *vsi = np->vsi; 1872 struct i40e_vsi *vsi = np->vsi;
1872 int ret; 1873 int ret = 0;
1873 1874
1874 if (vid > 4095) 1875 if (vid > 4095)
1875 return 0; 1876 return -EINVAL;
1877
1878 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1876 1879
1877 netdev_info(vsi->netdev, "adding %pM vid=%d\n",
1878 netdev->dev_addr, vid);
1879 /* If the network stack called us with vid = 0, we should 1880 /* If the network stack called us with vid = 0, we should
1880 * indicate to i40e_vsi_add_vlan() that we want to receive 1881 * indicate to i40e_vsi_add_vlan() that we want to receive
1881 * any traffic (i.e. with any vlan tag, or untagged) 1882 * any traffic (i.e. with any vlan tag, or untagged)
1882 */ 1883 */
1883 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1884 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1884 1885
1885 if (!ret) { 1886 if (!ret && (vid < VLAN_N_VID))
1886 if (vid < VLAN_N_VID) 1887 set_bit(vid, vsi->active_vlans);
1887 set_bit(vid, vsi->active_vlans);
1888 }
1889 1888
1890 return 0; 1889 return ret;
1891} 1890}
1892 1891
1893/** 1892/**
1894 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 1893 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1895 * @netdev: network interface to be adjusted 1894 * @netdev: network interface to be adjusted
1896 * @vid: vlan id to be removed 1895 * @vid: vlan id to be removed
1896 *
 1897 * net_device_ops implementation for removing vlan ids
1897 **/ 1898 **/
1898static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 1899static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1899 __always_unused __be16 proto, u16 vid) 1900 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1901 struct i40e_netdev_priv *np = netdev_priv(netdev); 1902 struct i40e_netdev_priv *np = netdev_priv(netdev);
1902 struct i40e_vsi *vsi = np->vsi; 1903 struct i40e_vsi *vsi = np->vsi;
1903 1904
1904 netdev_info(vsi->netdev, "removing %pM vid=%d\n", 1905 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1905 netdev->dev_addr, vid); 1906
1906 /* return code is ignored as there is nothing a user 1907 /* return code is ignored as there is nothing a user
1907 * can do about failure to remove and a log message was 1908 * can do about failure to remove and a log message was
1908 * already printed from another function 1909 * already printed from the other function
1909 */ 1910 */
1910 i40e_vsi_kill_vlan(vsi, vid); 1911 i40e_vsi_kill_vlan(vsi, vid);
1911 1912
1912 clear_bit(vid, vsi->active_vlans); 1913 clear_bit(vid, vsi->active_vlans);
1914
1913 return 0; 1915 return 0;
1914} 1916}
1915 1917
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
1936 * @vsi: the vsi being adjusted 1938 * @vsi: the vsi being adjusted
1937 * @vid: the vlan id to set as a PVID 1939 * @vid: the vlan id to set as a PVID
1938 **/ 1940 **/
1939i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) 1941int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1940{ 1942{
1941 struct i40e_vsi_context ctxt; 1943 struct i40e_vsi_context ctxt;
1942 i40e_status ret; 1944 i40e_status aq_ret;
1943 1945
1944 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 1946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1945 vsi->info.pvid = cpu_to_le16(vid); 1947 vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
1948 1950
1949 ctxt.seid = vsi->seid; 1951 ctxt.seid = vsi->seid;
1950 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1952 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1951 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1953 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1952 if (ret) { 1954 if (aq_ret) {
1953 dev_info(&vsi->back->pdev->dev, 1955 dev_info(&vsi->back->pdev->dev,
1954 "%s: update vsi failed, aq_err=%d\n", 1956 "%s: update vsi failed, aq_err=%d\n",
1955 __func__, vsi->back->hw.aq.asq_last_status); 1957 __func__, vsi->back->hw.aq.asq_last_status);
1958 return -ENOENT;
1956 } 1959 }
1957 1960
1958 return ret; 1961 return 0;
1959} 1962}
1960 1963
1961/** 1964/**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3326 **/ 3329 **/
3327static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) 3330static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3328{ 3331{
3329 int num_tc = 0, i; 3332 u8 num_tc = 0;
3333 int i;
3330 3334
3331 /* Scan the ETS Config Priority Table to find 3335 /* Scan the ETS Config Priority Table to find
3332 * traffic class enabled for a given priority 3336 * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3341 /* Traffic class index starts from zero so 3345 /* Traffic class index starts from zero so
3342 * increment to return the actual count 3346 * increment to return the actual count
3343 */ 3347 */
3344 num_tc++; 3348 return num_tc + 1;
3345
3346 return num_tc;
3347} 3349}
3348 3350
3349/** 3351/**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3451 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; 3453 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3452 struct i40e_pf *pf = vsi->back; 3454 struct i40e_pf *pf = vsi->back;
3453 struct i40e_hw *hw = &pf->hw; 3455 struct i40e_hw *hw = &pf->hw;
3456 i40e_status aq_ret;
3454 u32 tc_bw_max; 3457 u32 tc_bw_max;
3455 int ret;
3456 int i; 3458 int i;
3457 3459
3458 /* Get the VSI level BW configuration */ 3460 /* Get the VSI level BW configuration */
3459 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); 3461 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3460 if (ret) { 3462 if (aq_ret) {
3461 dev_info(&pf->pdev->dev, 3463 dev_info(&pf->pdev->dev,
3462 "couldn't get pf vsi bw config, err %d, aq_err %d\n", 3464 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3463 ret, pf->hw.aq.asq_last_status); 3465 aq_ret, pf->hw.aq.asq_last_status);
3464 return ret; 3466 return -EINVAL;
3465 } 3467 }
3466 3468
3467 /* Get the VSI level BW configuration per TC */ 3469 /* Get the VSI level BW configuration per TC */
3468 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, 3470 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3469 &bw_ets_config, 3471 NULL);
3470 NULL); 3472 if (aq_ret) {
3471 if (ret) {
3472 dev_info(&pf->pdev->dev, 3473 dev_info(&pf->pdev->dev,
3473 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", 3474 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3474 ret, pf->hw.aq.asq_last_status); 3475 aq_ret, pf->hw.aq.asq_last_status);
3475 return ret; 3476 return -EINVAL;
3476 } 3477 }
3477 3478
3478 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { 3479 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3494 /* 3 bits out of 4 for each TC */ 3495 /* 3 bits out of 4 for each TC */
3495 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); 3496 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3496 } 3497 }
3497 return ret; 3498
3499 return 0;
3498} 3500}
3499 3501
3500/** 3502/**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3505 * 3507 *
3506 * Returns 0 on success, negative value on failure 3508 * Returns 0 on success, negative value on failure
3507 **/ 3509 **/
3508static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, 3510static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3509 u8 enabled_tc,
3510 u8 *bw_share) 3511 u8 *bw_share)
3511{ 3512{
3512 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 3513 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3513 int i, ret = 0; 3514 i40e_status aq_ret;
3515 int i;
3514 3516
3515 bw_data.tc_valid_bits = enabled_tc; 3517 bw_data.tc_valid_bits = enabled_tc;
3516 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3518 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3517 bw_data.tc_bw_credits[i] = bw_share[i]; 3519 bw_data.tc_bw_credits[i] = bw_share[i];
3518 3520
3519 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, 3521 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3520 &bw_data, NULL); 3522 NULL);
3521 if (ret) { 3523 if (aq_ret) {
3522 dev_info(&vsi->back->pdev->dev, 3524 dev_info(&vsi->back->pdev->dev,
3523 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3525 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3524 __func__, vsi->back->hw.aq.asq_last_status); 3526 __func__, vsi->back->hw.aq.asq_last_status);
3525 return ret; 3527 return -EINVAL;
3526 } 3528 }
3527 3529
3528 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 3530 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3529 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; 3531 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3530 3532
3531 return ret; 3533 return 0;
3532} 3534}
3533 3535
3534/** 3536/**
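
The i40e hunks above all follow one pattern: admin-queue calls keep their raw status in a dedicated aq_ret variable and the function hands callers a plain errno instead of an i40e_status value. A minimal sketch of that shape is below; struct i40e_vsi, struct i40e_pf, the element type and i40e_aq_add_macvlan() are the driver's own, while the wrapper function itself is invented for illustration.

	/* Sketch only: the wrapper is hypothetical, the calls are the driver's. */
	static int i40e_add_filters_example(struct i40e_vsi *vsi,
					    struct i40e_aqc_add_macvlan_element_data *add_list,
					    u16 num_add)
	{
		struct i40e_pf *pf = vsi->back;
		i40e_status aq_ret;		/* raw admin-queue status */

		aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
					     add_list, num_add, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			return -ENOENT;		/* callers see an errno, not i40e_status */
		}
		return 0;
	}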
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 48cbc833b051..151e00cad113 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); 1607 igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1608 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1609 } 1609 }
1610 } else if (hw->phy.type == e1000_phy_82580) {
1611 /* enable MII loopback */
1612 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1610 } 1613 }
1611 1614
1612 /* add small delay to avoid loopback test failure */ 1615 /* add small delay to avoid loopback test failure */
@@ -2652,6 +2655,8 @@ static int igb_set_eee(struct net_device *netdev,
2652 (hw->phy.media_type != e1000_media_type_copper)) 2655 (hw->phy.media_type != e1000_media_type_copper))
2653 return -EOPNOTSUPP; 2656 return -EOPNOTSUPP;
2654 2657
2658 memset(&eee_curr, 0, sizeof(struct ethtool_eee));
2659
2655 ret_val = igb_get_eee(netdev, &eee_curr); 2660 ret_val = igb_get_eee(netdev, &eee_curr);
2656 if (ret_val) 2661 if (ret_val)
2657 return ret_val; 2662 return ret_val;
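
The memset added to igb_set_eee() matters because igb_get_eee() may fill only part of the structure before the later field-by-field comparison; zeroing first gives a known baseline instead of stack garbage. Roughly (function names as in the driver, body condensed and illustrative):

	static int igb_set_eee_example(struct net_device *netdev,
				       struct ethtool_eee *edata)
	{
		struct ethtool_eee eee_curr;
		int ret_val;

		memset(&eee_curr, 0, sizeof(eee_curr));	/* known starting state */
		ret_val = igb_get_eee(netdev, &eee_curr);
		if (ret_val)
			return ret_val;
		/* ... compare eee_curr against *edata as the driver does ... */
		return 0;
	}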
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 7fb5677451f9..2c210ec35d59 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1133 spin_unlock_bh(&mp->mib_counters_lock); 1133 spin_unlock_bh(&mp->mib_counters_lock);
1134
1135 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1136} 1134}
1137 1135
1138static void mib_counters_timer_wrapper(unsigned long _mp) 1136static void mib_counters_timer_wrapper(unsigned long _mp)
1139{ 1137{
1140 struct mv643xx_eth_private *mp = (void *)_mp; 1138 struct mv643xx_eth_private *mp = (void *)_mp;
1141
1142 mib_counters_update(mp); 1139 mib_counters_update(mp);
1140 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1143} 1141}
1144 1142
1145 1143
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2237 mp->int_mask |= INT_TX_END_0 << i; 2235 mp->int_mask |= INT_TX_END_0 << i;
2238 } 2236 }
2239 2237
2238 add_timer(&mp->mib_counters_timer);
2240 port_start(mp); 2239 port_start(mp);
2241 2240
2242 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2241 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2534 if (!ppdev) 2533 if (!ppdev)
2535 return -ENOMEM; 2534 return -ENOMEM;
2536 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 2535 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2536 ppdev->dev.of_node = pnp;
2537 2537
2538 ret = platform_device_add_resources(ppdev, &res, 1); 2538 ret = platform_device_add_resources(ppdev, &res, 1);
2539 if (ret) 2539 if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2916 mp->mib_counters_timer.data = (unsigned long)mp; 2916 mp->mib_counters_timer.data = (unsigned long)mp;
2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2919 add_timer(&mp->mib_counters_timer);
2920 2919
2921 spin_lock_init(&mp->mib_counters_lock); 2920 spin_lock_init(&mp->mib_counters_lock);
2922 2921
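
Taken together, the three mv643xx_eth hunks move the 30-second rearm out of mib_counters_update() and into the timer callback, and defer the first add_timer() from probe() to open(). The net effect is that a manually triggered statistics refresh on a closed port no longer (re)arms the timer. In outline, using only calls visible in the diff:

	static void mib_counters_timer_wrapper(unsigned long _mp)
	{
		struct mv643xx_eth_private *mp = (void *)_mp;

		mib_counters_update(mp);	/* the update itself no longer rearms */
		mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
	}

	/* ...and the first arming now happens in open(), not probe(): */
	add_timer(&mp->mib_counters_timer);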
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1a9c4f6269ea..ecc7f7b696b8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 PCI_DMA_FROMDEVICE); 3086 PCI_DMA_FROMDEVICE);
3087 skge_rx_reuse(e, skge->rx_buf_size); 3087 skge_rx_reuse(e, skge->rx_buf_size);
3088 } else { 3088 } else {
3089 struct skge_element ee;
3089 struct sk_buff *nskb; 3090 struct sk_buff *nskb;
3090 3091
3091 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); 3092 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3092 if (!nskb) 3093 if (!nskb)
3093 goto resubmit; 3094 goto resubmit;
3094 3095
3095 skb = e->skb; 3096 ee = *e;
3097
3098 skb = ee.skb;
3096 prefetch(skb->data); 3099 prefetch(skb->data);
3097 3100
3098 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3101 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3101 } 3104 }
3102 3105
3103 pci_unmap_single(skge->hw->pdev, 3106 pci_unmap_single(skge->hw->pdev,
3104 dma_unmap_addr(e, mapaddr), 3107 dma_unmap_addr(&ee, mapaddr),
3105 dma_unmap_len(e, maplen), 3108 dma_unmap_len(&ee, maplen),
3106 PCI_DMA_FROMDEVICE); 3109 PCI_DMA_FROMDEVICE);
3107 } 3110 }
3108 3111
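
The skge change snapshots the ring element by value (ee = *e) before skge_rx_setup() repopulates the slot with the new buffer, so the later DMA unmap uses the saved mapping rather than the freshly overwritten descriptor. A condensed view of that path, with types and helpers as in the driver and the error handling elided:

	struct skge_element ee;
	struct sk_buff *skb;

	ee = *e;			/* snapshot before the slot is reused */
	skb = ee.skb;

	if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
		/* ... error path as in the driver ... */
	}

	pci_unmap_single(skge->hw->pdev,
			 dma_unmap_addr(&ee, mapaddr),	/* saved mapping */
			 dma_unmap_len(&ee, maplen),
			 PCI_DMA_FROMDEVICE);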
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
70 put_page(page); 70 put_page(page);
71 return -ENOMEM; 71 return -ENOMEM;
72 } 72 }
73 page_alloc->size = PAGE_SIZE << order; 73 page_alloc->page_size = PAGE_SIZE << order;
74 page_alloc->page = page; 74 page_alloc->page = page;
75 page_alloc->dma = dma; 75 page_alloc->dma = dma;
76 page_alloc->offset = frag_info->frag_align; 76 page_alloc->page_offset = frag_info->frag_align;
77 /* Not doing get_page() for each frag is a big win 77 /* Not doing get_page() for each frag is a big win
 78 * on asymmetric workloads. 78 * on asymmetric workloads.
79 */ 79 */
80 atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); 80 atomic_set(&page->_count,
81 page_alloc->page_size / frag_info->frag_stride);
81 return 0; 82 return 0;
82} 83}
83 84
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
96 for (i = 0; i < priv->num_frags; i++) { 97 for (i = 0; i < priv->num_frags; i++) {
97 frag_info = &priv->frag_info[i]; 98 frag_info = &priv->frag_info[i];
98 page_alloc[i] = ring_alloc[i]; 99 page_alloc[i] = ring_alloc[i];
99 page_alloc[i].offset += frag_info->frag_stride; 100 page_alloc[i].page_offset += frag_info->frag_stride;
100 if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) 101
102 if (page_alloc[i].page_offset + frag_info->frag_stride <=
103 ring_alloc[i].page_size)
101 continue; 104 continue;
105
102 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) 106 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
103 goto out; 107 goto out;
104 } 108 }
105 109
106 for (i = 0; i < priv->num_frags; i++) { 110 for (i = 0; i < priv->num_frags; i++) {
107 frags[i] = ring_alloc[i]; 111 frags[i] = ring_alloc[i];
108 dma = ring_alloc[i].dma + ring_alloc[i].offset; 112 dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
109 ring_alloc[i] = page_alloc[i]; 113 ring_alloc[i] = page_alloc[i];
110 rx_desc->data[i].addr = cpu_to_be64(dma); 114 rx_desc->data[i].addr = cpu_to_be64(dma);
111 } 115 }
@@ -117,7 +121,7 @@ out:
117 frag_info = &priv->frag_info[i]; 121 frag_info = &priv->frag_info[i];
118 if (page_alloc[i].page != ring_alloc[i].page) { 122 if (page_alloc[i].page != ring_alloc[i].page) {
119 dma_unmap_page(priv->ddev, page_alloc[i].dma, 123 dma_unmap_page(priv->ddev, page_alloc[i].dma,
120 page_alloc[i].size, PCI_DMA_FROMDEVICE); 124 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
121 page = page_alloc[i].page; 125 page = page_alloc[i].page;
122 atomic_set(&page->_count, 1); 126 atomic_set(&page->_count, 1);
123 put_page(page); 127 put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
131 int i) 135 int i)
132{ 136{
133 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 137 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
138 u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
139
134 140
135 if (frags[i].offset + frag_info->frag_stride > frags[i].size) 141 if (next_frag_end > frags[i].page_size)
136 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, 142 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
137 PCI_DMA_FROMDEVICE); 143 PCI_DMA_FROMDEVICE);
138 144
139 if (frags[i].page) 145 if (frags[i].page)
140 put_page(frags[i].page); 146 put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
161 167
162 page_alloc = &ring->page_alloc[i]; 168 page_alloc = &ring->page_alloc[i];
163 dma_unmap_page(priv->ddev, page_alloc->dma, 169 dma_unmap_page(priv->ddev, page_alloc->dma,
164 page_alloc->size, PCI_DMA_FROMDEVICE); 170 page_alloc->page_size, PCI_DMA_FROMDEVICE);
165 page = page_alloc->page; 171 page = page_alloc->page;
166 atomic_set(&page->_count, 1); 172 atomic_set(&page->_count, 1);
167 put_page(page); 173 put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
184 i, page_count(page_alloc->page)); 190 i, page_count(page_alloc->page));
185 191
186 dma_unmap_page(priv->ddev, page_alloc->dma, 192 dma_unmap_page(priv->ddev, page_alloc->dma,
187 page_alloc->size, PCI_DMA_FROMDEVICE); 193 page_alloc->page_size, PCI_DMA_FROMDEVICE);
188 while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { 194 while (page_alloc->page_offset + frag_info->frag_stride <
195 page_alloc->page_size) {
189 put_page(page_alloc->page); 196 put_page(page_alloc->page);
190 page_alloc->offset += frag_info->frag_stride; 197 page_alloc->page_offset += frag_info->frag_stride;
191 } 198 }
192 page_alloc->page = NULL; 199 page_alloc->page = NULL;
193 } 200 }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
478 /* Save page reference in skb */ 485 /* Save page reference in skb */
479 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); 486 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
480 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); 487 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
481 skb_frags_rx[nr].page_offset = frags[nr].offset; 488 skb_frags_rx[nr].page_offset = frags[nr].page_offset;
482 skb->truesize += frag_info->frag_stride; 489 skb->truesize += frag_info->frag_stride;
483 frags[nr].page = NULL; 490 frags[nr].page = NULL;
484 } 491 }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
517 524
518 /* Get pointer to first fragment so we could copy the headers into the 525 /* Get pointer to first fragment so we could copy the headers into the
519 * (linear part of the) skb */ 526 * (linear part of the) skb */
520 va = page_address(frags[0].page) + frags[0].offset; 527 va = page_address(frags[0].page) + frags[0].page_offset;
521 528
522 if (length <= SMALL_PACKET_SIZE) { 529 if (length <= SMALL_PACKET_SIZE) {
523 /* We are copying all relevant data to the skb - temporarily 530 /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
645 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), 652 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
646 DMA_FROM_DEVICE); 653 DMA_FROM_DEVICE);
647 ethh = (struct ethhdr *)(page_address(frags[0].page) + 654 ethh = (struct ethhdr *)(page_address(frags[0].page) +
648 frags[0].offset); 655 frags[0].page_offset);
649 656
650 if (is_multicast_ether_addr(ethh->h_dest)) { 657 if (is_multicast_ether_addr(ethh->h_dest)) {
651 struct mlx4_mac_entry *entry; 658 struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
237struct mlx4_en_rx_alloc { 237struct mlx4_en_rx_alloc {
238 struct page *page; 238 struct page *page;
239 dma_addr_t dma; 239 dma_addr_t dma;
240 u32 offset; 240 u32 page_offset;
241 u32 size; 241 u32 page_size;
242}; 242};
243 243
244struct mlx4_en_tx_ring { 244struct mlx4_en_tx_ring {
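
With the renamed fields, the up-front reference count in mlx4_alloc_pages() reads as one reference per fragment that fits in the page. With purely illustrative numbers (not the driver's actual defaults): an order-1 allocation on 4 KiB pages gives page_size = PAGE_SIZE << 1 = 8192, so a 2048-byte frag_stride yields 8192 / 2048 = 4 references taken in one go via atomic_set(&page->_count, page_alloc->page_size / frag_info->frag_stride).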
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5472cbd34028..6ca30739625f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
180 return 0; 180 return 0;
181} 181}
182 182
183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) 183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
184 int csum)
184{ 185{
185 block->token = token; 186 block->token = token;
186 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); 187 if (csum) {
187 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 188 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
189 sizeof(block->data) - 2);
190 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
191 }
188} 192}
189 193
190static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) 194static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
191{ 195{
192 struct mlx5_cmd_mailbox *next = msg->next; 196 struct mlx5_cmd_mailbox *next = msg->next;
193 197
194 while (next) { 198 while (next) {
195 calc_block_sig(next->buf, token); 199 calc_block_sig(next->buf, token, csum);
196 next = next->next; 200 next = next->next;
197 } 201 }
198} 202}
199 203
200static void set_signature(struct mlx5_cmd_work_ent *ent) 204static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
201{ 205{
202 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 206 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
203 calc_chain_sig(ent->in, ent->token); 207 calc_chain_sig(ent->in, ent->token, csum);
204 calc_chain_sig(ent->out, ent->token); 208 calc_chain_sig(ent->out, ent->token, csum);
205} 209}
206 210
207static void poll_timeout(struct mlx5_cmd_work_ent *ent) 211static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
539 lay->type = MLX5_PCI_CMD_XPORT; 543 lay->type = MLX5_PCI_CMD_XPORT;
540 lay->token = ent->token; 544 lay->token = ent->token;
541 lay->status_own = CMD_OWNER_HW; 545 lay->status_own = CMD_OWNER_HW;
542 if (!cmd->checksum_disabled) 546 set_signature(ent, !cmd->checksum_disabled);
543 set_signature(ent);
544 dump_command(dev, ent, 1); 547 dump_command(dev, ent, 1);
545 ktime_get_ts(&ent->ts1); 548 ktime_get_ts(&ent->ts1);
546 549
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
773 776
774 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 777 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
775 block = next->buf; 778 block = next->buf;
776 if (xor8_buf(block, sizeof(*block)) != 0xff)
777 return -EINVAL;
778 779
779 memcpy(to, block->data, copy); 780 memcpy(to, block->data, copy);
780 to += copy; 781 to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1361 goto err_map; 1362 goto err_map;
1362 } 1363 }
1363 1364
1365 cmd->checksum_disabled = 1;
1364 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1366 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1365 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1367 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1366 1368
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1510 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1512 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1511 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1513 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1512 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1514 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
1513 case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; 1515 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
1514 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1516 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1515 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1517 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1516 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1518 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 443cc4d7b024..2231d93cc7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
366 goto err_in; 366 goto err_in;
367 } 367 }
368 368
369 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
370 name, pci_name(dev->pdev));
369 eq->eqn = out.eq_number; 371 eq->eqn = out.eq_number;
370 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 372 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
371 name, eq); 373 eq->name, eq);
372 if (err) 374 if (err)
373 goto err_eq; 375 goto err_eq;
374 376
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b47739b0b5f6..bc0f5fb66e24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; 165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; 166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
167 struct mlx5_cmd_set_hca_cap_mbox_out set_out; 167 struct mlx5_cmd_set_hca_cap_mbox_out set_out;
168 struct mlx5_profile *prof = dev->profile;
169 u64 flags; 168 u64 flags;
170 int csum = 1;
171 int err; 169 int err;
172 170
173 memset(&query_ctx, 0, sizeof(query_ctx)); 171 memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
197 memcpy(&set_ctx->hca_cap, &query_out->hca_cap, 195 memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
198 sizeof(set_ctx->hca_cap)); 196 sizeof(set_ctx->hca_cap));
199 197
200 if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
201 csum = !!prof->cmdif_csum;
202 flags = be64_to_cpu(set_ctx->hca_cap.flags);
203 if (csum)
204 flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
205 else
206 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
207
208 set_ctx->hca_cap.flags = cpu_to_be64(flags);
209 }
210
211 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 198 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
212 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; 199 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
213 200
201 flags = be64_to_cpu(query_out->hca_cap.flags);
202 /* disable checksum */
203 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
204
205 set_ctx->hca_cap.flags = cpu_to_be64(flags);
214 memset(&set_out, 0, sizeof(set_out)); 206 memset(&set_out, 0, sizeof(set_out));
215 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); 207 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
216 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); 208 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
225 if (err) 217 if (err)
226 goto query_ex; 218 goto query_ex;
227 219
228 if (!csum)
229 dev->cmd.checksum_disabled = 1;
230
231query_ex: 220query_ex:
232 kfree(query_out); 221 kfree(query_out);
233 kfree(set_ctx); 222 kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 3a2408d44820..7b12acf210f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
90 __be64 pas[0]; 90 __be64 pas[0];
91}; 91};
92 92
93enum {
94 MAX_RECLAIM_TIME_MSECS = 5000,
95};
96
93static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) 97static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
94{ 98{
95 struct rb_root *root = &dev->priv.page_root; 99 struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
279 int err; 283 int err;
280 int i; 284 int i;
281 285
286 if (nclaimed)
287 *nclaimed = 0;
288
282 memset(&in, 0, sizeof(in)); 289 memset(&in, 0, sizeof(in));
283 outlen = sizeof(*out) + npages * sizeof(out->pas[0]); 290 outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
284 out = mlx5_vzalloc(outlen); 291 out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
388 395
389int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) 396int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
390{ 397{
391 unsigned long end = jiffies + msecs_to_jiffies(5000); 398 unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
392 struct fw_page *fwp; 399 struct fw_page *fwp;
393 struct rb_node *p; 400 struct rb_node *p;
401 int nclaimed = 0;
394 int err; 402 int err;
395 403
396 do { 404 do {
397 p = rb_first(&dev->priv.page_root); 405 p = rb_first(&dev->priv.page_root);
398 if (p) { 406 if (p) {
399 fwp = rb_entry(p, struct fw_page, rb_node); 407 fwp = rb_entry(p, struct fw_page, rb_node);
400 err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); 408 err = reclaim_pages(dev, fwp->func_id,
409 optimal_reclaimed_pages(),
410 &nclaimed);
401 if (err) { 411 if (err) {
402 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 412 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
403 return err; 413 return err;
404 } 414 }
415 if (nclaimed)
416 end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
405 } 417 }
406 if (time_after(jiffies, end)) { 418 if (time_after(jiffies, end)) {
407 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); 419 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
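
The reclaim loop above now extends its deadline whenever a call actually returned pages (nclaimed != 0), so the driver only gives up after a full MAX_RECLAIM_TIME_MSECS window with no forward progress. A skeleton of that loop, reusing only helpers visible in the diff:

	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct rb_node *p;
	struct fw_page *fwp;
	int nclaimed = 0;
	int err;

	while ((p = rb_first(&dev->priv.page_root)) != NULL) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		err = reclaim_pages(dev, fwp->func_id,
				    optimal_reclaimed_pages(), &nclaimed);
		if (err)
			return err;
		if (nclaimed)			/* progress: push the deadline out */
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;			/* no progress for the whole window */
		}
	}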
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 83c2091c9c23..ea54d95e5b9f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
448 irq = irq_of_parse_and_map(node, 0); 448 irq = irq_of_parse_and_map(node, 0);
449 if (irq <= 0) { 449 if (irq <= 0) {
450 netdev_err(ndev, "irq_of_parse_and_map failed\n"); 450 netdev_err(ndev, "irq_of_parse_and_map failed\n");
451 return -EINVAL; 451 ret = -EINVAL;
452 goto irq_map_fail;
452 } 453 }
453 454
454 priv = netdev_priv(ndev); 455 priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * 473 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
473 TX_DESC_NUM, &priv->tx_base, 474 TX_DESC_NUM, &priv->tx_base,
474 GFP_DMA | GFP_KERNEL); 475 GFP_DMA | GFP_KERNEL);
475 if (priv->tx_desc_base == NULL) 476 if (priv->tx_desc_base == NULL) {
477 ret = -ENOMEM;
476 goto init_fail; 478 goto init_fail;
479 }
477 480
478 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * 481 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
479 RX_DESC_NUM, &priv->rx_base, 482 RX_DESC_NUM, &priv->rx_base,
480 GFP_DMA | GFP_KERNEL); 483 GFP_DMA | GFP_KERNEL);
481 if (priv->rx_desc_base == NULL) 484 if (priv->rx_desc_base == NULL) {
485 ret = -ENOMEM;
482 goto init_fail; 486 goto init_fail;
487 }
483 488
484 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, 489 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
485 GFP_ATOMIC); 490 GFP_ATOMIC);
486 if (!priv->tx_buf_base) 491 if (!priv->tx_buf_base) {
492 ret = -ENOMEM;
487 goto init_fail; 493 goto init_fail;
494 }
488 495
489 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, 496 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
490 GFP_ATOMIC); 497 GFP_ATOMIC);
491 if (!priv->rx_buf_base) 498 if (!priv->rx_buf_base) {
499 ret = -ENOMEM;
492 goto init_fail; 500 goto init_fail;
501 }
493 502
494 platform_set_drvdata(pdev, ndev); 503 platform_set_drvdata(pdev, ndev);
495 504
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
522init_fail: 531init_fail:
523 netdev_err(ndev, "init failed\n"); 532 netdev_err(ndev, "init failed\n");
524 moxart_mac_free_memory(ndev); 533 moxart_mac_free_memory(ndev);
525 534irq_map_fail:
535 free_netdev(ndev);
526 return ret; 536 return ret;
527} 537}
528 538
@@ -543,7 +553,7 @@ static const struct of_device_id moxart_mac_match[] = {
543 { } 553 { }
544}; 554};
545 555
546struct __initdata platform_driver moxart_mac_driver = { 556static struct platform_driver moxart_mac_driver = {
547 .probe = moxart_mac_probe, 557 .probe = moxart_mac_probe,
548 .remove = moxart_remove, 558 .remove = moxart_remove,
549 .driver = { 559 .driver = {
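
The moxart probe changes make every allocation failure set ret explicitly before jumping to an unwind label, and add a separate irq_map_fail label so free_netdev() also runs on the early IRQ-mapping exit. The canonical shape, with labels named as in the patch and the surrounding probe code assumed:

	priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, GFP_ATOMIC);
	if (!priv->tx_buf_base) {
		ret = -ENOMEM;		/* report the real reason, not a stale ret */
		goto init_fail;
	}
	/* ... */
	init_fail:
		netdev_err(ndev, "init failed\n");
		moxart_mac_free_memory(ndev);
	irq_map_fail:
		free_netdev(ndev);	/* always undo the netdev allocation */
		return ret;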
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 4d7ad0074d1c..ff83a9fcd4c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -665,7 +665,7 @@ static int qlcnic_set_channels(struct net_device *dev,
665 return err; 665 return err;
666 } 666 }
667 667
668 if (channel->tx_count) { 668 if (qlcnic_82xx_check(adapter) && channel->tx_count) {
669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); 669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
670 if (err) 670 if (err)
671 return err; 671 return err;
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
1794 .set_msglevel = qlcnic_set_msglevel, 1794 .set_msglevel = qlcnic_set_msglevel,
1795 .get_msglevel = qlcnic_get_msglevel, 1795 .get_msglevel = qlcnic_get_msglevel,
1796}; 1796};
1797
1798const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1799 .get_settings = qlcnic_get_settings,
1800 .get_drvinfo = qlcnic_get_drvinfo,
1801 .set_msglevel = qlcnic_set_msglevel,
1802 .get_msglevel = qlcnic_get_msglevel,
1803 .set_dump = qlcnic_set_dump,
1804};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c4c5023e1fdf..9e61eb867452 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 431 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
432 usleep_range(10000, 11000); 432 usleep_range(10000, 11000);
433 433
434 if (!adapter->fw_work.work.func)
435 return;
436
434 cancel_delayed_work_sync(&adapter->fw_work); 437 cancel_delayed_work_sync(&adapter->fw_work);
435} 438}
436 439
@@ -2254,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2254 2257
2255 err = qlcnic_alloc_adapter_resources(adapter); 2258 err = qlcnic_alloc_adapter_resources(adapter);
2256 if (err) 2259 if (err)
2257 goto err_out_free_netdev; 2260 goto err_out_free_wq;
2258 2261
2259 adapter->dev_rst_time = jiffies; 2262 adapter->dev_rst_time = jiffies;
2260 adapter->ahw->revision_id = pdev->revision; 2263 adapter->ahw->revision_id = pdev->revision;
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 adapter->portnum = adapter->ahw->pci_func; 2278 adapter->portnum = adapter->ahw->pci_func;
2276 err = qlcnic_start_firmware(adapter); 2279 err = qlcnic_start_firmware(adapter);
2277 if (err) { 2280 if (err) {
2278 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 2281 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
2279 goto err_out_free_hw; 2282 "\t\tIf reboot doesn't help, try flashing the card\n");
2283 goto err_out_maintenance_mode;
2280 } 2284 }
2281 2285
2282 qlcnic_get_multiq_capability(adapter); 2286 qlcnic_get_multiq_capability(adapter);
@@ -2392,6 +2396,9 @@ err_out_disable_msi:
2392err_out_free_hw: 2396err_out_free_hw:
2393 qlcnic_free_adapter_resources(adapter); 2397 qlcnic_free_adapter_resources(adapter);
2394 2398
2399err_out_free_wq:
2400 destroy_workqueue(adapter->qlcnic_wq);
2401
2395err_out_free_netdev: 2402err_out_free_netdev:
2396 free_netdev(netdev); 2403 free_netdev(netdev);
2397 2404
@@ -2408,6 +2415,22 @@ err_out_disable_pdev:
2408 pci_set_drvdata(pdev, NULL); 2415 pci_set_drvdata(pdev, NULL);
2409 pci_disable_device(pdev); 2416 pci_disable_device(pdev);
2410 return err; 2417 return err;
2418
2419err_out_maintenance_mode:
2420 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
2421 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
2422 err = register_netdev(netdev);
2423
2424 if (err) {
2425 dev_err(&pdev->dev, "Failed to register net device\n");
2426 qlcnic_clr_all_drv_state(adapter, 0);
2427 goto err_out_free_hw;
2428 }
2429
2430 pci_set_drvdata(pdev, adapter);
2431 qlcnic_add_sysfs(adapter);
2432
2433 return 0;
2411} 2434}
2412 2435
2413static void qlcnic_remove(struct pci_dev *pdev) 2436static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2541,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
2518static int qlcnic_open(struct net_device *netdev) 2541static int qlcnic_open(struct net_device *netdev)
2519{ 2542{
2520 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2543 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2544 u32 state;
2521 int err; 2545 int err;
2522 2546
2547 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
2548 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
2549 netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
2550
2551 return -EIO;
2552 }
2553
2523 netif_carrier_off(netdev); 2554 netif_carrier_off(netdev);
2524 2555
2525 err = qlcnic_attach(adapter); 2556 err = qlcnic_attach(adapter);
@@ -3228,6 +3259,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
3228 return; 3259 return;
3229 3260
3230 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); 3261 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
3262 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
3263 netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
3264 __func__);
3265 qlcnic_api_unlock(adapter);
3266
3267 return;
3268 }
3231 3269
3232 if (state == QLCNIC_DEV_READY) { 3270 if (state == QLCNIC_DEV_READY) {
3233 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, 3271 QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
@@ -3613,11 +3651,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
3613 u8 max_hw = QLCNIC_MAX_TX_RINGS; 3651 u8 max_hw = QLCNIC_MAX_TX_RINGS;
3614 u32 max_allowed; 3652 u32 max_allowed;
3615 3653
3616 if (!qlcnic_82xx_check(adapter)) {
3617 netdev_err(netdev, "No Multi TX-Q support\n");
3618 return -EINVAL;
3619 }
3620
3621 if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3654 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3622 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); 3655 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
3623 return -EINVAL; 3656 return -EINVAL;
@@ -3657,8 +3690,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
3657 u8 max_hw = adapter->ahw->max_rx_ques; 3690 u8 max_hw = adapter->ahw->max_rx_ques;
3658 u32 max_allowed; 3691 u32 max_allowed;
3659 3692
3660 if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && 3693 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3661 !qlcnic_use_msi) {
3662 netdev_err(netdev, "No RSS support in INT-x mode\n"); 3694 netdev_err(netdev, "No RSS support in INT-x mode\n");
3663 return -EINVAL; 3695 return -EINVAL;
3664 } 3696 }
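
Several of the qlcnic hunks add the same guard: read QLCNIC_CRB_DEV_STATE and bail out early when the device is in FAILED (or BADBAD) state, keeping open(), reset requests and the diagnostic sysfs nodes off a dead adapter. The guard itself, with the register accessor and state constants from the driver:

	u32 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);

	if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
		netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
		return -EIO;	/* or a plain return in void callers */
	}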
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 330d9a8774ad..686f460b1502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
397{ 397{
398 struct net_device *netdev = adapter->netdev; 398 struct net_device *netdev = adapter->netdev;
399 399
400 rtnl_lock();
400 if (netif_running(netdev)) 401 if (netif_running(netdev))
401 __qlcnic_down(adapter, netdev); 402 __qlcnic_down(adapter, netdev);
402 403
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
407 /* After disabling SRIOV re-init the driver in default mode 408 /* After disabling SRIOV re-init the driver in default mode
408 configure opmode based on op_mode of function 409 configure opmode based on op_mode of function
409 */ 410 */
410 if (qlcnic_83xx_configure_opmode(adapter)) 411 if (qlcnic_83xx_configure_opmode(adapter)) {
412 rtnl_unlock();
411 return -EIO; 413 return -EIO;
414 }
412 415
413 if (netif_running(netdev)) 416 if (netif_running(netdev))
414 __qlcnic_up(adapter, netdev); 417 __qlcnic_up(adapter, netdev);
415 418
419 rtnl_unlock();
416 return 0; 420 return 0;
417} 421}
418 422
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
533 return -EIO; 537 return -EIO;
534 } 538 }
535 539
540 rtnl_lock();
536 if (netif_running(netdev)) 541 if (netif_running(netdev))
537 __qlcnic_down(adapter, netdev); 542 __qlcnic_down(adapter, netdev);
538 543
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
555 __qlcnic_up(adapter, netdev); 560 __qlcnic_up(adapter, netdev);
556 561
557error: 562error:
563 rtnl_unlock();
558 return err; 564 return err;
559} 565}
560 566
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index c6165d05cc13..019f4377307f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 1272void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1273{ 1273{
1274 struct device *dev = &adapter->pdev->dev; 1274 struct device *dev = &adapter->pdev->dev;
1275 u32 state;
1275 1276
1276 if (device_create_bin_file(dev, &bin_attr_port_stats)) 1277 if (device_create_bin_file(dev, &bin_attr_port_stats))
1277 dev_info(dev, "failed to create port stats sysfs entry"); 1278 dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1285 if (device_create_bin_file(dev, &bin_attr_mem)) 1286 if (device_create_bin_file(dev, &bin_attr_mem))
1286 dev_info(dev, "failed to create mem sysfs entry\n"); 1287 dev_info(dev, "failed to create mem sysfs entry\n");
1287 1288
1289 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1290 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1291 return;
1292
1288 if (device_create_bin_file(dev, &bin_attr_pci_config)) 1293 if (device_create_bin_file(dev, &bin_attr_pci_config))
1289 dev_info(dev, "failed to create pci config sysfs entry"); 1294 dev_info(dev, "failed to create pci config sysfs entry");
1295
1290 if (device_create_file(dev, &dev_attr_beacon)) 1296 if (device_create_file(dev, &dev_attr_beacon))
1291 dev_info(dev, "failed to create beacon sysfs entry"); 1297 dev_info(dev, "failed to create beacon sysfs entry");
1292 1298
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
1307void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 1313void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1308{ 1314{
1309 struct device *dev = &adapter->pdev->dev; 1315 struct device *dev = &adapter->pdev->dev;
1316 u32 state;
1310 1317
1311 device_remove_bin_file(dev, &bin_attr_port_stats); 1318 device_remove_bin_file(dev, &bin_attr_port_stats);
1312 1319
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
1315 device_remove_file(dev, &dev_attr_diag_mode); 1322 device_remove_file(dev, &dev_attr_diag_mode);
1316 device_remove_bin_file(dev, &bin_attr_crb); 1323 device_remove_bin_file(dev, &bin_attr_crb);
1317 device_remove_bin_file(dev, &bin_attr_mem); 1324 device_remove_bin_file(dev, &bin_attr_mem);
1325
1326 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
1327 if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
1328 return;
1329
1318 device_remove_bin_file(dev, &bin_attr_pci_config); 1330 device_remove_bin_file(dev, &bin_attr_pci_config);
1319 device_remove_file(dev, &dev_attr_beacon); 1331 device_remove_file(dev, &dev_attr_beacon);
1320 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1332 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0f..6bc5db703920 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
740 int i; 740 int i;
741 741
742 if (!mpi_coredump) { 742 if (!mpi_coredump) {
743 netif_err(qdev, drv, qdev->ndev, "No memory available\n"); 743 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
744 return -ENOMEM; 744 return -EINVAL;
745 } 745 }
746 746
 747 /* Try to get the spinlock, but don't worry if 747 /* Try to get the spinlock, but don't worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e247..7ad146080c36 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
1274 return; 1274 return;
1275 } 1275 }
1276 1276
1277 if (!ql_core_dump(qdev, qdev->mpi_coredump)) { 1277 if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1278 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
1279 qdev->core_is_dumped = 1; 1279 qdev->core_is_dumped = 1;
1280 queue_delayed_work(qdev->workqueue, 1280 queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa83..b57c278d3b46 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
690 EESR_TDE | EESR_ECI, 690 EESR_TDE | EESR_ECI,
691 .fdr_value = 0x0000070f,
692 .rmcr_value = 0x00000001,
691 693
692 .apr = 1, 694 .apr = 1,
693 .mpr = 1, 695 .mpr = 1,
694 .tpauser = 1, 696 .tpauser = 1,
695 .bculr = 1, 697 .bculr = 1,
696 .hw_swap = 1, 698 .hw_swap = 1,
699 .rpadir = 1,
700 .rpadir_value = 2 << 16,
697 .no_trimd = 1, 701 .no_trimd = 1,
698 .no_ade = 1, 702 .no_ade = 1,
699 .tsu = 1, 703 .tsu = 1,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 9f18ae984f9e..21f9ad6392e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), 444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), 445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), 446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
447 EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
448 EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
449 EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
450 EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
451 EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
452 EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
453 EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
454 EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
455 EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
456 EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
457 EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
458 EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
447}; 459};
448 460
449#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ 461#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -498,44 +510,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
498#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ 510#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
499 (1ULL << EF10_STAT_rx_length_error)) 511 (1ULL << EF10_STAT_rx_length_error))
500 512
501#if BITS_PER_LONG == 64 513/* These statistics are only provided if the firmware supports the
502#define STAT_MASK_BITMAP(bits) (bits) 514 * capability PM_AND_RXDP_COUNTERS.
503#else 515 */
504#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 516#define HUNT_PM_AND_RXDP_STAT_MASK ( \
505#endif 517 (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
506 518 (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
507static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) 519 (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
508{ 520 (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
509 static const unsigned long hunt_40g_stat_mask[] = { 521 (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
510 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 522 (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
511 HUNT_40G_EXTRA_STAT_MASK) 523 (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
512 }; 524 (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
513 static const unsigned long hunt_10g_only_stat_mask[] = { 525 (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
514 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 526 (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
515 HUNT_10G_ONLY_STAT_MASK) 527 (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
516 }; 528 (1ULL << EF10_STAT_rx_dp_emerg_wait))
529
530static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
531{
532 u64 raw_mask = HUNT_COMMON_STAT_MASK;
517 u32 port_caps = efx_mcdi_phy_get_caps(efx); 533 u32 port_caps = efx_mcdi_phy_get_caps(efx);
534 struct efx_ef10_nic_data *nic_data = efx->nic_data;
518 535
519 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 536 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
520 return hunt_40g_stat_mask; 537 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
521 else 538 else
522 return hunt_10g_only_stat_mask; 539 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
540
541 if (nic_data->datapath_caps &
542 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
543 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
544
545 return raw_mask;
546}
547
548static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
549{
550 u64 raw_mask = efx_ef10_raw_stat_mask(efx);
551
552#if BITS_PER_LONG == 64
553 mask[0] = raw_mask;
554#else
555 mask[0] = raw_mask & 0xffffffff;
556 mask[1] = raw_mask >> 32;
557#endif
523} 558}
524 559
525static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 560static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
526{ 561{
562 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
563
564 efx_ef10_get_stat_mask(efx, mask);
527 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 565 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
528 efx_ef10_stat_mask(efx), names); 566 mask, names);
529} 567}
530 568
531static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) 569static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
532{ 570{
533 struct efx_ef10_nic_data *nic_data = efx->nic_data; 571 struct efx_ef10_nic_data *nic_data = efx->nic_data;
534 const unsigned long *stats_mask = efx_ef10_stat_mask(efx); 572 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
535 __le64 generation_start, generation_end; 573 __le64 generation_start, generation_end;
536 u64 *stats = nic_data->stats; 574 u64 *stats = nic_data->stats;
537 __le64 *dma_stats; 575 __le64 *dma_stats;
538 576
577 efx_ef10_get_stat_mask(efx, mask);
578
539 dma_stats = efx->stats_buffer.addr; 579 dma_stats = efx->stats_buffer.addr;
540 nic_data = efx->nic_data; 580 nic_data = efx->nic_data;
541 581
@@ -543,8 +583,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
543 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 583 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
544 return 0; 584 return 0;
545 rmb(); 585 rmb();
546 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, 586 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
547 stats, efx->stats_buffer.addr, false); 587 stats, efx->stats_buffer.addr, false);
588 rmb();
548 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 589 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
549 if (generation_end != generation_start) 590 if (generation_end != generation_start)
550 return -EAGAIN; 591 return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
563static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, 604static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
564 struct rtnl_link_stats64 *core_stats) 605 struct rtnl_link_stats64 *core_stats)
565{ 606{
566 const unsigned long *mask = efx_ef10_stat_mask(efx); 607 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
567 struct efx_ef10_nic_data *nic_data = efx->nic_data; 608 struct efx_ef10_nic_data *nic_data = efx->nic_data;
568 u64 *stats = nic_data->stats; 609 u64 *stats = nic_data->stats;
569 size_t stats_count = 0, index; 610 size_t stats_count = 0, index;
570 int retry; 611 int retry;
571 612
613 efx_ef10_get_stat_mask(efx, mask);
614
572 /* If we're unlucky enough to read statistics during the DMA, wait 615 /* If we're unlucky enough to read statistics during the DMA, wait
573 * up to 10ms for it to finish (typically takes <500us) 616 * up to 10ms for it to finish (typically takes <500us)
574 */ 617 */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 128d7cdf9eb2..366c8e3e3784 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -27,10 +27,10 @@
27 27
28/* A reboot/assertion causes the MCDI status word to be set after the 28/* A reboot/assertion causes the MCDI status word to be set after the
29 * command word is set or a REBOOT event is sent. If we notice a reboot 29 * command word is set or a REBOOT event is sent. If we notice a reboot
30 * via these mechanisms then wait 20ms for the status word to be set. 30 * via these mechanisms then wait 250ms for the status word to be set.
31 */ 31 */
32#define MCDI_STATUS_DELAY_US 100 32#define MCDI_STATUS_DELAY_US 100
33#define MCDI_STATUS_DELAY_COUNT 200 33#define MCDI_STATUS_DELAY_COUNT 2500
34#define MCDI_STATUS_SLEEP_MS \ 34#define MCDI_STATUS_SLEEP_MS \
35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 35 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
36 36
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
800 } else { 800 } else {
801 int count; 801 int count;
802 802
803 /* Nobody was waiting for an MCDI request, so trigger a reset */
804 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
805
806 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 803 /* Consume the status word since efx_mcdi_rpc_finish() won't */
807 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 804 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
808 if (efx_mcdi_poll_reboot(efx)) 805 if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
810 udelay(MCDI_STATUS_DELAY_US); 807 udelay(MCDI_STATUS_DELAY_US);
811 } 808 }
812 mcdi->new_epoch = true; 809 mcdi->new_epoch = true;
810
811 /* Nobody was waiting for an MCDI request, so trigger a reset */
812 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
813 } 813 }
814 814
815 spin_unlock(&mcdi->iface_lock); 815 spin_unlock(&mcdi->iface_lock);
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
963 bool *was_attached) 963 bool *was_attached)
964{ 964{
965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
967 size_t outlen; 967 size_t outlen;
968 int rc; 968 int rc;
969 969
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
981 goto fail; 981 goto fail;
982 } 982 }
983 983
984 /* We currently assume we have control of the external link
985 * and are completely trusted by firmware. Abort probing
986 * if that's not true for this function.
987 */
988 if (driver_operating &&
989 outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
990 (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
991 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
992 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
993 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
994 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
995 netif_err(efx, probe, efx->net_dev,
996 "This driver version only supports one function per port\n");
997 return -ENODEV;
998 }
999
984 if (was_attached != NULL) 1000 if (was_attached != NULL)
985 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 1001 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
986 return 0; 1002 return 0;
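Two details of the mcdi.c hunks above are worth spelling out. First, with MCDI_STATUS_DELAY_US = 100 and MCDI_STATUS_DELAY_COUNT = 2500, MCDI_STATUS_SLEEP_MS works out to 100 * 2500 / 1000 = 250 ms, matching the updated comment, and the MC_FAILURE reset is now scheduled only after the status word has been consumed. Second, efx_mcdi_drv_attach() now refuses to probe unless the firmware reports both the LINKCTRL and TRUSTED function flags. A small sketch of that "all required bits present" test (placeholder bit positions, not the real MCDI values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_LINKCTRL	(1u << 0)	/* placeholder bit positions */
#define FLAG_TRUSTED	(1u << 1)

/* Probing should proceed only if every required capability bit is set. */
static bool have_required_caps(uint32_t func_flags)
{
	const uint32_t required = FLAG_LINKCTRL | FLAG_TRUSTED;

	return (func_flags & required) == required;
}

int main(void)
{
	printf("%d %d\n", have_required_caps(FLAG_LINKCTRL),
	       have_required_caps(FLAG_LINKCTRL | FLAG_TRUSTED));
	return 0;
}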
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8e..e0a63ddb7a6c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ 2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ 2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ 2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
2577#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ 2577/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2578#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ 2578 * capability only.
2579 */
2580#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
2581/* enum: PM discard_bb_overflow counter. Valid for EF10 with
2582 * PM_AND_RXDP_COUNTERS capability only.
2583 */
2584#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
2585/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2586 * capability only.
2587 */
2588#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
2589/* enum: PM discard_vfifo_full counter. Valid for EF10 with
2590 * PM_AND_RXDP_COUNTERS capability only.
2591 */
2592#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
2593/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2594 * capability only.
2595 */
2596#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
2597/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2598 * capability only.
2599 */
2600#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
2601/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2602 * capability only.
2603 */
2604#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
2605/* enum: RXDP counter: Number of packets dropped due to the queue being
2606 * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2607 */
2608#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
2609/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
2610 * with PM_AND_RXDP_COUNTERS capability only.
2611 */
2612#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
2613/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
2614 * PM_AND_RXDP_COUNTERS capability only.
2615 */
2616#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
2617/* enum: RXDP counter: Number of times an emergency descriptor fetch was
2618 * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2619 */
2620#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
2621/* enum: RXDP counter: Number of times the DPCPU waited for an existing
2622 * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2623 */
2624#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
2625/* enum: Start of GMAC stats buffer space, for Siena only. */
2626#define MC_CMD_GMAC_DMABUF_START 0x40
2627/* enum: End of GMAC stats buffer space, for Siena only. */
2628#define MC_CMD_GMAC_DMABUF_END 0x5f
2579#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ 2629#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
2580#define MC_CMD_MAC_NSTATS 0x61 /* enum */ 2630#define MC_CMD_MAC_NSTATS 0x61 /* enum */
2581 2631
@@ -5065,6 +5115,8 @@
5065#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 5115#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
5066#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 5116#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
5067#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 5117#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
5118#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
5119#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
5068/* RxDPCPU firmware id. */ 5120/* RxDPCPU firmware id. */
5069#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 5121#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
5070#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 5122#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index e7dbd2dd202e..9826594c8a48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -469,8 +469,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
469 * @count: Length of the @desc array 469 * @count: Length of the @desc array
470 * @mask: Bitmask of which elements of @desc are enabled 470 * @mask: Bitmask of which elements of @desc are enabled
471 * @stats: Buffer to update with the converted statistics. The length 471 * @stats: Buffer to update with the converted statistics. The length
472 * of this array must be at least the number of set bits in the 472 * of this array must be at least @count.
473 * first @count bits of @mask.
474 * @dma_buf: DMA buffer containing hardware statistics 473 * @dma_buf: DMA buffer containing hardware statistics
475 * @accumulate: If set, the converted values will be added rather than 474 * @accumulate: If set, the converted values will be added rather than
476 * directly stored to the corresponding elements of @stats 475 * directly stored to the corresponding elements of @stats
@@ -503,11 +502,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
503 } 502 }
504 503
505 if (accumulate) 504 if (accumulate)
506 *stats += val; 505 stats[index] += val;
507 else 506 else
508 *stats = val; 507 stats[index] = val;
509 } 508 }
510
511 ++stats;
512 } 509 }
513} 510}
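The nic.c hunks above change the output convention of efx_nic_update_stats(): converted values are stored at stats[index], so the @stats array must be at least @count entries long, instead of being packed into consecutive slots for each set mask bit. A simplified sketch of the new indexing (the per-descriptor decode of the DMA buffer is elided and the names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Store each enabled statistic at its own descriptor index; disabled
 * entries are simply left untouched.
 */
static void update_stats(size_t count, const bool *enabled,
			 const uint64_t *hw_vals, uint64_t *stats,
			 bool accumulate)
{
	size_t index;

	for (index = 0; index < count; index++) {
		if (!enabled[index])
			continue;
		if (accumulate)
			stats[index] += hw_vals[index];
		else
			stats[index] = hw_vals[index];
	}
}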
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index fda29d39032f..890bbbe8320e 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -386,6 +386,18 @@ enum {
386 EF10_STAT_rx_align_error, 386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error, 387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops, 388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_rx_pm_trunc_bb_overflow,
390 EF10_STAT_rx_pm_discard_bb_overflow,
391 EF10_STAT_rx_pm_trunc_vfifo_full,
392 EF10_STAT_rx_pm_discard_vfifo_full,
393 EF10_STAT_rx_pm_trunc_qbb,
394 EF10_STAT_rx_pm_discard_qbb,
395 EF10_STAT_rx_pm_discard_mapping,
396 EF10_STAT_rx_dp_q_disabled_packets,
397 EF10_STAT_rx_dp_di_dropped_packets,
398 EF10_STAT_rx_dp_streaming_packets,
399 EF10_STAT_rx_dp_emerg_fetch,
400 EF10_STAT_rx_dp_emerg_wait,
389 EF10_STAT_COUNT 401 EF10_STAT_COUNT
390}; 402};
391 403
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5730fe2445a6..98eedb90cdc3 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] = {
1124 void __iomem *__ioaddr = ioaddr; \ 1124 void __iomem *__ioaddr = ioaddr; \
1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1126 __len -= 2; \ 1126 __len -= 2; \
1127 SMC_outw(*(u16 *)__ptr, ioaddr, \ 1127 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1128 DATA_REG(lp)); \
1129 __ptr += 2; \ 1128 __ptr += 2; \
1130 } \ 1129 } \
1131 if (SMC_CAN_USE_DATACS && lp->datacs) \ 1130 if (SMC_CAN_USE_DATACS && lp->datacs) \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] = {
1133 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ 1132 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
1134 if (__len & 2) { \ 1133 if (__len & 2) { \
1135 __ptr += (__len & ~3); \ 1134 __ptr += (__len & ~3); \
1136 SMC_outw(*((u16 *)__ptr), ioaddr, \ 1135 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1137 DATA_REG(lp)); \
1138 } \ 1136 } \
1139 } else if (SMC_16BIT(lp)) \ 1137 } else if (SMC_16BIT(lp)) \
1140 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ 1138 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187a..cc3ce557e4aa 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -639,13 +639,6 @@ void cpsw_rx_handler(void *token, int len, int status)
639static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 639static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
640{ 640{
641 struct cpsw_priv *priv = dev_id; 641 struct cpsw_priv *priv = dev_id;
642 u32 rx, tx, rx_thresh;
643
644 rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
645 rx = __raw_readl(&priv->wr_regs->rx_stat);
646 tx = __raw_readl(&priv->wr_regs->tx_stat);
647 if (!rx_thresh && !rx && !tx)
648 return IRQ_NONE;
649 642
650 cpsw_intr_disable(priv); 643 cpsw_intr_disable(priv);
651 if (priv->irq_enabled == true) { 644 if (priv->irq_enabled == true) {
@@ -1169,9 +1162,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
1169 } 1162 }
1170 } 1163 }
1171 1164
1165 napi_enable(&priv->napi);
1172 cpdma_ctlr_start(priv->dma); 1166 cpdma_ctlr_start(priv->dma);
1173 cpsw_intr_enable(priv); 1167 cpsw_intr_enable(priv);
1174 napi_enable(&priv->napi);
1175 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1168 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1176 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1169 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1177 1170
@@ -1771,8 +1764,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1771 } 1764 }
1772 data->mac_control = prop; 1765 data->mac_control = prop;
1773 1766
1774 if (!of_property_read_u32(node, "dual_emac", &prop)) 1767 if (of_property_read_bool(node, "dual_emac"))
1775 data->dual_emac = prop; 1768 data->dual_emac = 1;
1776 1769
1777 /* 1770 /*
1778 * Populate all the child nodes here... 1771 * Populate all the child nodes here...
@@ -1782,7 +1775,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1782 if (ret) 1775 if (ret)
1783 pr_warn("Doesn't have any child node\n"); 1776 pr_warn("Doesn't have any child node\n");
1784 1777
1785 for_each_node_by_name(slave_node, "slave") { 1778 for_each_child_of_node(node, slave_node) {
1786 struct cpsw_slave_data *slave_data = data->slave_data + i; 1779 struct cpsw_slave_data *slave_data = data->slave_data + i;
1787 const void *mac_addr = NULL; 1780 const void *mac_addr = NULL;
1788 u32 phyid; 1781 u32 phyid;
@@ -1791,6 +1784,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1791 struct device_node *mdio_node; 1784 struct device_node *mdio_node;
1792 struct platform_device *mdio; 1785 struct platform_device *mdio;
1793 1786
 1787 /* This is not a slave child node, continue */
1788 if (strcmp(slave_node->name, "slave"))
1789 continue;
1790
1794 parp = of_get_property(slave_node, "phy_id", &lenp); 1791 parp = of_get_property(slave_node, "phy_id", &lenp);
1795 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1792 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1796 pr_err("Missing slave[%d] phy_id property\n", i); 1793 pr_err("Missing slave[%d] phy_id property\n", i);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 67df09ea9d04..6a32ef9d63ae 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { 876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
879 } 879 } else if (!netdev_mc_empty(ndev)) {
880 if (!netdev_mc_empty(ndev)) {
881 struct netdev_hw_addr *ha; 880 struct netdev_hw_addr *ha;
882 881
883 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 882 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index c8f088ab5fdf..bdf697b184ae 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 33
34#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
35#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.1"
36#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
37 37
38#include <linux/types.h> 38#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1704 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1705 1705
1706 if (unlikely(vlan_tx_tag_present(skb))) { 1706 if (unlikely(vlan_tx_tag_present(skb))) {
1707 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); 1707 u16 vid_pcp = vlan_tx_tag_get(skb);
1708
1709 /* drop CFI/DEI bit, register needs VID and PCP */
1710 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1711 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1712 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1708 /* request tagging */ 1713 /* request tagging */
1709 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); 1714 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1710 } 1715 }
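The via-rhine hunk above repacks the 802.1Q tag before writing it to the TX descriptor: a TCI is laid out as PCP[15:13] | DEI[12] | VID[11:0], and the hardware register wants only VID and PCP, so the DEI/CFI bit is dropped and the priority field is shifted down by one. A runnable sketch of that bit manipulation (mask values as in <linux/if_vlan.h>):

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_MASK	0xe000

/* Drop the DEI/CFI bit and move PCP down next to the VID. */
static uint16_t tci_to_vid_pcp(uint16_t tci)
{
	return (tci & VLAN_VID_MASK) | ((tci & VLAN_PRIO_MASK) >> 1);
}

int main(void)
{
	/* PCP = 5, DEI = 1, VID = 0x123 -> 0x5123: DEI gone, PCP in bits 14:12 */
	printf("%#x\n", tci_to_vid_pcp(0xb123));
	return 0;
}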
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b88121f240ca..0029148077a9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 297 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); 298 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
299 299
300 /* Init descriptor indexes */
301 lp->tx_bd_ci = 0;
302 lp->tx_bd_next = 0;
303 lp->tx_bd_tail = 0;
304 lp->rx_bd_ci = 0;
305
300 return 0; 306 return 0;
301 307
302out: 308out:
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 0721e72f9299..5af1c3e5032a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
975 return -EINVAL; /* Cannot change this parameter when up */ 975 return -EINVAL; /* Cannot change this parameter when up */
976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) 976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
977 return -ENOBUFS; 977 return -ENOBUFS;
978 ym->bitrate = 9600;
979 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { 978 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
980 kfree(ym); 979 kfree(ym);
981 return -EFAULT; 980 return -EFAULT;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
82 82
83 struct mutex buffer_mutex; /* only used to protect buf */ 83 struct mutex buffer_mutex; /* only used to protect buf */
84 struct completion tx_complete; 84 struct completion tx_complete;
85 struct work_struct irqwork;
86 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ 85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
87}; 86};
88 87
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
344 if (ret) 343 if (ret)
345 goto err; 344 goto err;
346 345
346 INIT_COMPLETION(devrec->tx_complete);
347
347 /* Set TXNTRIG bit of TXNCON to send packet */ 348 /* Set TXNTRIG bit of TXNCON to send packet */
348 ret = read_short_reg(devrec, REG_TXNCON, &val); 349 ret = read_short_reg(devrec, REG_TXNCON, &val);
349 if (ret) 350 if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
354 val |= 0x4; 355 val |= 0x4;
355 write_short_reg(devrec, REG_TXNCON, val); 356 write_short_reg(devrec, REG_TXNCON, val);
356 357
357 INIT_COMPLETION(devrec->tx_complete);
358
359 /* Wait for the device to send the TX complete interrupt. */ 358 /* Wait for the device to send the TX complete interrupt. */
360 ret = wait_for_completion_interruptible_timeout( 359 ret = wait_for_completion_interruptible_timeout(
361 &devrec->tx_complete, 360 &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
590static irqreturn_t mrf24j40_isr(int irq, void *data) 589static irqreturn_t mrf24j40_isr(int irq, void *data)
591{ 590{
592 struct mrf24j40 *devrec = data; 591 struct mrf24j40 *devrec = data;
593
594 disable_irq_nosync(irq);
595
596 schedule_work(&devrec->irqwork);
597
598 return IRQ_HANDLED;
599}
600
601static void mrf24j40_isrwork(struct work_struct *work)
602{
603 struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
604 u8 intstat; 592 u8 intstat;
605 int ret; 593 int ret;
606 594
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
618 mrf24j40_handle_rx(devrec); 606 mrf24j40_handle_rx(devrec);
619 607
620out: 608out:
621 enable_irq(devrec->spi->irq); 609 return IRQ_HANDLED;
622} 610}
623 611
624static int mrf24j40_probe(struct spi_device *spi) 612static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
642 630
643 mutex_init(&devrec->buffer_mutex); 631 mutex_init(&devrec->buffer_mutex);
644 init_completion(&devrec->tx_complete); 632 init_completion(&devrec->tx_complete);
645 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
646 devrec->spi = spi; 633 devrec->spi = spi;
647 spi_set_drvdata(spi, devrec); 634 spi_set_drvdata(spi, devrec);
648 635
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
688 val &= ~0x3; /* Clear RX mode (normal) */ 675 val &= ~0x3; /* Clear RX mode (normal) */
689 write_short_reg(devrec, REG_RXMCR, val); 676 write_short_reg(devrec, REG_RXMCR, val);
690 677
691 ret = request_irq(spi->irq, 678 ret = request_threaded_irq(spi->irq,
692 mrf24j40_isr, 679 NULL,
693 IRQF_TRIGGER_FALLING, 680 mrf24j40_isr,
694 dev_name(&spi->dev), 681 IRQF_TRIGGER_LOW|IRQF_ONESHOT,
695 devrec); 682 dev_name(&spi->dev),
683 devrec);
696 684
697 if (ret) { 685 if (ret) {
698 dev_err(printdev(devrec), "Unable to get IRQ"); 686 dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
721 dev_dbg(printdev(devrec), "remove\n"); 709 dev_dbg(printdev(devrec), "remove\n");
722 710
723 free_irq(spi->irq, devrec); 711 free_irq(spi->irq, devrec);
724 flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
725 ieee802154_unregister_device(devrec->dev); 712 ieee802154_unregister_device(devrec->dev);
726 ieee802154_free_device(devrec->dev); 713 ieee802154_free_device(devrec->dev);
727 /* TODO: Will ieee802154_free_device() wait until ->xmit() is 714 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
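The mrf24j40 conversion above (and the cw1200_spi change later in this pull) follows the same pattern: instead of a hard interrupt handler that schedules a workqueue, the driver registers a threaded handler, which runs in process context where its SPI I/O may sleep, while IRQF_ONESHOT keeps the line masked until the thread returns, replacing the manual disable_irq_nosync()/enable_irq() pairing. A condensed kernel-side sketch of the registration call (not compilable on its own):

/* No hard-IRQ part (handler == NULL); mrf24j40_isr runs in a kernel
 * thread. IRQF_ONESHOT masks the line until the thread handler returns.
 */
ret = request_threaded_irq(spi->irq, NULL, mrf24j40_isr,
			   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			   dev_name(&spi->dev), devrec);
if (ret)
	dev_err(&spi->dev, "Unable to get IRQ\n");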
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a34d6bf5e43b..cc70ecfc7062 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock);
432 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
433 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
434 * transmission of another packet */ 435 * transmission of another packet */
435 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
436 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock);
437 sl_unlock(sl); 439 sl_unlock(sl);
438 return; 440 return;
439 } 441 }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
441 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
442 sl->xleft -= actual; 444 sl->xleft -= actual;
443 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock);
444} 447}
445 448
446static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1293 if (unlikely(!noblock)) 1293 if (unlikely(!noblock))
1294 add_wait_queue(&tfile->wq.wait, &wait); 1294 add_wait_queue(&tfile->wq.wait, &wait);
1295 while (len) { 1295 while (len) {
1296 current->state = TASK_INTERRUPTIBLE; 1296 if (unlikely(!noblock))
1297 current->state = TASK_INTERRUPTIBLE;
1297 1298
1298 /* Read frames from the queue */ 1299 /* Read frames from the queue */
1299 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { 1300 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1320 break; 1321 break;
1321 } 1322 }
1322 1323
1323 current->state = TASK_RUNNING; 1324 if (unlikely(!noblock)) {
1324 if (unlikely(!noblock)) 1325 current->state = TASK_RUNNING;
1325 remove_wait_queue(&tfile->wq.wait, &wait); 1326 remove_wait_queue(&tfile->wq.wait, &wait);
1327 }
1326 1328
1327 return ret; 1329 return ret;
1328} 1330}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 3569293df872..846cc19c04f2 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -36,8 +36,8 @@
36#define AX_RXHDR_L4_TYPE_TCP 16 36#define AX_RXHDR_L4_TYPE_TCP 16
37#define AX_RXHDR_L3CSUM_ERR 2 37#define AX_RXHDR_L3CSUM_ERR 2
38#define AX_RXHDR_L4CSUM_ERR 1 38#define AX_RXHDR_L4CSUM_ERR 1
39#define AX_RXHDR_CRC_ERR ((u32)BIT(31)) 39#define AX_RXHDR_CRC_ERR ((u32)BIT(29))
40#define AX_RXHDR_DROP_ERR ((u32)BIT(30)) 40#define AX_RXHDR_DROP_ERR ((u32)BIT(31))
41#define AX_ACCESS_MAC 0x01 41#define AX_ACCESS_MAC 0x01
42#define AX_ACCESS_PHY 0x02 42#define AX_ACCESS_PHY 0x02
43#define AX_ACCESS_EEPROM 0x04 43#define AX_ACCESS_EEPROM 0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
1406 .tx_fixup = ax88179_tx_fixup, 1406 .tx_fixup = ax88179_tx_fixup,
1407}; 1407};
1408 1408
1409static const struct driver_info samsung_info = {
1410 .description = "Samsung USB Ethernet Adapter",
1411 .bind = ax88179_bind,
1412 .unbind = ax88179_unbind,
1413 .status = ax88179_status,
1414 .link_reset = ax88179_link_reset,
1415 .reset = ax88179_reset,
1416 .stop = ax88179_stop,
1417 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1418 .rx_fixup = ax88179_rx_fixup,
1419 .tx_fixup = ax88179_tx_fixup,
1420};
1421
1409static const struct usb_device_id products[] = { 1422static const struct usb_device_id products[] = {
1410{ 1423{
1411 /* ASIX AX88179 10/100/1000 */ 1424 /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
1418}, { 1431}, {
1419 /* Sitecom USB 3.0 to Gigabit Adapter */ 1432 /* Sitecom USB 3.0 to Gigabit Adapter */
1420 USB_DEVICE(0x0df6, 0x0072), 1433 USB_DEVICE(0x0df6, 0x0072),
1421 .driver_info = (unsigned long) &sitecom_info, 1434 .driver_info = (unsigned long)&sitecom_info,
1435}, {
1436 /* Samsung USB Ethernet Adapter */
1437 USB_DEVICE(0x04e8, 0xa100),
1438 .driver_info = (unsigned long)&samsung_info,
1422}, 1439},
1423 { }, 1440 { },
1424}; 1441};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 2dbb9460349d..c6867f926cff 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
303 rx_ctl |= 0x02; 303 rx_ctl |= 0x02;
304 } else if (net->flags & IFF_ALLMULTI || 304 } else if (net->flags & IFF_ALLMULTI ||
305 netdev_mc_count(net) > DM_MAX_MCAST) { 305 netdev_mc_count(net) > DM_MAX_MCAST) {
306 rx_ctl |= 0x04; 306 rx_ctl |= 0x08;
307 } else if (!netdev_mc_empty(net)) { 307 } else if (!netdev_mc_empty(net)) {
308 struct netdev_hw_addr *ha; 308 struct netdev_hw_addr *ha;
309 309
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6312332afeba..818ce90185b5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,7 +714,8 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ 717 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
718 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 719
719 /* 4. Gobi 1000 devices */ 720 /* 4. Gobi 1000 devices */
720 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 721 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7b331e613e02..90a429b7ebad 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1241 if (num_sgs == 1) 1241 if (num_sgs == 1)
1242 return 0; 1242 return 0;
1243 1243
1244 urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); 1244 /* reserve one for zero packet */
1245 urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
1246 GFP_ATOMIC);
1245 if (!urb->sg) 1247 if (!urb->sg)
1246 return -ENOMEM; 1248 return -ENOMEM;
1247 1249
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1305 if (build_dma_sg(skb, urb) < 0) 1307 if (build_dma_sg(skb, urb) < 0)
1306 goto drop; 1308 goto drop;
1307 } 1309 }
1308 entry->length = length = urb->transfer_buffer_length; 1310 length = urb->transfer_buffer_length;
1309 1311
1310 /* don't assume the hardware handles USB_ZERO_PACKET 1312 /* don't assume the hardware handles USB_ZERO_PACKET
1311 * NOTE: strictly conforming cdc-ether devices should expect 1313 * NOTE: strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1317 if (length % dev->maxpacket == 0) { 1319 if (length % dev->maxpacket == 0) {
1318 if (!(info->flags & FLAG_SEND_ZLP)) { 1320 if (!(info->flags & FLAG_SEND_ZLP)) {
1319 if (!(info->flags & FLAG_MULTI_PACKET)) { 1321 if (!(info->flags & FLAG_MULTI_PACKET)) {
1320 urb->transfer_buffer_length++; 1322 length++;
1321 if (skb_tailroom(skb)) { 1323 if (skb_tailroom(skb) && !urb->num_sgs) {
1322 skb->data[skb->len] = 0; 1324 skb->data[skb->len] = 0;
1323 __skb_put(skb, 1); 1325 __skb_put(skb, 1);
1324 } 1326 } else if (urb->num_sgs)
1327 sg_set_buf(&urb->sg[urb->num_sgs++],
1328 dev->padding_pkt, 1);
1325 } 1329 }
1326 } else 1330 } else
1327 urb->transfer_flags |= URB_ZERO_PACKET; 1331 urb->transfer_flags |= URB_ZERO_PACKET;
1328 } 1332 }
1333 entry->length = urb->transfer_buffer_length = length;
1329 1334
1330 spin_lock_irqsave(&dev->txq.lock, flags); 1335 spin_lock_irqsave(&dev->txq.lock, flags);
1331 retval = usb_autopm_get_interface_async(dev->intf); 1336 retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1509 1514
1510 usb_kill_urb(dev->interrupt); 1515 usb_kill_urb(dev->interrupt);
1511 usb_free_urb(dev->interrupt); 1516 usb_free_urb(dev->interrupt);
1517 kfree(dev->padding_pkt);
1512 1518
1513 free_netdev(net); 1519 free_netdev(net);
1514} 1520}
@@ -1679,9 +1685,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1679 /* initialize max rx_qlen and tx_qlen */ 1685 /* initialize max rx_qlen and tx_qlen */
1680 usbnet_update_max_qlen(dev); 1686 usbnet_update_max_qlen(dev);
1681 1687
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt) {
1692 status = -ENOMEM;
1693 goto out4;
1694 }
1695 }
1696
1682 status = register_netdev (net); 1697 status = register_netdev (net);
1683 if (status) 1698 if (status)
1684 goto out4; 1699 goto out5;
1685 netif_info(dev, probe, dev->net, 1700 netif_info(dev, probe, dev->net,
1686 "register '%s' at usb-%s-%s, %s, %pM\n", 1701 "register '%s' at usb-%s-%s, %s, %pM\n",
1687 udev->dev.driver->name, 1702 udev->dev.driver->name,
@@ -1699,6 +1714,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1699 1714
1700 return 0; 1715 return 0;
1701 1716
1717out5:
1718 kfree(dev->padding_pkt);
1702out4: 1719out4:
1703 usb_free_urb(dev->interrupt); 1720 usb_free_urb(dev->interrupt);
1704out3: 1721out3:
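The usbnet hunks above deal with zero-length-packet avoidance when scatter-gather is in use: if the URB length is an exact multiple of the endpoint's max packet size and the device does not tolerate a ZLP, one extra byte has to be sent; it comes from skb tailroom for linear URBs, or from the preallocated dev->padding_pkt buffer added as an extra scatterlist entry (which is why build_dma_sg() now reserves one more entry). A trivial sketch of the padding decision:

#include <stdbool.h>
#include <stddef.h>

/* True when a 1-byte pad must be appended to avoid a packet-size-multiple
 * transfer on a device that cannot accept a zero-length packet.
 */
static bool needs_one_byte_pad(size_t length, size_t maxpacket,
			       bool can_send_zlp)
{
	return maxpacket && length % maxpacket == 0 && !can_send_zlp;
}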
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index defec2b3c5a4..9fbdfcd1e1a0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -938,7 +938,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
938 return -EINVAL; 938 return -EINVAL;
939 } else { 939 } else {
940 vi->curr_queue_pairs = queue_pairs; 940 vi->curr_queue_pairs = queue_pairs;
 941 schedule_delayed_work(&vi->refill, 0); 941 /* virtnet_open() will refill when the device is brought up. */
942 if (dev->flags & IFF_UP)
943 schedule_delayed_work(&vi->refill, 0);
942 } 944 }
943 945
944 return 0; 946 return 0;
@@ -1116,6 +1118,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1116{ 1118{
1117 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1119 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1118 1120
1121 mutex_lock(&vi->config_lock);
1122
1123 if (!vi->config_enable)
1124 goto done;
1125
1119 switch(action & ~CPU_TASKS_FROZEN) { 1126 switch(action & ~CPU_TASKS_FROZEN) {
1120 case CPU_ONLINE: 1127 case CPU_ONLINE:
1121 case CPU_DOWN_FAILED: 1128 case CPU_DOWN_FAILED:
@@ -1128,6 +1135,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1128 default: 1135 default:
1129 break; 1136 break;
1130 } 1137 }
1138
1139done:
1140 mutex_unlock(&vi->config_lock);
1131 return NOTIFY_OK; 1141 return NOTIFY_OK;
1132} 1142}
1133 1143
@@ -1733,7 +1743,9 @@ static int virtnet_restore(struct virtio_device *vdev)
1733 vi->config_enable = true; 1743 vi->config_enable = true;
1734 mutex_unlock(&vi->config_lock); 1744 mutex_unlock(&vi->config_lock);
1735 1745
1746 rtnl_lock();
1736 virtnet_set_queues(vi, vi->curr_queue_pairs); 1747 virtnet_set_queues(vi, vi->curr_queue_pairs);
1748 rtnl_unlock();
1737 1749
1738 return 0; 1750 return 0;
1739} 1751}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d1292fe746bc..2ef5b6219f3f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
952 952
953 spin_lock(&vn->sock_lock); 953 spin_lock(&vn->sock_lock);
954 hlist_del_rcu(&vs->hlist); 954 hlist_del_rcu(&vs->hlist);
955 smp_wmb(); 955 rcu_assign_sk_user_data(vs->sock->sk, NULL);
956 vs->sock->sk->sk_user_data = NULL;
957 vxlan_notify_del_rx_port(sk); 956 vxlan_notify_del_rx_port(sk);
958 spin_unlock(&vn->sock_lock); 957 spin_unlock(&vn->sock_lock);
959 958
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1048 1047
1049 port = inet_sk(sk)->inet_sport; 1048 port = inet_sk(sk)->inet_sport;
1050 1049
1051 smp_read_barrier_depends(); 1050 vs = rcu_dereference_sk_user_data(sk);
1052 vs = (struct vxlan_sock *)sk->sk_user_data;
1053 if (!vs) 1051 if (!vs)
1054 goto drop; 1052 goto drop;
1055 1053
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2302 atomic_set(&vs->refcnt, 1); 2300 atomic_set(&vs->refcnt, 1);
2303 vs->rcv = rcv; 2301 vs->rcv = rcv;
2304 vs->data = data; 2302 vs->data = data;
2305 smp_wmb(); 2303 rcu_assign_sk_user_data(vs->sock->sk, vs);
2306 vs->sock->sk->sk_user_data = vs;
2307 2304
2308 spin_lock(&vn->sock_lock); 2305 spin_lock(&vn->sock_lock);
2309 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 2306 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
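The vxlan hunks above replace the open-coded smp_wmb()/smp_read_barrier_depends() plus raw sk_user_data accesses with rcu_assign_sk_user_data() and rcu_dereference_sk_user_data(), i.e. a release-style publish paired with a dependency-ordered read. A userspace approximation of the same publish/lookup pattern (acquire is used as a conservative stand-in on the reader side; types are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct sock_ctx { int port; };

static _Atomic(struct sock_ctx *) sk_user_data_ptr;

/* Writer: initialise the object fully, then publish with release
 * semantics so readers that see the pointer also see its contents.
 */
static void publish_ctx(struct sock_ctx *vs)
{
	atomic_store_explicit(&sk_user_data_ptr, vs, memory_order_release);
}

/* Reader: a single ordered load; NULL means nothing is published yet. */
static struct sock_ctx *lookup_ctx(void)
{
	return atomic_load_explicit(&sk_user_data_ptr, memory_order_acquire);
}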
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f268751..bcfff0d62de4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
1972 } 1972 }
1973 1973
1974 i = port->index; 1974 i = port->index;
1975 memset(&sync, 0, sizeof(sync));
1975 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); 1976 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
1976 /* Lucky card and linux use same encoding here */ 1977 /* Lucky card and linux use same encoding here */
1977 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) == 1978 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a70cc7..4c0a69779b89 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
355 ifr->ifr_settings.size = size; /* data size wanted */ 355 ifr->ifr_settings.size = size; /* data size wanted */
356 return -ENOBUFS; 356 return -ENOBUFS;
357 } 357 }
358 memset(&line, 0, sizeof(line));
358 line.clock_type = get_status(port)->clocking; 359 line.clock_type = get_status(port)->clocking;
359 line.clock_rate = 0; 360 line.clock_rate = 0;
360 line.loopback = 0; 361 line.loopback = 0;
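The farsync and wanxl hunks above fix the same class of bug: a sync_serial_settings structure that is later copied to user space was only partially filled in, so stack padding and unset fields could leak kernel memory; zeroing the structure before populating it closes the leak. A minimal sketch of the idiom (the struct layout below is an illustrative stand-in):

#include <string.h>

struct serial_settings_example {
	unsigned int clock_rate;
	unsigned int clock_type;
	unsigned short loopback;
};

/* Zero everything first so no uninitialised bytes survive into the copy
 * that eventually reaches user space.
 */
static void fill_settings(struct serial_settings_example *line,
			  unsigned int clock_type)
{
	memset(line, 0, sizeof(*line));
	line->clock_type = clock_type;
	/* clock_rate and loopback stay 0 unless explicitly set */
}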
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e4f65900132d..709301f88dcd 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
208 struct ath_hw *ah = sc->sc_ah; 208 struct ath_hw *ah = sc->sc_ah;
209 struct ath_common *common = ath9k_hw_common(ah); 209 struct ath_common *common = ath9k_hw_common(ah);
210 unsigned long flags; 210 unsigned long flags;
211 int i;
211 212
212 if (ath_startrecv(sc) != 0) { 213 if (ath_startrecv(sc) != 0) {
213 ath_err(common, "Unable to restart recv logic\n"); 214 ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
235 } 236 }
236 work: 237 work:
237 ath_restart_work(sc); 238 ath_restart_work(sc);
239
240 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
241 if (!ATH_TXQ_SETUP(sc, i))
242 continue;
243
244 spin_lock_bh(&sc->tx.txq[i].axq_lock);
245 ath_txq_schedule(sc, &sc->tx.txq[i]);
246 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
247 }
238 } 248 }
239 249
240 ieee80211_wake_queues(sc->hw); 250 ieee80211_wake_queues(sc->hw);
@@ -539,21 +549,10 @@ chip_reset:
539 549
540static int ath_reset(struct ath_softc *sc) 550static int ath_reset(struct ath_softc *sc)
541{ 551{
542 int i, r; 552 int r;
543 553
544 ath9k_ps_wakeup(sc); 554 ath9k_ps_wakeup(sc);
545
546 r = ath_reset_internal(sc, NULL); 555 r = ath_reset_internal(sc, NULL);
547
548 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
549 if (!ATH_TXQ_SETUP(sc, i))
550 continue;
551
552 spin_lock_bh(&sc->tx.txq[i].axq_lock);
553 ath_txq_schedule(sc, &sc->tx.txq[i]);
554 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
555 }
556
557 ath9k_ps_restore(sc); 556 ath9k_ps_restore(sc);
558 557
559 return r; 558 return r;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4ee472a5a4e4..ab9e3a8410bc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
1270 return; 1270 return;
1271 1271
1272 /* 1272 /*
1273 * All MPDUs in an aggregate will use the same LNA
1274 * as the first MPDU.
1275 */
1276 if (rs->rs_isaggr && !rs->rs_firstaggr)
1277 return;
1278
1279 /*
1280 * Change the default rx antenna if rx diversity 1273 * Change the default rx antenna if rx diversity
1281 * chooses the other antenna 3 times in a row. 1274 * chooses the other antenna 3 times in a row.
1282 */ 1275 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35b515fe3ffa..dd30452df966 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
399 tbf->bf_buf_addr = bf->bf_buf_addr; 399 tbf->bf_buf_addr = bf->bf_buf_addr;
400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 400 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
401 tbf->bf_state = bf->bf_state; 401 tbf->bf_state = bf->bf_state;
402 tbf->bf_state.stale = false;
402 403
403 return tbf; 404 return tbf;
404} 405}
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1389 u16 tid, u16 *ssn) 1390 u16 tid, u16 *ssn)
1390{ 1391{
1391 struct ath_atx_tid *txtid; 1392 struct ath_atx_tid *txtid;
1393 struct ath_txq *txq;
1392 struct ath_node *an; 1394 struct ath_node *an;
1393 u8 density; 1395 u8 density;
1394 1396
1395 an = (struct ath_node *)sta->drv_priv; 1397 an = (struct ath_node *)sta->drv_priv;
1396 txtid = ATH_AN_2_TID(an, tid); 1398 txtid = ATH_AN_2_TID(an, tid);
1399 txq = txtid->ac->txq;
1400
1401 ath_txq_lock(sc, txq);
1397 1402
1398 /* update ampdu factor/density, they may have changed. This may happen 1403 /* update ampdu factor/density, they may have changed. This may happen
1399 * in HT IBSS when a beacon with HT-info is received after the station 1404 * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1417 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1422 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1418 txtid->baw_head = txtid->baw_tail = 0; 1423 txtid->baw_head = txtid->baw_tail = 0;
1419 1424
1425 ath_txq_unlock_complete(sc, txq);
1426
1420 return 0; 1427 return 0;
1421} 1428}
1422 1429
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1555 __skb_unlink(bf->bf_mpdu, tid_q); 1562 __skb_unlink(bf->bf_mpdu, tid_q);
1556 list_add_tail(&bf->list, &bf_q); 1563 list_add_tail(&bf->list, &bf_q);
1557 ath_set_rates(tid->an->vif, tid->an->sta, bf); 1564 ath_set_rates(tid->an->vif, tid->an->sta, bf);
1558 ath_tx_addto_baw(sc, tid, bf); 1565 if (bf_isampdu(bf)) {
1559 bf->bf_state.bf_type &= ~BUF_AGGR; 1566 ath_tx_addto_baw(sc, tid, bf);
1567 bf->bf_state.bf_type &= ~BUF_AGGR;
1568 }
1560 if (bf_tail) 1569 if (bf_tail)
1561 bf_tail->bf_next = bf; 1570 bf_tail->bf_next = bf;
1562 1571
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1950 if (bf_is_ampdu_not_probing(bf)) 1959 if (bf_is_ampdu_not_probing(bf))
1951 txq->axq_ampdu_depth++; 1960 txq->axq_ampdu_depth++;
1952 1961
1953 bf = bf->bf_lastbf->bf_next; 1962 bf_last = bf->bf_lastbf;
1963 bf = bf_last->bf_next;
1964 bf_last->bf_next = NULL;
1954 } 1965 }
1955 } 1966 }
1956} 1967}
@@ -1958,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1958static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1969static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1959 struct ath_atx_tid *tid, struct sk_buff *skb) 1970 struct ath_atx_tid *tid, struct sk_buff *skb)
1960{ 1971{
1972 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1961 struct ath_frame_info *fi = get_frame_info(skb); 1973 struct ath_frame_info *fi = get_frame_info(skb);
1962 struct list_head bf_head; 1974 struct list_head bf_head;
1963 struct ath_buf *bf; 1975 struct ath_buf *bf = fi->bf;
1964
1965 bf = fi->bf;
1966 1976
1967 INIT_LIST_HEAD(&bf_head); 1977 INIT_LIST_HEAD(&bf_head);
1968 list_add_tail(&bf->list, &bf_head); 1978 list_add_tail(&bf->list, &bf_head);
1969 bf->bf_state.bf_type = 0; 1979 bf->bf_state.bf_type = 0;
1980 if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1981 bf->bf_state.bf_type = BUF_AMPDU;
1982 ath_tx_addto_baw(sc, tid, bf);
1983 }
1970 1984
1971 bf->bf_next = NULL; 1985 bf->bf_next = NULL;
1972 bf->bf_lastbf = bf; 1986 bf->bf_lastbf = bf;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 64f4a2bc8dde..c3462b75bd08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
464 464
465static int brcmf_sdio_pd_probe(struct platform_device *pdev) 465static int brcmf_sdio_pd_probe(struct platform_device *pdev)
466{ 466{
467 int ret;
468
469 brcmf_dbg(SDIO, "Enter\n"); 467 brcmf_dbg(SDIO, "Enter\n");
470 468
471 brcmfmac_sdio_pdata = pdev->dev.platform_data; 469 brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
473 if (brcmfmac_sdio_pdata->power_on) 471 if (brcmfmac_sdio_pdata->power_on)
474 brcmfmac_sdio_pdata->power_on(); 472 brcmfmac_sdio_pdata->power_on();
475 473
476 ret = sdio_register_driver(&brcmf_sdmmc_driver); 474 return 0;
477 if (ret)
478 brcmf_err("sdio_register_driver failed: %d\n", ret);
479
480 return ret;
481} 475}
482 476
483static int brcmf_sdio_pd_remove(struct platform_device *pdev) 477static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
500 } 494 }
501}; 495};
502 496
497void brcmf_sdio_register(void)
498{
499 int ret;
500
501 ret = sdio_register_driver(&brcmf_sdmmc_driver);
502 if (ret)
503 brcmf_err("sdio_register_driver failed: %d\n", ret);
504}
505
503void brcmf_sdio_exit(void) 506void brcmf_sdio_exit(void)
504{ 507{
505 brcmf_dbg(SDIO, "Enter\n"); 508 brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
510 sdio_unregister_driver(&brcmf_sdmmc_driver); 513 sdio_unregister_driver(&brcmf_sdmmc_driver);
511} 514}
512 515
513void brcmf_sdio_init(void) 516void __init brcmf_sdio_init(void)
514{ 517{
515 int ret; 518 int ret;
516 519
517 brcmf_dbg(SDIO, "Enter\n"); 520 brcmf_dbg(SDIO, "Enter\n");
518 521
519 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); 522 ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
520 if (ret == -ENODEV) { 523 if (ret == -ENODEV)
521 brcmf_dbg(SDIO, "No platform data available, registering without.\n"); 524 brcmf_dbg(SDIO, "No platform data available.\n");
522 ret = sdio_register_driver(&brcmf_sdmmc_driver);
523 }
524
525 if (ret)
526 brcmf_err("driver registration failed: %d\n", ret);
527} 525}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index f7c1985844e4..74156f84180c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev);
156#ifdef CONFIG_BRCMFMAC_SDIO 156#ifdef CONFIG_BRCMFMAC_SDIO
157extern void brcmf_sdio_exit(void); 157extern void brcmf_sdio_exit(void);
158extern void brcmf_sdio_init(void); 158extern void brcmf_sdio_init(void);
159extern void brcmf_sdio_register(void);
159#endif 160#endif
160#ifdef CONFIG_BRCMFMAC_USB 161#ifdef CONFIG_BRCMFMAC_USB
161extern void brcmf_usb_exit(void); 162extern void brcmf_usb_exit(void);
162extern void brcmf_usb_init(void); 163extern void brcmf_usb_register(void);
163#endif 164#endif
164 165
165#endif /* _BRCMF_BUS_H_ */ 166#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index e067aec1fbf1..40e7f854e10f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
1231 return bus->chip << 4 | bus->chiprev; 1231 return bus->chip << 4 | bus->chiprev;
1232} 1232}
1233 1233
1234static void brcmf_driver_init(struct work_struct *work) 1234static void brcmf_driver_register(struct work_struct *work)
1235{ 1235{
1236 brcmf_debugfs_init();
1237
1238#ifdef CONFIG_BRCMFMAC_SDIO 1236#ifdef CONFIG_BRCMFMAC_SDIO
1239 brcmf_sdio_init(); 1237 brcmf_sdio_register();
1240#endif 1238#endif
1241#ifdef CONFIG_BRCMFMAC_USB 1239#ifdef CONFIG_BRCMFMAC_USB
1242 brcmf_usb_init(); 1240 brcmf_usb_register();
1243#endif 1241#endif
1244} 1242}
1245static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); 1243static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
1246 1244
1247static int __init brcmfmac_module_init(void) 1245static int __init brcmfmac_module_init(void)
1248{ 1246{
1247 brcmf_debugfs_init();
1248#ifdef CONFIG_BRCMFMAC_SDIO
1249 brcmf_sdio_init();
1250#endif
1249 if (!schedule_work(&brcmf_driver_work)) 1251 if (!schedule_work(&brcmf_driver_work))
1250 return -EBUSY; 1252 return -EBUSY;
1251 1253
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 39e01a7c8556..f4aea47e0730 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
1539 brcmf_release_fw(&fw_image_list); 1539 brcmf_release_fw(&fw_image_list);
1540} 1540}
1541 1541
1542void brcmf_usb_init(void) 1542void brcmf_usb_register(void)
1543{ 1543{
1544 brcmf_dbg(USB, "Enter\n"); 1544 brcmf_dbg(USB, "Enter\n");
1545 INIT_LIST_HEAD(&fw_image_list); 1545 INIT_LIST_HEAD(&fw_image_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 3a6544710c8a..edc5d105ff98 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
457 if (err != 0) 457 if (err != 0)
458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", 458 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
459 __func__, err); 459 __func__, err);
460
461 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
460 return err; 462 return err;
461} 463}
462 464
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
479 return; 481 return;
480 } 482 }
481 483
484 bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
485
482 /* put driver in down state */ 486 /* put driver in down state */
483 spin_lock_bh(&wl->lock); 487 spin_lock_bh(&wl->lock);
484 brcms_down(wl); 488 brcms_down(wl);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index f5e6b489ed32..755a0c8edfe1 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -42,7 +42,6 @@ struct hwbus_priv {
42 spinlock_t lock; /* Serialize all bus operations */ 42 spinlock_t lock; /* Serialize all bus operations */
43 wait_queue_head_t wq; 43 wait_queue_head_t wq;
44 int claimed; 44 int claimed;
45 int irq_disabled;
46}; 45};
47 46
48#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) 47#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -238,9 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
238 struct hwbus_priv *self = dev_id; 237 struct hwbus_priv *self = dev_id;
239 238
240 if (self->core) { 239 if (self->core) {
241 disable_irq_nosync(self->func->irq); 240 cw1200_spi_lock(self);
242 self->irq_disabled = 1;
243 cw1200_irq_handler(self->core); 241 cw1200_irq_handler(self->core);
242 cw1200_spi_unlock(self);
244 return IRQ_HANDLED; 243 return IRQ_HANDLED;
245 } else { 244 } else {
246 return IRQ_NONE; 245 return IRQ_NONE;
@@ -253,9 +252,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
253 252
254 pr_debug("SW IRQ subscribe\n"); 253 pr_debug("SW IRQ subscribe\n");
255 254
256 ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, 255 ret = request_threaded_irq(self->func->irq, NULL,
257 IRQF_TRIGGER_HIGH, 256 cw1200_spi_irq_handler,
258 "cw1200_wlan_irq", self); 257 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
258 "cw1200_wlan_irq", self);
259 if (WARN_ON(ret < 0)) 259 if (WARN_ON(ret < 0))
260 goto exit; 260 goto exit;
261 261
@@ -273,22 +273,13 @@ exit:
273 273
274static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) 274static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
275{ 275{
276 int ret = 0;
277
276 pr_debug("SW IRQ unsubscribe\n"); 278 pr_debug("SW IRQ unsubscribe\n");
277 disable_irq_wake(self->func->irq); 279 disable_irq_wake(self->func->irq);
278 free_irq(self->func->irq, self); 280 free_irq(self->func->irq, self);
279 281
280 return 0; 282 return ret;
281}
282
283static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
284{
285 /* Disables are handled by the interrupt handler */
286 if (enable && self->irq_disabled) {
287 enable_irq(self->func->irq);
288 self->irq_disabled = 0;
289 }
290
291 return 0;
292} 283}
293 284
294static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) 285static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -368,7 +359,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
368 .unlock = cw1200_spi_unlock, 359 .unlock = cw1200_spi_unlock,
369 .align_size = cw1200_spi_align_size, 360 .align_size = cw1200_spi_align_size,
370 .power_mgmt = cw1200_spi_pm, 361 .power_mgmt = cw1200_spi_pm,
371 .irq_enable = cw1200_spi_irq_enable,
372}; 362};
373 363
374/* Probe Function to be called by SPI stack when device is discovered */ 364/* Probe Function to be called by SPI stack when device is discovered */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 0b2061bbc68b..acdff0f7f952 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
485 485
486 /* Enable interrupt signalling */ 486 /* Enable interrupt signalling */
487 priv->hwbus_ops->lock(priv->hwbus_priv); 487 priv->hwbus_ops->lock(priv->hwbus_priv);
488 ret = __cw1200_irq_enable(priv, 2); 488 ret = __cw1200_irq_enable(priv, 1);
489 priv->hwbus_ops->unlock(priv->hwbus_priv); 489 priv->hwbus_ops->unlock(priv->hwbus_priv);
490 if (ret < 0) 490 if (ret < 0)
491 goto unsubscribe; 491 goto unsubscribe;
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
index 51dfb3a90735..8b2fc831c3de 100644
--- a/drivers/net/wireless/cw1200/hwbus.h
+++ b/drivers/net/wireless/cw1200/hwbus.h
@@ -28,7 +28,6 @@ struct hwbus_ops {
28 void (*unlock)(struct hwbus_priv *self); 28 void (*unlock)(struct hwbus_priv *self);
29 size_t (*align_size)(struct hwbus_priv *self, size_t size); 29 size_t (*align_size)(struct hwbus_priv *self, size_t size);
30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend); 30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
31 int (*irq_enable)(struct hwbus_priv *self, int enable);
32}; 31};
33 32
34#endif /* CW1200_HWBUS_H */ 33#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
index 41bd7615ccaa..ff230b7aeedd 100644
--- a/drivers/net/wireless/cw1200/hwio.c
+++ b/drivers/net/wireless/cw1200/hwio.c
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
273 u16 val16; 273 u16 val16;
274 int ret; 274 int ret;
275 275
276 /* We need to do this hack because the SPI layer can sleep on I/O
277 and the general path involves I/O to the device in interrupt
278 context.
279
280 However, the initial enable call needs to go to the hardware.
281
282 We don't worry about shutdown because we do a full reset which
283 clears the interrupt enabled bits.
284 */
285 if (priv->hwbus_ops->irq_enable) {
286 ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
287 if (ret || enable < 2)
288 return ret;
289 }
290
291 if (HIF_8601_SILICON == priv->hw_type) { 276 if (HIF_8601_SILICON == priv->hw_type) {
292 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); 277 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
293 if (ret < 0) { 278 if (ret < 0) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 30d45e2fc193..8ac305be68f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
240 .ht_params = &iwl6000_ht_params, 240 .ht_params = &iwl6000_ht_params,
241}; 241};
242 242
243const struct iwl_cfg iwl6035_2agn_sff_cfg = {
244 .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
245 IWL_DEVICE_6035,
246 .ht_params = &iwl6000_ht_params,
247};
248
243const struct iwl_cfg iwl1030_bgn_cfg = { 249const struct iwl_cfg iwl1030_bgn_cfg = {
244 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", 250 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
245 IWL_DEVICE_6030, 251 IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index e4d370bff306..b03c25e14903 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
280extern const struct iwl_cfg iwl2000_2bgn_d_cfg; 280extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
281extern const struct iwl_cfg iwl2030_2bgn_cfg; 281extern const struct iwl_cfg iwl2030_2bgn_cfg;
282extern const struct iwl_cfg iwl6035_2agn_cfg; 282extern const struct iwl_cfg iwl6035_2agn_cfg;
283extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
283extern const struct iwl_cfg iwl105_bgn_cfg; 284extern const struct iwl_cfg iwl105_bgn_cfg;
284extern const struct iwl_cfg iwl105_bgn_d_cfg; 285extern const struct iwl_cfg iwl105_bgn_d_cfg;
285extern const struct iwl_cfg iwl135_bgn_cfg; 286extern const struct iwl_cfg iwl135_bgn_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index dd57a36ecb10..80b47508647c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
601{ 601{
602 int ret; 602 int ret;
603 603
604 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 604 if (trans->state != IWL_TRANS_FW_ALIVE) {
605 "%s bad state = %d", __func__, trans->state); 605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
606 return -EIO;
607 }
606 608
607 if (!(cmd->flags & CMD_ASYNC)) 609 if (!(cmd->flags & CMD_ASYNC))
608 lock_map_acquire_read(&trans->sync_cmd_lockdep_map); 610 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 21407a353a3b..d58e393324ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
273 if (!mvmvif->queue_params[ac].uapsd) 273 if (!mvmvif->queue_params[ac].uapsd)
274 continue; 274 continue;
275 275
276 cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); 276 if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
277 cmd->flags |=
278 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
279
277 cmd->uapsd_ac_flags |= BIT(ac); 280 cmd->uapsd_ac_flags |= BIT(ac);
278 281
279 /* QNDP TID - the highest TID with no admission control */ 282 /* QNDP TID - the highest TID with no admission control */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9a7ab8495300..621fb71f282a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -394,6 +394,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
394 return false; 394 return false;
395 } 395 }
396 396
397 /*
398 * If scan cannot be aborted, it means that we had a
399 * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
400 * ieee80211_scan_completed already.
401 */
397 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", 402 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
398 *resp); 403 *resp);
399 return true; 404 return true;
@@ -417,14 +422,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
417 SCAN_COMPLETE_NOTIFICATION }; 422 SCAN_COMPLETE_NOTIFICATION };
418 int ret; 423 int ret;
419 424
425 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
426 return;
427
420 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 428 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
421 scan_abort_notif, 429 scan_abort_notif,
422 ARRAY_SIZE(scan_abort_notif), 430 ARRAY_SIZE(scan_abort_notif),
423 iwl_mvm_scan_abort_notif, NULL); 431 iwl_mvm_scan_abort_notif, NULL);
424 432
425 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 433 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
434 CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
426 if (ret) { 435 if (ret) {
427 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 436 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
437 /* mac80211's state will be cleaned in the fw_restart flow */
428 goto out_remove_notif; 438 goto out_remove_notif;
429 } 439 }
430 440
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dc02cb9792af..26108a1a29fa 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
139 139
140/* 6x00 Series */ 140/* 6x00 Series */
141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, 141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, 143 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
144 {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
143 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, 145 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
144 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, 146 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
145 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, 147 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
146 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, 148 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
147 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, 149 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
148 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 150 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
151 {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
149 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 152 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
150 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 153 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
151 154
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
153 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, 156 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
154 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, 157 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
155 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, 158 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
159 {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
156 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, 160 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
157 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, 161 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
158 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, 163 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
164 {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
159 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, 165 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
160 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, 166 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
161 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, 167 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
168 {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, 169 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ 170 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
164 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ 171 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
240 247
241/* 6x35 Series */ 248/* 6x35 Series */
242 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, 249 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
250 {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
243 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, 251 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
252 {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
244 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, 253 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
254 {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, 255 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
246 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, 256 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
247 257
@@ -260,54 +270,86 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
260#if IS_ENABLED(CONFIG_IWLMVM) 270#if IS_ENABLED(CONFIG_IWLMVM)
261/* 7000 Series */ 271/* 7000 Series */
262 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 272 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
263 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, 274 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
264 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, 275 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
265 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, 277 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
266 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, 278 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
267 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, 279 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
268 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, 280 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
281 {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
269 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, 282 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
283 {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
270 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, 284 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
271 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, 285 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
272 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, 287 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, 289 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
274 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, 290 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
275 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, 291 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, 292 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
277 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, 293 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
278 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, 294 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
295 {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
297 {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
298 {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
299 {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
279 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, 300 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
301 {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
280 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, 302 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
281 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, 303 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
282 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 304 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
283 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, 306 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
284 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, 307 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
285 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, 309 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, 310 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
287 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, 311 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
312 {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
313 {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, 314 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
315 {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
289 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, 316 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
317 {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
290 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, 318 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
291 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, 319 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
320 {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
292 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, 321 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
293 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, 322 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
323 {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
324 {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
325 {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
326 {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
294 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, 327 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
328 {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
295 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, 329 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, 330 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
297 331
298/* 3160 Series */ 332/* 3160 Series */
299 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, 333 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
334 {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
300 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, 335 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
336 {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
301 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, 337 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
302 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, 338 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
303 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, 339 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
340 {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
304 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, 341 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
342 {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
343 {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, 344 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
345 {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
306 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, 346 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
347 {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
307 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, 348 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, 349 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
309 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, 350 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
310 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, 351 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
352 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
311#endif /* CONFIG_IWLMVM */ 353#endif /* CONFIG_IWLMVM */
312 354
313 {0} 355 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index bad95d28d50d..c3f904d422b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1401 spin_lock_init(&trans_pcie->reg_lock); 1401 spin_lock_init(&trans_pcie->reg_lock);
1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1403 1403
1404 err = pci_enable_device(pdev);
1405 if (err)
1406 goto out_no_pci;
1407
1404 if (!cfg->base_params->pcie_l1_allowed) { 1408 if (!cfg->base_params->pcie_l1_allowed) {
1405 /* 1409 /*
1406 * W/A - seems to solve weird behavior. We need to remove this 1410 * W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1412 PCIE_LINK_STATE_CLKPM); 1416 PCIE_LINK_STATE_CLKPM);
1413 } 1417 }
1414 1418
1415 err = pci_enable_device(pdev);
1416 if (err)
1417 goto out_no_pci;
1418
1419 pci_set_master(pdev); 1419 pci_set_master(pdev);
1420 1420
1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index f45eb29c2ede..1424335163b9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1102 * non-AGG queue. 1102 * non-AGG queue.
1103 */ 1103 */
1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
1105
1106 ssn = trans_pcie->txq[txq_id].q.read_ptr;
1105 } 1107 }
1106 1108
1107 /* Place first TFD at index corresponding to start sequence number. 1109 /* Place first TFD at index corresponding to start sequence number.
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 21c688264708..1214c587fd08 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
150 */ 150 */
151int 151int
152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 152mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
153 struct mwifiex_ra_list_tbl *pra_list, int headroom, 153 struct mwifiex_ra_list_tbl *pra_list,
154 int ptrindex, unsigned long ra_list_flags) 154 int ptrindex, unsigned long ra_list_flags)
155 __releases(&priv->wmm.ra_list_spinlock) 155 __releases(&priv->wmm.ra_list_spinlock)
156{ 156{
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
160 int pad = 0, ret; 160 int pad = 0, ret;
161 struct mwifiex_tx_param tx_param; 161 struct mwifiex_tx_param tx_param;
162 struct txpd *ptx_pd = NULL; 162 struct txpd *ptx_pd = NULL;
163 int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
163 164
164 skb_src = skb_peek(&pra_list->skb_head); 165 skb_src = skb_peek(&pra_list->skb_head);
165 if (!skb_src) { 166 if (!skb_src) {
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
index 900e1c62a0cc..892098d6a696 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -26,7 +26,7 @@
26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, 26int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
27 struct sk_buff *skb); 27 struct sk_buff *skb);
28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, 28int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
29 struct mwifiex_ra_list_tbl *ptr, int headroom, 29 struct mwifiex_ra_list_tbl *ptr,
30 int ptr_index, unsigned long flags) 30 int ptr_index, unsigned long flags)
31 __releases(&priv->wmm.ra_list_spinlock); 31 __releases(&priv->wmm.ra_list_spinlock);
32 32
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 2d761477d15e..a6c46f3b6e3a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); 1155 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1156 1156
1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && 1157 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
1158 adapter->iface_type == MWIFIEX_SDIO) { 1158 adapter->iface_type != MWIFIEX_USB) {
1159 mwifiex_hs_activated_event(priv, true); 1159 mwifiex_hs_activated_event(priv, true);
1160 return 0; 1160 return 0;
1161 } else { 1161 } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1167 } 1167 }
1168 if (conditions != HS_CFG_CANCEL) { 1168 if (conditions != HS_CFG_CANCEL) {
1169 adapter->is_hs_configured = true; 1169 adapter->is_hs_configured = true;
1170 if (adapter->iface_type == MWIFIEX_USB || 1170 if (adapter->iface_type == MWIFIEX_USB)
1171 adapter->iface_type == MWIFIEX_PCIE)
1172 mwifiex_hs_activated_event(priv, true); 1171 mwifiex_hs_activated_event(priv, true);
1173 } else { 1172 } else {
1174 adapter->is_hs_configured = false; 1173 adapter->is_hs_configured = false;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 9d7c0e6c4fc7..37f873bb342f 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1422 */ 1422 */
1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) 1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1424{ 1424{
1425 int ret = 0;
1426
1425 if (!priv->media_connected) 1427 if (!priv->media_connected)
1426 return 0; 1428 return 0;
1427 1429
1428 switch (priv->bss_mode) { 1430 switch (priv->bss_mode) {
1429 case NL80211_IFTYPE_STATION: 1431 case NL80211_IFTYPE_STATION:
1430 case NL80211_IFTYPE_P2P_CLIENT: 1432 case NL80211_IFTYPE_P2P_CLIENT:
1431 return mwifiex_deauthenticate_infra(priv, mac); 1433 ret = mwifiex_deauthenticate_infra(priv, mac);
1434 if (ret)
1435 cfg80211_disconnected(priv->netdev, 0, NULL, 0,
1436 GFP_KERNEL);
1437 break;
1432 case NL80211_IFTYPE_ADHOC: 1438 case NL80211_IFTYPE_ADHOC:
1433 return mwifiex_send_cmd_sync(priv, 1439 return mwifiex_send_cmd_sync(priv,
1434 HostCmd_CMD_802_11_AD_HOC_STOP, 1440 HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1440 break; 1446 break;
1441 } 1447 }
1442 1448
1443 return 0; 1449 return ret;
1444} 1450}
1445EXPORT_SYMBOL_GPL(mwifiex_deauthenticate); 1451EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
1446 1452
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd778337deee..c2b91f566e05 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -358,10 +358,12 @@ process_start:
358 } 358 }
359 } while (true); 359 } while (true);
360 360
361 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) 361 spin_lock_irqsave(&adapter->main_proc_lock, flags);
362 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
363 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
362 goto process_start; 364 goto process_start;
365 }
363 366
364 spin_lock_irqsave(&adapter->main_proc_lock, flags);
365 adapter->mwifiex_processing = false; 367 adapter->mwifiex_processing = false;
366 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 368 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
367 369
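
The mwifiex main.c hunk above closes a small wakeup race: the check for pending interrupt/RX work is moved under main_proc_lock, so work that arrives after the drain loop either restarts the loop or is only ever seen after mwifiex_processing has been cleared. Below is a minimal userspace sketch of the same re-check-under-lock pattern, using pthreads and made-up names (worker, pending, processing) rather than the driver's actual structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;
static bool processing = true;  /* stands in for adapter->mwifiex_processing */
static int pending;             /* stands in for adapter->int_status / RX flags */

static void *worker(void *arg)
{
	(void)arg;
process_start:
	/* Drain whatever is currently pending (the driver's main work loop). */
	while (pending > 0) {
		pending--;
		printf("handled one event\n");
	}

	/*
	 * Re-check under the lock before clearing 'processing': an event posted
	 * between the drain above and this point restarts the loop instead of
	 * being stranded while the flag says nobody is processing.
	 */
	pthread_mutex_lock(&proc_lock);
	if (pending > 0) {
		pthread_mutex_unlock(&proc_lock);
		goto process_start;
	}
	processing = false;
	pthread_mutex_unlock(&proc_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pending = 3;
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	printf("processing=%d pending=%d\n", processing, pending);
	return 0;
}

In the driver the producer side presumably checks mwifiex_processing under the same main_proc_lock when posting new work, which is what makes the re-check sufficient: the producer either sees processing still set and leaves the work for the restarted loop, or runs after the flag is cleared and can schedule the worker itself.
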
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8b057524b252..8c351f71f72f 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
118 dev_dbg(adapter->dev, 118 dev_dbg(adapter->dev,
119 "info: successfully disconnected from %pM: reason code %d\n", 119 "info: successfully disconnected from %pM: reason code %d\n",
120 priv->cfg_bssid, reason_code); 120 priv->cfg_bssid, reason_code);
121 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 121 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
122 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
122 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, 123 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
123 GFP_KERNEL); 124 GFP_KERNEL);
124 } 125 }
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 2472d4b7f00e..1c70b8d09227 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
447 */ 447 */
448 adapter->is_suspended = true; 448 adapter->is_suspended = true;
449 449
450 for (i = 0; i < adapter->priv_num; i++)
451 netif_carrier_off(adapter->priv[i]->netdev);
452
453 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 450 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
454 usb_kill_urb(card->rx_cmd.urb); 451 usb_kill_urb(card->rx_cmd.urb);
455 452
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
509 MWIFIEX_RX_CMD_BUF_SIZE); 506 MWIFIEX_RX_CMD_BUF_SIZE);
510 } 507 }
511 508
512 for (i = 0; i < adapter->priv_num; i++)
513 if (adapter->priv[i]->media_connected)
514 netif_carrier_on(adapter->priv[i]->netdev);
515
516 /* Disable Host Sleep */ 509 /* Disable Host Sleep */
517 if (adapter->hs_activated) 510 if (adapter->hs_activated)
518 mwifiex_cancel_hs(mwifiex_get_priv(adapter, 511 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 2e8f9cdea54d..95fa3599b407 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && 1239 if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
1240 mwifiex_is_11n_aggragation_possible(priv, ptr, 1240 mwifiex_is_11n_aggragation_possible(priv, ptr,
1241 adapter->tx_buf_size)) 1241 adapter->tx_buf_size))
1242 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, 1242 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1243 ptr_index, flags);
1244 /* ra_list_spinlock has been freed in 1243 /* ra_list_spinlock has been freed in
1245 mwifiex_11n_aggregate_pkt() */ 1244 mwifiex_11n_aggregate_pkt() */
1246 else 1245 else
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b9deef66cf4b..e328d3058c41 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ 83 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 84 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 85 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
86 {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
86 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ 87 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
87 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 88 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
88 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 89 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
979 if (err) { 980 if (err) {
980 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " 981 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
981 "(%d)!\n", p54u_fwlist[i].fw, err); 982 "(%d)!\n", p54u_fwlist[i].fw, err);
983 usb_put_dev(udev);
982 } 984 }
983 985
984 return err; 986 return err;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95deb274b..dc49e525ae5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
105 goto exit_release_regions; 105 goto exit_release_regions;
106 } 106 }
107 107
108 pci_enable_msi(pci_dev);
109
110 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
111 if (!hw) { 109 if (!hw) {
112 rt2x00_probe_err("Failed to allocate hardware\n"); 110 rt2x00_probe_err("Failed to allocate hardware\n");
113 retval = -ENOMEM; 111 retval = -ENOMEM;
114 goto exit_disable_msi; 112 goto exit_release_regions;
115 } 113 }
116 114
117 pci_set_drvdata(pci_dev, hw); 115 pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
152exit_free_device: 150exit_free_device:
153 ieee80211_free_hw(hw); 151 ieee80211_free_hw(hw);
154 152
155exit_disable_msi:
156 pci_disable_msi(pci_dev);
157
158exit_release_regions: 153exit_release_regions:
159 pci_release_regions(pci_dev); 154 pci_release_regions(pci_dev);
160 155
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
179 rt2x00pci_free_reg(rt2x00dev); 174 rt2x00pci_free_reg(rt2x00dev);
180 ieee80211_free_hw(hw); 175 ieee80211_free_hw(hw);
181 176
182 pci_disable_msi(pci_dev);
183
184 /* 177 /*
185 * Free the PCI device data. 178 * Free the PCI device data.
186 */ 179 */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1defab5..5a060e537fbe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
343 (bool)GET_RX_DESC_PAGGR(pdesc)); 343 (bool)GET_RX_DESC_PAGGR(pdesc));
344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
345 if (phystatus) { 345 if (phystatus) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE); 346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
347 stats->rx_bufshift);
347 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, 348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
348 p_drvinfo); 349 p_drvinfo);
349 } 350 }
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cc03e7c87cbe..703258742d28 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2057,7 +2057,7 @@ struct rtl_priv {
2057 that it points to the data allocated 2057 that it points to the data allocated
2058 beyond this structure like: 2058 beyond this structure like:
2059 rtl_pci_priv or rtl_usb_priv */ 2059 rtl_pci_priv or rtl_usb_priv */
2060 u8 priv[0]; 2060 u8 priv[0] __aligned(sizeof(void *));
2061}; 2061};
2062 2062
2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) 2063#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
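
The one-line wifi.h change above forces the trailing priv[0] area of struct rtl_priv to pointer alignment. The surrounding comment notes that this area holds the bus-specific private data (rtl_pci_priv or rtl_usb_priv); without the attribute, the flexible area simply starts wherever the last named member ends, which need not be suitably aligned for structures containing pointers. A small standalone illustration of the effect, using hypothetical struct names and GCC's zero-length-array extension as the kernel does:

#include <stdio.h>
#include <stddef.h>

/* Trailing area lands at whatever offset the last member leaves behind. */
struct unaligned_tail {
	int id;
	char flag;
	char priv[0];
};

/* Same members, but the tail is forced to pointer alignment, as in the patch. */
struct aligned_tail {
	int id;
	char flag;
	char priv[0] __attribute__((aligned(sizeof(void *))));
};

int main(void)
{
	/* Typically prints 5 for the first and 8 for the second on LP64 targets. */
	printf("unaligned tail offset: %zu\n", offsetof(struct unaligned_tail, priv));
	printf("aligned   tail offset: %zu\n", offsetof(struct aligned_tail, priv));
	return 0;
}

Casting a misaligned tail to a structure full of pointers and locks risks unaligned accesses, which can fault on strict-alignment CPUs; that is presumably what the __aligned(sizeof(void *)) annotation guards against.
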
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a53782ef1540..1b08d8798372 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -24,6 +24,12 @@
24struct backend_info { 24struct backend_info {
25 struct xenbus_device *dev; 25 struct xenbus_device *dev;
26 struct xenvif *vif; 26 struct xenvif *vif;
27
28 /* This is the state that will be reflected in xenstore when any
29 * active hotplug script completes.
30 */
31 enum xenbus_state state;
32
27 enum xenbus_state frontend_state; 33 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 34 struct xenbus_watch hotplug_status_watch;
29 u8 have_hotplug_status_watch:1; 35 u8 have_hotplug_status_watch:1;
@@ -33,11 +39,15 @@ static int connect_rings(struct backend_info *);
33static void connect(struct backend_info *); 39static void connect(struct backend_info *);
34static void backend_create_xenvif(struct backend_info *be); 40static void backend_create_xenvif(struct backend_info *be);
35static void unregister_hotplug_status_watch(struct backend_info *be); 41static void unregister_hotplug_status_watch(struct backend_info *be);
42static void set_backend_state(struct backend_info *be,
43 enum xenbus_state state);
36 44
37static int netback_remove(struct xenbus_device *dev) 45static int netback_remove(struct xenbus_device *dev)
38{ 46{
39 struct backend_info *be = dev_get_drvdata(&dev->dev); 47 struct backend_info *be = dev_get_drvdata(&dev->dev);
40 48
49 set_backend_state(be, XenbusStateClosed);
50
41 unregister_hotplug_status_watch(be); 51 unregister_hotplug_status_watch(be);
42 if (be->vif) { 52 if (be->vif) {
43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 53 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -136,6 +146,8 @@ static int netback_probe(struct xenbus_device *dev,
136 if (err) 146 if (err)
137 goto fail; 147 goto fail;
138 148
149 be->state = XenbusStateInitWait;
150
139 /* This kicks hotplug scripts, so do it immediately. */ 151 /* This kicks hotplug scripts, so do it immediately. */
140 backend_create_xenvif(be); 152 backend_create_xenvif(be);
141 153
@@ -208,24 +220,113 @@ static void backend_create_xenvif(struct backend_info *be)
208 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 220 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
209} 221}
210 222
211 223static void backend_disconnect(struct backend_info *be)
212static void disconnect_backend(struct xenbus_device *dev)
213{ 224{
214 struct backend_info *be = dev_get_drvdata(&dev->dev);
215
216 if (be->vif) 225 if (be->vif)
217 xenvif_disconnect(be->vif); 226 xenvif_disconnect(be->vif);
218} 227}
219 228
220static void destroy_backend(struct xenbus_device *dev) 229static void backend_connect(struct backend_info *be)
221{ 230{
222 struct backend_info *be = dev_get_drvdata(&dev->dev); 231 if (be->vif)
232 connect(be);
233}
223 234
224 if (be->vif) { 235static inline void backend_switch_state(struct backend_info *be,
225 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 236 enum xenbus_state state)
226 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 237{
227 xenvif_free(be->vif); 238 struct xenbus_device *dev = be->dev;
228 be->vif = NULL; 239
240 pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
241 be->state = state;
242
243 /* If we are waiting for a hotplug script then defer the
244 * actual xenbus state change.
245 */
246 if (!be->have_hotplug_status_watch)
247 xenbus_switch_state(dev, state);
248}
249
250/* Handle backend state transitions:
251 *
252 * The backend state starts in InitWait and the following transitions are
253 * allowed.
254 *
255 * InitWait -> Connected
256 *
257 *    ^    \         |
258 *    |     \        |
259 *    |      \       |
260 *    |       \      |
261 *    |        \     |
262 *    |         \    |
263 *    |          V   V
264 *
265 * Closed <-> Closing
266 *
267 * The state argument specifies the eventual state of the backend and the
268 * function transitions to that state via the shortest path.
269 */
270static void set_backend_state(struct backend_info *be,
271 enum xenbus_state state)
272{
273 while (be->state != state) {
274 switch (be->state) {
275 case XenbusStateClosed:
276 switch (state) {
277 case XenbusStateInitWait:
278 case XenbusStateConnected:
279 pr_info("%s: prepare for reconnect\n",
280 be->dev->nodename);
281 backend_switch_state(be, XenbusStateInitWait);
282 break;
283 case XenbusStateClosing:
284 backend_switch_state(be, XenbusStateClosing);
285 break;
286 default:
287 BUG();
288 }
289 break;
290 case XenbusStateInitWait:
291 switch (state) {
292 case XenbusStateConnected:
293 backend_connect(be);
294 backend_switch_state(be, XenbusStateConnected);
295 break;
296 case XenbusStateClosing:
297 case XenbusStateClosed:
298 backend_switch_state(be, XenbusStateClosing);
299 break;
300 default:
301 BUG();
302 }
303 break;
304 case XenbusStateConnected:
305 switch (state) {
306 case XenbusStateInitWait:
307 case XenbusStateClosing:
308 case XenbusStateClosed:
309 backend_disconnect(be);
310 backend_switch_state(be, XenbusStateClosing);
311 break;
312 default:
313 BUG();
314 }
315 break;
316 case XenbusStateClosing:
317 switch (state) {
318 case XenbusStateInitWait:
319 case XenbusStateConnected:
320 case XenbusStateClosed:
321 backend_switch_state(be, XenbusStateClosed);
322 break;
323 default:
324 BUG();
325 }
326 break;
327 default:
328 BUG();
329 }
229 } 330 }
230} 331}
231 332
@@ -237,40 +338,33 @@ static void frontend_changed(struct xenbus_device *dev,
237{ 338{
238 struct backend_info *be = dev_get_drvdata(&dev->dev); 339 struct backend_info *be = dev_get_drvdata(&dev->dev);
239 340
240 pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); 341 pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
241 342
242 be->frontend_state = frontend_state; 343 be->frontend_state = frontend_state;
243 344
244 switch (frontend_state) { 345 switch (frontend_state) {
245 case XenbusStateInitialising: 346 case XenbusStateInitialising:
246 if (dev->state == XenbusStateClosed) { 347 set_backend_state(be, XenbusStateInitWait);
247 pr_info("%s: prepare for reconnect\n", dev->nodename);
248 xenbus_switch_state(dev, XenbusStateInitWait);
249 }
250 break; 348 break;
251 349
252 case XenbusStateInitialised: 350 case XenbusStateInitialised:
253 break; 351 break;
254 352
255 case XenbusStateConnected: 353 case XenbusStateConnected:
256 if (dev->state == XenbusStateConnected) 354 set_backend_state(be, XenbusStateConnected);
257 break;
258 if (be->vif)
259 connect(be);
260 break; 355 break;
261 356
262 case XenbusStateClosing: 357 case XenbusStateClosing:
263 disconnect_backend(dev); 358 set_backend_state(be, XenbusStateClosing);
264 xenbus_switch_state(dev, XenbusStateClosing);
265 break; 359 break;
266 360
267 case XenbusStateClosed: 361 case XenbusStateClosed:
268 xenbus_switch_state(dev, XenbusStateClosed); 362 set_backend_state(be, XenbusStateClosed);
269 if (xenbus_dev_is_online(dev)) 363 if (xenbus_dev_is_online(dev))
270 break; 364 break;
271 destroy_backend(dev);
272 /* fall through if not online */ 365 /* fall through if not online */
273 case XenbusStateUnknown: 366 case XenbusStateUnknown:
367 set_backend_state(be, XenbusStateClosed);
274 device_unregister(&dev->dev); 368 device_unregister(&dev->dev);
275 break; 369 break;
276 370
@@ -363,7 +457,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
363 if (IS_ERR(str)) 457 if (IS_ERR(str))
364 return; 458 return;
365 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { 459 if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
366 xenbus_switch_state(be->dev, XenbusStateConnected); 460 /* Complete any pending state change */
461 xenbus_switch_state(be->dev, be->state);
462
367 /* Not interested in this watch anymore. */ 463 /* Not interested in this watch anymore. */
368 unregister_hotplug_status_watch(be); 464 unregister_hotplug_status_watch(be);
369 } 465 }
@@ -393,12 +489,8 @@ static void connect(struct backend_info *be)
393 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, 489 err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
394 hotplug_status_changed, 490 hotplug_status_changed,
395 "%s/%s", dev->nodename, "hotplug-status"); 491 "%s/%s", dev->nodename, "hotplug-status");
396 if (err) { 492 if (!err)
397 /* Switch now, since we can't do a watch. */
398 xenbus_switch_state(dev, XenbusStateConnected);
399 } else {
400 be->have_hotplug_status_watch = 1; 493 be->have_hotplug_status_watch = 1;
401 }
402 494
403 netif_wake_queue(be->vif->dev); 495 netif_wake_queue(be->vif->dev);
404} 496}
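
The xen-netback xenbus.c changes above replace the ad-hoc disconnect/destroy paths with set_backend_state(), which walks the backend from its current xenbus state to the requested one a single legal transition at a time, following the diagram in the new comment. Below is a compact standalone sketch of that shortest-path walk; the enum, step() and set_state() names are invented for illustration, and the real helper additionally performs the connect/disconnect work and defers xenstore writes while a hotplug script is outstanding:

#include <stdio.h>

/* Simplified stand-ins for the xenbus states the backend moves through. */
enum state { ST_CLOSED, ST_INIT_WAIT, ST_CONNECTED, ST_CLOSING };

static const char *const names[] = { "Closed", "InitWait", "Connected", "Closing" };

/* One legal transition toward 'target', mirroring set_backend_state()'s inner switches. */
static enum state step(enum state cur, enum state target)
{
	switch (cur) {
	case ST_CLOSED:
		return target == ST_CLOSING ? ST_CLOSING : ST_INIT_WAIT;
	case ST_INIT_WAIT:
		return target == ST_CONNECTED ? ST_CONNECTED : ST_CLOSING;
	case ST_CONNECTED:
		return ST_CLOSING;	/* disconnect happens on the way down */
	case ST_CLOSING:
		return ST_CLOSED;
	}
	return cur;
}

static void set_state(enum state cur, enum state target)
{
	while (cur != target) {
		enum state next = step(cur, target);
		printf("%s -> %s\n", names[cur], names[next]);
		cur = next;
	}
}

int main(void)
{
	/* e.g. netback_remove() asking a still-connected backend to end up Closed */
	set_state(ST_CONNECTED, ST_CLOSED);
	return 0;
}

Stepping through every intermediate state rather than jumping straight to the target keeps each transition visible in xenstore, which is presumably what lets the frontend and the hotplug machinery track teardown and reconnection correctly.
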
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 9d2009a9004d..78cc76053328 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,10 +74,4 @@ config OF_MTD
74 depends on MTD 74 depends on MTD
75 def_bool y 75 def_bool y
76 76
77config OF_RESERVED_MEM
78 depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
79 def_bool y
80 help
81 Initialization code for DMA reserved memory
82
83endmenu # OF 77endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index ed9660adad77..efd05102c405 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
9obj-$(CONFIG_OF_PCI) += of_pci.o 9obj-$(CONFIG_OF_PCI) += of_pci.o
10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o 10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
11obj-$(CONFIG_OF_MTD) += of_mtd.o 11obj-$(CONFIG_OF_MTD) += of_mtd.o
12obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 865d3f66c86b..7d4c70f859e3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
303 struct device_node *cpun, *cpus; 303 struct device_node *cpun, *cpus;
304 304
305 cpus = of_find_node_by_path("/cpus"); 305 cpus = of_find_node_by_path("/cpus");
306 if (!cpus) { 306 if (!cpus)
307 pr_warn("Missing cpus node, bailing out\n");
308 return NULL; 307 return NULL;
309 }
310 308
311 for_each_child_of_node(cpus, cpun) { 309 for_each_child_of_node(cpus, cpun) {
312 if (of_node_cmp(cpun->type, "cpu")) 310 if (of_node_cmp(cpun->type, "cpu"))
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 229dd9d69e18..a4fa9ad31b8f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/random.h>
22 21
23#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 22#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
24#ifdef CONFIG_PPC 23#ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
803} 802}
804 803
805#endif /* CONFIG_OF_EARLY_FLATTREE */ 804#endif /* CONFIG_OF_EARLY_FLATTREE */
806
807/* Feed entire flattened device tree into the random pool */
808static int __init add_fdt_randomness(void)
809{
810 if (initial_boot_params)
811 add_device_randomness(initial_boot_params,
812 be32_to_cpu(initial_boot_params->totalsize));
813
814 return 0;
815}
816core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644
index 0fe40c7d6904..000000000000
--- a/drivers/of/of_reserved_mem.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * Device tree based initialization code for reserved memory.
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License or (at your optional) any later version of the license.
12 */
13
14#include <linux/memblock.h>
15#include <linux/err.h>
16#include <linux/of.h>
17#include <linux/of_fdt.h>
18#include <linux/of_platform.h>
19#include <linux/mm.h>
20#include <linux/sizes.h>
21#include <linux/mm_types.h>
22#include <linux/dma-contiguous.h>
23#include <linux/dma-mapping.h>
24#include <linux/of_reserved_mem.h>
25
26#define MAX_RESERVED_REGIONS 16
27struct reserved_mem {
28 phys_addr_t base;
29 unsigned long size;
30 struct cma *cma;
31 char name[32];
32};
33static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
34static int reserved_mem_count;
35
36static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
37 int depth, void *data)
38{
39 struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
40 phys_addr_t base, size;
41 int is_cma, is_reserved;
42 unsigned long len;
43 const char *status;
44 __be32 *prop;
45
46 is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
47 of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
48 is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
49
50 if (!is_reserved && !is_cma) {
51 /* ignore node and scan next one */
52 return 0;
53 }
54
55 status = of_get_flat_dt_prop(node, "status", &len);
56 if (status && strcmp(status, "okay") != 0) {
57 /* ignore disabled node and scan next one */
58 return 0;
59 }
60
61 prop = of_get_flat_dt_prop(node, "reg", &len);
62 if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
63 sizeof(__be32))) {
64 pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
65 uname);
66 /* ignore node and scan next one */
67 return 0;
68 }
69 base = dt_mem_next_cell(dt_root_addr_cells, &prop);
70 size = dt_mem_next_cell(dt_root_size_cells, &prop);
71
72 if (!size) {
73 /* ignore node and scan next one */
74 return 0;
75 }
76
77 pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
78 uname, (unsigned long)base, (unsigned long)size / SZ_1M);
79
80 if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
81 return -ENOSPC;
82
83 rmem->base = base;
84 rmem->size = size;
85 strlcpy(rmem->name, uname, sizeof(rmem->name));
86
87 if (is_cma) {
88 struct cma *cma;
89 if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
90 rmem->cma = cma;
91 reserved_mem_count++;
92 if (of_get_flat_dt_prop(node,
93 "linux,default-contiguous-region",
94 NULL))
95 dma_contiguous_set_default(cma);
96 }
97 } else if (is_reserved) {
98 if (memblock_remove(base, size) == 0)
99 reserved_mem_count++;
100 else
101 pr_err("Failed to reserve memory for %s\n", uname);
102 }
103
104 return 0;
105}
106
107static struct reserved_mem *get_dma_memory_region(struct device *dev)
108{
109 struct device_node *node;
110 const char *name;
111 int i;
112
113 node = of_parse_phandle(dev->of_node, "memory-region", 0);
114 if (!node)
115 return NULL;
116
117 name = kbasename(node->full_name);
118 for (i = 0; i < reserved_mem_count; i++)
119 if (strcmp(name, reserved_mem[i].name) == 0)
120 return &reserved_mem[i];
121 return NULL;
122}
123
124/**
125 * of_reserved_mem_device_init() - assign reserved memory region to given device
126 *
127 * This function assigns the memory region pointed to by the "memory-region"
128 * property in the device tree to the given device.
129 */
130void of_reserved_mem_device_init(struct device *dev)
131{
132 struct reserved_mem *region = get_dma_memory_region(dev);
133 if (!region)
134 return;
135
136 if (region->cma) {
137 dev_set_cma_area(dev, region->cma);
138 pr_info("Assigned CMA %s to %s device\n", region->name,
139 dev_name(dev));
140 } else {
141 if (dma_declare_coherent_memory(dev, region->base, region->base,
142 region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
143 pr_info("Declared reserved memory %s to %s device\n",
144 region->name, dev_name(dev));
145 }
146}
147
148/**
149 * of_reserved_mem_device_release() - release reserved memory device structures
150 *
151 * This function releases structures allocated for memory region handling for
152 * the given device.
153 */
154void of_reserved_mem_device_release(struct device *dev)
155{
156 struct reserved_mem *region = get_dma_memory_region(dev);
157 if (!region && !region->cma)
158 dma_release_declared_memory(dev);
159}
160
161/**
162 * early_init_dt_scan_reserved_mem() - create reserved memory regions
163 *
164 * This function grabs memory from early allocator for device exclusive use
165 * defined in device tree structures. It should be called by arch specific code
166 * once the early allocator (memblock) has been activated and all other
167 * subsystems have already allocated/reserved memory.
168 */
169void __init early_init_dt_scan_reserved_mem(void)
170{
171 of_scan_flat_dt_by_path("/memory/reserved-memory",
172 fdt_scan_reserved_mem, NULL);
173}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9b439ac63d8e..f6dcde220821 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,7 +21,6 @@
21#include <linux/of_device.h> 21#include <linux/of_device.h>
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_reserved_mem.h>
25#include <linux/platform_device.h> 24#include <linux/platform_device.h>
26 25
27const struct of_device_id of_default_bus_match_table[] = { 26const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
219 dev->dev.bus = &platform_bus_type; 218 dev->dev.bus = &platform_bus_type;
220 dev->dev.platform_data = platform_data; 219 dev->dev.platform_data = platform_data;
221 220
222 of_reserved_mem_device_init(&dev->dev);
223
224 /* We do not fill the DMA ops for platform devices by default. 221 /* We do not fill the DMA ops for platform devices by default.
225 * This is currently the responsibility of the platform code 222 * This is currently the responsibility of the platform code
226 * to do such, possibly using a device notifier 223 * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
228 225
229 if (of_device_add(dev) != 0) { 226 if (of_device_add(dev) != 0) {
230 platform_device_put(dev); 227 platform_device_put(dev);
231 of_reserved_mem_device_release(&dev->dev);
232 return NULL; 228 return NULL;
233 } 229 }
234 230
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b7d23b4ad95..1ea75236a15f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
552 struct acpiphp_func *func; 552 struct acpiphp_func *func;
553 int max, pass; 553 int max, pass;
554 LIST_HEAD(add_list); 554 LIST_HEAD(add_list);
555 int nr_found;
556 555
557 nr_found = acpiphp_rescan_slot(slot); 556 acpiphp_rescan_slot(slot);
558 max = acpiphp_max_busnr(bus); 557 max = acpiphp_max_busnr(bus);
559 for (pass = 0; pass < 2; pass++) { 558 for (pass = 0; pass < 2; pass++) {
560 list_for_each_entry(dev, &bus->devices, bus_list) { 559 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
574 } 573 }
575 } 574 }
576 __pci_bus_assign_resources(bus, &add_list, NULL); 575 __pci_bus_assign_resources(bus, &add_list, NULL);
577 /* Nothing more to do here if there are no new devices on this bus. */
578 if (!nr_found && (slot->flags & SLOT_ENABLED))
579 return;
580 576
581 acpiphp_sanitize_bus(bus); 577 acpiphp_sanitize_bus(bus);
582 acpiphp_set_hpp_values(bus); 578 acpiphp_set_hpp_values(bus);
@@ -994,14 +990,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
994 990
995 /* 991 /*
996 * This bridge should have been registered as a hotplug function 992 * This bridge should have been registered as a hotplug function
997 * under its parent, so the context has to be there. If not, we 993 * under its parent, so the context should be there, unless the
998 * are in deep goo. 994 * parent is going to be handled by pciehp, in which case this
995 * bridge is not interesting to us either.
999 */ 996 */
1000 mutex_lock(&acpiphp_context_lock); 997 mutex_lock(&acpiphp_context_lock);
1001 context = acpiphp_get_context(handle); 998 context = acpiphp_get_context(handle);
1002 if (WARN_ON(!context)) { 999 if (!context) {
1003 mutex_unlock(&acpiphp_context_lock); 1000 mutex_unlock(&acpiphp_context_lock);
1004 put_device(&bus->dev); 1001 put_device(&bus->dev);
1002 pci_dev_put(bridge->pci_dev);
1005 kfree(bridge); 1003 kfree(bridge);
1006 return; 1004 return;
1007 } 1005 }
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a138965c01cb..b8fcc38c0d11 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -490,7 +490,7 @@ exit:
490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps 490 * <devicename> <state> <pinname> are values that should match the pinctrl-maps
491 * <newvalue> reflects the new config and is driver dependent 491 * <newvalue> reflects the new config and is driver dependent
492 */ 492 */
493static int pinconf_dbg_config_write(struct file *file, 493static ssize_t pinconf_dbg_config_write(struct file *file,
494 const char __user *user_buf, size_t count, loff_t *ppos) 494 const char __user *user_buf, size_t count, loff_t *ppos)
495{ 495{
496 struct pinctrl_maps *maps_node; 496 struct pinctrl_maps *maps_node;
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
508 int i; 508 int i;
509 509
510 /* Get userspace string and assure termination */ 510 /* Get userspace string and assure termination */
511 buf_size = min(count, (size_t)(sizeof(buf)-1)); 511 buf_size = min(count, sizeof(buf) - 1);
512 if (copy_from_user(buf, user_buf, buf_size)) 512 if (copy_from_user(buf, user_buf, buf_size))
513 return -EFAULT; 513 return -EFAULT;
514 buf[buf_size] = 0; 514 buf[buf_size] = 0;
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index 2689f8d01a1e..155b1b3a0e7a 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
663/* pin banks of s5pv210 pin-controller */ 663/* pin banks of s5pv210 pin-controller */
664static struct samsung_pin_bank s5pv210_pin_bank[] = { 664static struct samsung_pin_bank s5pv210_pin_bank[] = {
665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 665 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
666 EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), 666 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), 667 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), 668 EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), 669 EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), 670 EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
671 EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), 671 EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
672 EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), 672 EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
673 EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), 673 EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
674 EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), 674 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), 675 EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), 676 EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
677 EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), 677 EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), 678 EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), 679 EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), 680 EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index 82638fac3cfa..30c4d356cb33 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
891 param = pinconf_to_config_param(configs[i]); 891 param = pinconf_to_config_param(configs[i]);
892 param_val = pinconf_to_config_argument(configs[i]); 892 param_val = pinconf_to_config_argument(configs[i]);
893 893
894 if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT)
895 continue;
896
894 switch (param) { 897 switch (param) {
895 case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
896 return 0;
897 case PIN_CONFIG_BIAS_DISABLE: 898 case PIN_CONFIG_BIAS_DISABLE:
898 case PIN_CONFIG_BIAS_PULL_UP: 899 case PIN_CONFIG_BIAS_PULL_UP:
899 case PIN_CONFIG_BIAS_PULL_DOWN: 900 case PIN_CONFIG_BIAS_PULL_DOWN:
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
index 622c4854977e..93c9e3899d5e 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/pinctrl-tegra114.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Arthur: Pritesh Raithatha <praithatha@nvidia.com> 6 * Author: Pritesh Raithatha <praithatha@nvidia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
2763}; 2763};
2764module_platform_driver(tegra114_pinctrl_driver); 2764module_platform_driver(tegra114_pinctrl_driver);
2765 2765
2766MODULE_ALIAS("platform:tegra114-pinctrl");
2767MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); 2766MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
2768MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); 2767MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
2769MODULE_LICENSE("GPL v2"); 2768MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 96d6b2eef4f2..b51a7460cc49 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -504,6 +504,7 @@ config ASUS_WMI
504 depends on BACKLIGHT_CLASS_DEVICE 504 depends on BACKLIGHT_CLASS_DEVICE
505 depends on RFKILL || RFKILL = n 505 depends on RFKILL || RFKILL = n
506 depends on HOTPLUG_PCI 506 depends on HOTPLUG_PCI
507 depends on ACPI_VIDEO || ACPI_VIDEO = n
507 select INPUT_SPARSEKMAP 508 select INPUT_SPARSEKMAP
508 select LEDS_CLASS 509 select LEDS_CLASS
509 select NEW_LEDS 510 select NEW_LEDS
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d3fd52036fd6..13ec195f0ca6 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
127 "default is -1 (automatic)"); 127 "default is -1 (automatic)");
128#endif 128#endif
129 129
130static int kbd_backlight = 1; 130static int kbd_backlight = -1;
131module_param(kbd_backlight, int, 0444); 131module_param(kbd_backlight, int, 0444);
132MODULE_PARM_DESC(kbd_backlight, 132MODULE_PARM_DESC(kbd_backlight,
133 "set this to 0 to disable keyboard backlight, " 133 "set this to 0 to disable keyboard backlight, "
134 "1 to enable it (default: 0)"); 134 "1 to enable it (default: no change from current value)");
135 135
136static int kbd_backlight_timeout; /* = 0 */ 136static int kbd_backlight_timeout = -1;
137module_param(kbd_backlight_timeout, int, 0444); 137module_param(kbd_backlight_timeout, int, 0444);
138MODULE_PARM_DESC(kbd_backlight_timeout, 138MODULE_PARM_DESC(kbd_backlight_timeout,
139 "set this to 0 to set the default 10 seconds timeout, " 139 "meaningful values vary from 0 to 3 and their meaning depends "
140 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 140 "on the model (default: no change from current value)");
141 "(default: 0)");
142 141
143#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
144static void sony_nc_kbd_backlight_resume(void); 143static void sony_nc_kbd_backlight_resume(void);
@@ -1844,6 +1843,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1844 if (!kbdbl_ctl) 1843 if (!kbdbl_ctl)
1845 return -ENOMEM; 1844 return -ENOMEM;
1846 1845
1846 kbdbl_ctl->mode = kbd_backlight;
1847 kbdbl_ctl->timeout = kbd_backlight_timeout;
1847 kbdbl_ctl->handle = handle; 1848 kbdbl_ctl->handle = handle;
1848 if (handle == 0x0137) 1849 if (handle == 0x0137)
1849 kbdbl_ctl->base = 0x0C00; 1850 kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1871,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1870 if (ret) 1871 if (ret)
1871 goto outmode; 1872 goto outmode;
1872 1873
1873 __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1874 __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
1874 __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); 1875 __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
1875 1876
1876 return 0; 1877 return 0;
1877 1878
@@ -1886,17 +1887,8 @@ outkzalloc:
1886static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1887static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1887{ 1888{
1888 if (kbdbl_ctl) { 1889 if (kbdbl_ctl) {
1889 int result;
1890
1891 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1890 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1892 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1891 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1893
1894 /* restore the default hw behaviour */
1895 sony_call_snc_handle(kbdbl_ctl->handle,
1896 kbdbl_ctl->base | 0x10000, &result);
1897 sony_call_snc_handle(kbdbl_ctl->handle,
1898 kbdbl_ctl->base + 0x200, &result);
1899
1900 kfree(kbdbl_ctl); 1892 kfree(kbdbl_ctl);
1901 kbdbl_ctl = NULL; 1893 kbdbl_ctl = NULL;
1902 } 1894 }
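
Editor's note: the sony-laptop hunks above change the kbd_backlight and kbd_backlight_timeout module parameters from hard defaults to -1, so an unset parameter no longer overwrites whatever the firmware already programmed. A minimal sketch of that "-1 means leave it alone" sentinel pattern, with invented names rather than the driver's own setup path:

#include <linux/module.h>
#include <linux/printk.h>

/* -1 = not given on the command line: keep the current firmware setting. */
static int kbd_backlight = -1;
module_param(kbd_backlight, int, 0444);
MODULE_PARM_DESC(kbd_backlight,
		 "0 disables, 1 enables, -1 keeps the firmware default");

static void example_apply_kbd_backlight(void)
{
	if (kbd_backlight < 0)
		return;		/* nothing requested, do not touch the hardware */
	pr_info("example: would set keyboard backlight to %d\n", kbd_backlight);
}
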
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 1a7816390773..b9f2653e4ef9 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
709 struct of_regulator_match **da9063_reg_matches) 709 struct of_regulator_match **da9063_reg_matches)
710{ 710{
711 da9063_reg_matches = NULL; 711 da9063_reg_matches = NULL;
712 return PTR_ERR(-ENODEV); 712 return ERR_PTR(-ENODEV);
713} 713}
714#endif 714#endif
715 715
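
Editor's note: the one-line da9063 fix swaps PTR_ERR(-ENODEV), which decodes an errno out of a pointer, for ERR_PTR(-ENODEV), which encodes an errno into a pointer, so the stub returns a value callers can test with IS_ERR(). A minimal sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, using an invented example_lookup() helper:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_res {
	int id;
};

static struct example_res example_res0 = { .id = 42 };

/* Return a valid pointer on success, or an errno encoded with ERR_PTR(). */
static struct example_res *example_lookup(bool present)
{
	if (!present)
		return ERR_PTR(-ENODEV);
	return &example_res0;
}

static int example_caller(void)
{
	struct example_res *res = example_lookup(false);

	if (IS_ERR(res))
		return PTR_ERR(res);	/* decode the errno back out */
	return res->id;
}
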
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 488dfe7ce9a6..7e2b165972e6 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
201#define SMPS_CTRL_MODE_ECO 0x02 201#define SMPS_CTRL_MODE_ECO 0x02
202#define SMPS_CTRL_MODE_PWM 0x03 202#define SMPS_CTRL_MODE_PWM 0x03
203 203
204/* These values are derived from the data sheet. And are the number of steps 204#define PALMAS_SMPS_NUM_VOLTAGES 122
205 * where there is a voltage change, the ranges at beginning and end of register
206 * max/min values where there are no change are ommitted.
207 *
208 * So they are basically (maxV-minV)/stepV
209 */
210#define PALMAS_SMPS_NUM_VOLTAGES 117
211#define PALMAS_SMPS10_NUM_VOLTAGES 2 205#define PALMAS_SMPS10_NUM_VOLTAGES 2
212#define PALMAS_LDO_NUM_VOLTAGES 50 206#define PALMAS_LDO_NUM_VOLTAGES 50
213 207
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
979 pmic->desc[id].min_uV = 900000; 973 pmic->desc[id].min_uV = 900000;
980 pmic->desc[id].uV_step = 50000; 974 pmic->desc[id].uV_step = 50000;
981 pmic->desc[id].linear_min_sel = 1; 975 pmic->desc[id].linear_min_sel = 1;
976 pmic->desc[id].enable_time = 500;
982 pmic->desc[id].vsel_reg = 977 pmic->desc[id].vsel_reg =
983 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, 978 PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
984 palmas_regs_info[id].vsel_addr); 979 palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
997 pmic->desc[id].min_uV = 450000; 992 pmic->desc[id].min_uV = 450000;
998 pmic->desc[id].uV_step = 25000; 993 pmic->desc[id].uV_step = 25000;
999 } 994 }
995
996 /* LOD6 in vibrator mode will have enable time 2000us */
997 if (pdata && pdata->ldo6_vibrator &&
998 (id == PALMAS_REG_LDO6))
999 pmic->desc[id].enable_time = 2000;
1000 } else { 1000 } else {
1001 pmic->desc[id].n_voltages = 1; 1001 pmic->desc[id].n_voltages = 1;
1002 pmic->desc[id].ops = &palmas_ops_extreg; 1002 pmic->desc[id].ops = &palmas_ops_extreg;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d8e3e1262bc2..20c271d49dcb 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, 279 ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
280 abb->base); 280 abb->base);
281 281
282 /* program LDO VBB vset override if needed */ 282 /*
283 if (abb->ldo_base) 283 * program LDO VBB vset override if needed for !bypass mode
284 * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
285 * be performed *before* switch to bias mode else VBB glitches.
286 */
287 if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
284 ti_abb_program_ldovbb(dev, abb, info); 288 ti_abb_program_ldovbb(dev, abb, info);
285 289
286 /* Initiate ABB ldo change */ 290 /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
295 if (ret) 299 if (ret)
296 goto out; 300 goto out;
297 301
302 /*
303 * Reset LDO VBB vset override bypass mode
304 * XXX: Do not switch sequence - for bypass, LDO override reset *must*
305 * be performed *after* switch to bypass else VBB glitches.
306 */
307 if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
308 ti_abb_program_ldovbb(dev, abb, info);
309
298out: 310out:
299 return ret; 311 return ret;
300} 312}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 1432b26ef2e9..2205fbc2c37b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
63 */ 63 */
64 64
65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { 65static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
66 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, 66 { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
67 .uV_step = 50000 }, 67 .uV_step = 50000 },
68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, 68 { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
69 .uV_step = 100000 }, 69 .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
332 */ 332 */
333 333
334static const struct regulator_linear_range wm831x_aldo_ranges[] = { 334static const struct regulator_linear_range wm831x_aldo_ranges[] = {
335 { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, 335 { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12,
336 .uV_step = 50000 }, 336 .uV_step = 50000 },
337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, 337 { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
338 .uV_step = 100000 }, 338 .uV_step = 100000 },
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 835b5f0f344e..61ca9292a429 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
543} 543}
544 544
545static const struct regulator_linear_range wm8350_ldo_ranges[] = { 545static const struct regulator_linear_range wm8350_ldo_ranges[] = {
546 { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, 546 { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15,
547 .uV_step = 50000 }, 547 .uV_step = 50000 },
548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, 548 { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
549 .uV_step = 100000 }, 549 .uV_step = 100000 },
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5adb2042e824..cee7e2708a1f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
2077 int intensity = 0; 2077 int intensity = 0;
2078 int r0_perm; 2078 int r0_perm;
2079 int nr_tracks; 2079 int nr_tracks;
2080 int use_prefix;
2080 2081
2081 startdev = dasd_alias_get_start_dev(base); 2082 startdev = dasd_alias_get_start_dev(base);
2082 if (!startdev) 2083 if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
2106 intensity = fdata->intensity; 2107 intensity = fdata->intensity;
2107 } 2108 }
2108 2109
2110 use_prefix = base_priv->features.feature[8] & 0x01;
2111
2109 switch (intensity) { 2112 switch (intensity) {
2110 case 0x00: /* Normal format */ 2113 case 0x00: /* Normal format */
2111 case 0x08: /* Normal format, use cdl. */ 2114 case 0x08: /* Normal format, use cdl. */
2112 cplength = 2 + (rpt*nr_tracks); 2115 cplength = 2 + (rpt*nr_tracks);
2113 datasize = sizeof(struct PFX_eckd_data) + 2116 if (use_prefix)
2114 sizeof(struct LO_eckd_data) + 2117 datasize = sizeof(struct PFX_eckd_data) +
2115 rpt * nr_tracks * sizeof(struct eckd_count); 2118 sizeof(struct LO_eckd_data) +
2119 rpt * nr_tracks * sizeof(struct eckd_count);
2120 else
2121 datasize = sizeof(struct DE_eckd_data) +
2122 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count);
2116 break; 2124 break;
2117 case 0x01: /* Write record zero and format track. */ 2125 case 0x01: /* Write record zero and format track. */
2118 case 0x09: /* Write record zero and format track, use cdl. */ 2126 case 0x09: /* Write record zero and format track, use cdl. */
2119 cplength = 2 + rpt * nr_tracks; 2127 cplength = 2 + rpt * nr_tracks;
2120 datasize = sizeof(struct PFX_eckd_data) + 2128 if (use_prefix)
2121 sizeof(struct LO_eckd_data) + 2129 datasize = sizeof(struct PFX_eckd_data) +
2122 sizeof(struct eckd_count) + 2130 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count); 2131 sizeof(struct eckd_count) +
2132 rpt * nr_tracks * sizeof(struct eckd_count);
2133 else
2134 datasize = sizeof(struct DE_eckd_data) +
2135 sizeof(struct LO_eckd_data) +
2136 sizeof(struct eckd_count) +
2137 rpt * nr_tracks * sizeof(struct eckd_count);
2124 break; 2138 break;
2125 case 0x04: /* Invalidate track. */ 2139 case 0x04: /* Invalidate track. */
2126 case 0x0c: /* Invalidate track, use cdl. */ 2140 case 0x0c: /* Invalidate track, use cdl. */
2127 cplength = 3; 2141 cplength = 3;
2128 datasize = sizeof(struct PFX_eckd_data) + 2142 if (use_prefix)
2129 sizeof(struct LO_eckd_data) + 2143 datasize = sizeof(struct PFX_eckd_data) +
2130 sizeof(struct eckd_count); 2144 sizeof(struct LO_eckd_data) +
2145 sizeof(struct eckd_count);
2146 else
2147 datasize = sizeof(struct DE_eckd_data) +
2148 sizeof(struct LO_eckd_data) +
2149 sizeof(struct eckd_count);
2131 break; 2150 break;
2132 default: 2151 default:
2133 dev_warn(&startdev->cdev->dev, 2152 dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
2147 2166
2148 switch (intensity & ~0x08) { 2167 switch (intensity & ~0x08) {
2149 case 0x00: /* Normal format. */ 2168 case 0x00: /* Normal format. */
2150 prefix(ccw++, (struct PFX_eckd_data *) data, 2169 if (use_prefix) {
2151 fdata->start_unit, fdata->stop_unit, 2170 prefix(ccw++, (struct PFX_eckd_data *) data,
2152 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2171 fdata->start_unit, fdata->stop_unit,
2153 /* grant subsystem permission to format R0 */ 2172 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2154 if (r0_perm) 2173 /* grant subsystem permission to format R0 */
2155 ((struct PFX_eckd_data *)data) 2174 if (r0_perm)
2156 ->define_extent.ga_extended |= 0x04; 2175 ((struct PFX_eckd_data *)data)
2157 data += sizeof(struct PFX_eckd_data); 2176 ->define_extent.ga_extended |= 0x04;
2177 data += sizeof(struct PFX_eckd_data);
2178 } else {
2179 define_extent(ccw++, (struct DE_eckd_data *) data,
2180 fdata->start_unit, fdata->stop_unit,
2181 DASD_ECKD_CCW_WRITE_CKD, startdev);
2182 /* grant subsystem permission to format R0 */
2183 if (r0_perm)
2184 ((struct DE_eckd_data *) data)
2185 ->ga_extended |= 0x04;
2186 data += sizeof(struct DE_eckd_data);
2187 }
2158 ccw[-1].flags |= CCW_FLAG_CC; 2188 ccw[-1].flags |= CCW_FLAG_CC;
2159 locate_record(ccw++, (struct LO_eckd_data *) data, 2189 locate_record(ccw++, (struct LO_eckd_data *) data,
2160 fdata->start_unit, 0, rpt*nr_tracks, 2190 fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
2163 data += sizeof(struct LO_eckd_data); 2193 data += sizeof(struct LO_eckd_data);
2164 break; 2194 break;
2165 case 0x01: /* Write record zero + format track. */ 2195 case 0x01: /* Write record zero + format track. */
2166 prefix(ccw++, (struct PFX_eckd_data *) data, 2196 if (use_prefix) {
2167 fdata->start_unit, fdata->stop_unit, 2197 prefix(ccw++, (struct PFX_eckd_data *) data,
2168 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2198 fdata->start_unit, fdata->stop_unit,
2169 base, startdev); 2199 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2170 data += sizeof(struct PFX_eckd_data); 2200 base, startdev);
2201 data += sizeof(struct PFX_eckd_data);
2202 } else {
2203 define_extent(ccw++, (struct DE_eckd_data *) data,
2204 fdata->start_unit, fdata->stop_unit,
2205 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
2206 data += sizeof(struct DE_eckd_data);
2207 }
2171 ccw[-1].flags |= CCW_FLAG_CC; 2208 ccw[-1].flags |= CCW_FLAG_CC;
2172 locate_record(ccw++, (struct LO_eckd_data *) data, 2209 locate_record(ccw++, (struct LO_eckd_data *) data,
2173 fdata->start_unit, 0, rpt * nr_tracks + 1, 2210 fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
2176 data += sizeof(struct LO_eckd_data); 2213 data += sizeof(struct LO_eckd_data);
2177 break; 2214 break;
2178 case 0x04: /* Invalidate track. */ 2215 case 0x04: /* Invalidate track. */
2179 prefix(ccw++, (struct PFX_eckd_data *) data, 2216 if (use_prefix) {
2180 fdata->start_unit, fdata->stop_unit, 2217 prefix(ccw++, (struct PFX_eckd_data *) data,
2181 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2218 fdata->start_unit, fdata->stop_unit,
2182 data += sizeof(struct PFX_eckd_data); 2219 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2220 data += sizeof(struct PFX_eckd_data);
2221 } else {
2222 define_extent(ccw++, (struct DE_eckd_data *) data,
2223 fdata->start_unit, fdata->stop_unit,
2224 DASD_ECKD_CCW_WRITE_CKD, startdev);
2225 data += sizeof(struct DE_eckd_data);
2226 }
2183 ccw[-1].flags |= CCW_FLAG_CC; 2227 ccw[-1].flags |= CCW_FLAG_CC;
2184 locate_record(ccw++, (struct LO_eckd_data *) data, 2228 locate_record(ccw++, (struct LO_eckd_data *) data,
2185 fdata->start_unit, 0, 1, 2229 fdata->start_unit, 0, 1,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a3aa374799dc..1fe264379e0d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
486 timeout = 0; 486 timeout = 0;
487 if (timer_pending(&sclp_request_timer)) { 487 if (timer_pending(&sclp_request_timer)) {
488 /* Get timeout TOD value */ 488 /* Get timeout TOD value */
489 timeout = get_tod_clock() + 489 timeout = get_tod_clock_fast() +
490 sclp_tod_from_jiffies(sclp_request_timer.expires - 490 sclp_tod_from_jiffies(sclp_request_timer.expires -
491 jiffies); 491 jiffies);
492 } 492 }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
508 while (sclp_running_state != sclp_running_state_idle) { 508 while (sclp_running_state != sclp_running_state_idle) {
509 /* Check for expired request timer */ 509 /* Check for expired request timer */
510 if (timer_pending(&sclp_request_timer) && 510 if (timer_pending(&sclp_request_timer) &&
511 get_tod_clock() > timeout && 511 get_tod_clock_fast() > timeout &&
512 del_timer(&sclp_request_timer)) 512 del_timer(&sclp_request_timer))
513 sclp_request_timer.function(sclp_request_timer.data); 513 sclp_request_timer.function(sclp_request_timer.data);
514 cpu_relax(); 514 cpu_relax();
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 8cd34bf644b3..77df9cb00688 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void)
145 145
146 if (sccb->header.response_code != 0x20) 146 if (sccb->header.response_code != 0x20)
147 return 0; 147 return 0;
148 if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) 148 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
149 return 1; 149 return 0;
150 return 0; 150 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
151 return 0;
152 return 1;
151} 153}
152 154
153bool __init sclp_has_vt220(void) 155bool __init sclp_has_vt220(void)
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index a0f47c83fd62..3f4ca4e09a4c 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work)
810 struct winsize ws; 810 struct winsize ws;
811 811
812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); 812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
813 if (!screen) 813 if (IS_ERR(screen))
814 return; 814 return;
815 /* Switch to new output size */ 815 /* Switch to new output size */
816 spin_lock_bh(&tp->view.lock); 816 spin_lock_bh(&tp->view.lock);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a0..cf31d3321dab 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
313 int ret; 313 int ret;
314 314
315 dev_num = iminor(inode); 315 dev_num = iminor(inode);
316 if (dev_num > MAXMINOR) 316 if (dev_num >= MAXMINOR)
317 return -ENODEV; 317 return -ENODEV;
318 logptr = &sys_ser[dev_num]; 318 logptr = &sys_ser[dev_num];
319 319
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d7da67a31c77..88e35d85d205 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -878,9 +878,9 @@ static void css_reset(void)
878 atomic_inc(&chpid_reset_count); 878 atomic_inc(&chpid_reset_count);
879 } 879 }
880 /* Wait for machine check for all channel paths. */ 880 /* Wait for machine check for all channel paths. */
881 timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 881 timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
882 while (atomic_read(&chpid_reset_count) != 0) { 882 while (atomic_read(&chpid_reset_count) != 0) {
883 if (get_tod_clock() > timeout) 883 if (get_tod_clock_fast() > timeout)
884 break; 884 break;
885 cpu_relax(); 885 cpu_relax();
886 } 886 }
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 8ed52aa49122..bbd3e511c771 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
338 retries++; 338 retries++;
339 339
340 if (!start_time) { 340 if (!start_time) {
341 start_time = get_tod_clock(); 341 start_time = get_tod_clock_fast();
342 goto again; 342 goto again;
343 } 343 }
344 if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
345 goto again; 345 goto again;
346 } 346 }
347 if (retries) { 347 if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
504 int count, stop; 504 int count, stop;
505 unsigned char state = 0; 505 unsigned char state = 0;
506 506
507 q->timestamp = get_tod_clock(); 507 q->timestamp = get_tod_clock_fast();
508 508
509 /* 509 /*
510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
595 * At this point we know, that inbound first_to_check 595 * At this point we know, that inbound first_to_check
596 * has (probably) not moved (see qdio_inbound_processing). 596 * has (probably) not moved (see qdio_inbound_processing).
597 */ 597 */
598 if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
600 q->first_to_check); 600 q->first_to_check);
601 return 1; 601 return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
728 int count, stop; 728 int count, stop;
729 unsigned char state = 0; 729 unsigned char state = 0;
730 730
731 q->timestamp = get_tod_clock(); 731 q->timestamp = get_tod_clock_fast();
732 732
733 if (need_siga_sync(q)) 733 if (need_siga_sync(q))
734 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 734 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..757eb0716d45 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, 696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
698 pci_device)) != NULL) { 698 pci_device)) != NULL) {
699 struct blogic_adapter *adapter = adapter; 699 struct blogic_adapter *host_adapter = adapter;
700 struct blogic_adapter_info adapter_info; 700 struct blogic_adapter_info adapter_info;
701 enum blogic_isa_ioport mod_ioaddr_req; 701 enum blogic_isa_ioport mod_ioaddr_req;
702 unsigned char bus; 702 unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
744 known and enabled, note that the particular Standard ISA I/O 744 known and enabled, note that the particular Standard ISA I/O
745 Address should not be probed. 745 Address should not be probed.
746 */ 746 */
747 adapter->io_addr = io_addr; 747 host_adapter->io_addr = io_addr;
748 blogic_intreset(adapter); 748 blogic_intreset(host_adapter);
749 if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 749 if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
750 &adapter_info, sizeof(adapter_info)) == 750 &adapter_info, sizeof(adapter_info)) ==
751 sizeof(adapter_info)) { 751 sizeof(adapter_info)) {
752 if (adapter_info.isa_port < 6) 752 if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
762 I/O Address assigned at system initialization. 762 I/O Address assigned at system initialization.
763 */ 763 */
764 mod_ioaddr_req = BLOGIC_IO_DISABLE; 764 mod_ioaddr_req = BLOGIC_IO_DISABLE;
765 blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 765 blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
766 sizeof(mod_ioaddr_req), NULL, 0); 766 sizeof(mod_ioaddr_req), NULL, 0);
767 /* 767 /*
768 For the first MultiMaster Host Adapter enumerated, 768 For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
779 779
780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; 780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
781 fetch_localram.count = sizeof(autoscsi_byte45); 781 fetch_localram.count = sizeof(autoscsi_byte45);
782 blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, 782 blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
783 &fetch_localram, sizeof(fetch_localram), 783 &fetch_localram, sizeof(fetch_localram),
784 &autoscsi_byte45, 784 &autoscsi_byte45,
785 sizeof(autoscsi_byte45)); 785 sizeof(autoscsi_byte45));
786 blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, 786 blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
787 sizeof(id)); 787 &id, sizeof(id));
788 if (id.fw_ver_digit1 == '5') 788 if (id.fw_ver_digit1 == '5')
789 force_scan_order = 789 force_scan_order =
790 autoscsi_byte45.force_scan_order; 790 autoscsi_byte45.force_scan_order;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42ef787a..f0d432c139d0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
772{ 772{
773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
774 if (!capable(CAP_SYS_RAWIO))
775 return -EPERM;
774 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); 776 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
775} 777}
776 778
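
Editor's note: the aacraid change adds a CAP_SYS_RAWIO gate to the compat ioctl entry point so unprivileged 32-bit callers can no longer issue raw adapter commands. A sketch of that early privilege check (the function name here is illustrative, not the aacraid handler):

#include <linux/capability.h>
#include <linux/errno.h>

static long example_compat_ioctl(unsigned int cmd, unsigned long arg)
{
	/* Raw hardware commands are privileged; refuse unprivileged callers
	 * before doing any work with cmd/arg. */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* ... forward cmd/arg to the shared ioctl implementation ... */
	return 0;
}
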
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2ef497ebadc0..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -20,7 +20,7 @@
20 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
21 * | | | 0x2011-0x2012, | 21 * | | | 0x2011-0x2012, |
22 * | | | 0x2016 | 22 * | | | 0x2016 |
23 * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | 23 * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
24 * | | | 0x3027-0x3028 | 24 * | | | 0x3027-0x3028 |
25 * | | | 0x303d-0x3041 | 25 * | | | 0x303d-0x3041 |
26 * | | | 0x302d,0x3033 | 26 * | | | 0x302d,0x3033 |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938c..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1957 que = MSW(sts->handle); 1957 que = MSW(sts->handle);
1958 req = ha->req_q_map[que]; 1958 req = ha->req_q_map[que];
1959 1959
1960 /* Check for invalid queue pointer */
1961 if (req == NULL ||
1962 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
1963 ql_dbg(ql_dbg_io, vha, 0x3059,
1964 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1965 "que=%u.\n", sts->handle, req, que);
1966 return;
1967 }
1968
1960 /* Validate handle. */ 1969 /* Validate handle. */
1961 if (handle < req->num_outstanding_cmds) 1970 if (handle < req->num_outstanding_cmds)
1962 sp = req->outstanding_cmds[handle]; 1971 sp = req->outstanding_cmds[handle];
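
Editor's note: the qla2xxx hunk validates the queue index decoded from a hardware status entry before the request-queue pointer is dereferenced. A generic sketch of that defensive lookup, assuming (as the driver does) that queue ids are allocated contiguously from zero and tracked in a bitmap; all names are invented:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX_QUEUES 8

/* Bits set for queue ids that were actually set up at init time. */
static DECLARE_BITMAP(example_qid_map, EXAMPLE_MAX_QUEUES);
static void *example_req_q_map[EXAMPLE_MAX_QUEUES];

static void *example_get_req_queue(unsigned int que)
{
	/* Ids are allocated contiguously, so anything at or beyond the first
	 * clear bit was never initialised and must not be dereferenced. */
	if (que >= find_first_zero_bit(example_qid_map, EXAMPLE_MAX_QUEUES))
		return NULL;
	return example_req_q_map[que];
}
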
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e62d17d41d4e..5693f6d7eddb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2854 gd->events |= DISK_EVENT_MEDIA_CHANGE; 2854 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2855 } 2855 }
2856 2856
2857 blk_pm_runtime_init(sdp->request_queue, dev);
2857 add_disk(gd); 2858 add_disk(gd);
2858 if (sdkp->capacity) 2859 if (sdkp->capacity)
2859 sd_dif_config_host(sdkp); 2860 sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2862 2863
2863 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2864 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2864 sdp->removable ? "removable " : ""); 2865 sdp->removable ? "removable " : "");
2865 blk_pm_runtime_init(sdp->request_queue, dev);
2866 scsi_autopm_put_device(sdp); 2866 scsi_autopm_put_device(sdp);
2867 put_device(&sdkp->dev); 2867 put_device(&sdkp->dev);
2868} 2868}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5cbc4bb1b395..df5e961484e1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
105static int sg_add(struct device *, struct class_interface *); 105static int sg_add(struct device *, struct class_interface *);
106static void sg_remove(struct device *, struct class_interface *); 106static void sg_remove(struct device *, struct class_interface *);
107 107
108static DEFINE_SPINLOCK(sg_open_exclusive_lock);
109
108static DEFINE_IDR(sg_index_idr); 110static DEFINE_IDR(sg_index_idr);
109static DEFINE_RWLOCK(sg_index_lock); 111static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
112 file descriptor list for device */
110 113
111static struct class_interface sg_interface = { 114static struct class_interface sg_interface = {
112 .add_dev = sg_add, 115 .add_dev = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
143} Sg_request; 146} Sg_request;
144 147
145typedef struct sg_fd { /* holds the state of a file descriptor */ 148typedef struct sg_fd { /* holds the state of a file descriptor */
146 struct list_head sfd_siblings; /* protected by sfd_lock of device */ 149 /* sfd_siblings is protected by sg_index_lock */
150 struct list_head sfd_siblings;
147 struct sg_device *parentdp; /* owning device */ 151 struct sg_device *parentdp; /* owning device */
148 wait_queue_head_t read_wait; /* queue read until command done */ 152 wait_queue_head_t read_wait; /* queue read until command done */
149 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 153 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
166 170
167typedef struct sg_device { /* holds the state of each scsi generic device */ 171typedef struct sg_device { /* holds the state of each scsi generic device */
168 struct scsi_device *device; 172 struct scsi_device *device;
173 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
169 int sg_tablesize; /* adapter's max scatter-gather table size */ 174 int sg_tablesize; /* adapter's max scatter-gather table size */
170 u32 index; /* device index number */ 175 u32 index; /* device index number */
171 spinlock_t sfd_lock; /* protect file descriptor list for device */ 176 /* sfds is protected by sg_index_lock */
172 struct list_head sfds; 177 struct list_head sfds;
173 struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
174 volatile char detached; /* 0->attached, 1->detached pending removal */ 178 volatile char detached; /* 0->attached, 1->detached pending removal */
179 /* exclude protected by sg_open_exclusive_lock */
175 char exclude; /* opened for exclusive access */ 180 char exclude; /* opened for exclusive access */
176 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 181 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
177 struct gendisk *disk; 182 struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
220 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 225 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
221} 226}
222 227
228static int get_exclude(Sg_device *sdp)
229{
230 unsigned long flags;
231 int ret;
232
233 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
234 ret = sdp->exclude;
235 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
236 return ret;
237}
238
239static int set_exclude(Sg_device *sdp, char val)
240{
241 unsigned long flags;
242
243 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
244 sdp->exclude = val;
245 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
246 return val;
247}
248
223static int sfds_list_empty(Sg_device *sdp) 249static int sfds_list_empty(Sg_device *sdp)
224{ 250{
225 unsigned long flags; 251 unsigned long flags;
226 int ret; 252 int ret;
227 253
228 spin_lock_irqsave(&sdp->sfd_lock, flags); 254 read_lock_irqsave(&sg_index_lock, flags);
229 ret = list_empty(&sdp->sfds); 255 ret = list_empty(&sdp->sfds);
230 spin_unlock_irqrestore(&sdp->sfd_lock, flags); 256 read_unlock_irqrestore(&sg_index_lock, flags);
231 return ret; 257 return ret;
232} 258}
233 259
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
239 struct request_queue *q; 265 struct request_queue *q;
240 Sg_device *sdp; 266 Sg_device *sdp;
241 Sg_fd *sfp; 267 Sg_fd *sfp;
268 int res;
242 int retval; 269 int retval;
243 270
244 nonseekable_open(inode, filp); 271 nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
267 goto error_out; 294 goto error_out;
268 } 295 }
269 296
270 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { 297 if (flags & O_EXCL) {
271 retval = -EPERM; /* Can't lock it with read only access */ 298 if (O_RDONLY == (flags & O_ACCMODE)) {
272 goto error_out; 299 retval = -EPERM; /* Can't lock it with read only access */
273 } 300 goto error_out;
274 if (flags & O_NONBLOCK) { 301 }
275 if (flags & O_EXCL) { 302 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
276 if (!down_write_trylock(&sdp->o_sem)) { 303 retval = -EBUSY;
277 retval = -EBUSY; 304 goto error_out;
278 goto error_out; 305 }
279 } 306 res = wait_event_interruptible(sdp->o_excl_wait,
280 } else { 307 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
281 if (!down_read_trylock(&sdp->o_sem)) { 308 if (res) {
282 retval = -EBUSY; 309 retval = res; /* -ERESTARTSYS because signal hit process */
283 goto error_out; 310 goto error_out;
284 } 311 }
312 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
313 if (flags & O_NONBLOCK) {
314 retval = -EBUSY;
315 goto error_out;
316 }
317 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
318 if (res) {
319 retval = res; /* -ERESTARTSYS because signal hit process */
320 goto error_out;
285 } 321 }
286 } else {
287 if (flags & O_EXCL)
288 down_write(&sdp->o_sem);
289 else
290 down_read(&sdp->o_sem);
291 } 322 }
292 /* Since write lock is held, no need to check sfd_list */ 323 if (sdp->detached) {
293 if (flags & O_EXCL) 324 retval = -ENODEV;
294 sdp->exclude = 1; /* used by release lock */ 325 goto error_out;
295 326 }
296 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 327 if (sfds_list_empty(sdp)) { /* no existing opens on this device */
297 sdp->sgdebug = 0; 328 sdp->sgdebug = 0;
298 q = sdp->device->request_queue; 329 q = sdp->device->request_queue;
299 sdp->sg_tablesize = queue_max_segments(q); 330 sdp->sg_tablesize = queue_max_segments(q);
300 } 331 }
301 sfp = sg_add_sfp(sdp, dev); 332 if ((sfp = sg_add_sfp(sdp, dev)))
302 if (!IS_ERR(sfp))
303 filp->private_data = sfp; 333 filp->private_data = sfp;
304 /* retval is already provably zero at this point because of the
305 * check after retval = scsi_autopm_get_device(sdp->device))
306 */
307 else { 334 else {
308 retval = PTR_ERR(sfp);
309
310 if (flags & O_EXCL) { 335 if (flags & O_EXCL) {
311 sdp->exclude = 0; /* undo if error */ 336 set_exclude(sdp, 0); /* undo if error */
312 up_write(&sdp->o_sem); 337 wake_up_interruptible(&sdp->o_excl_wait);
313 } else 338 }
314 up_read(&sdp->o_sem); 339 retval = -ENOMEM;
340 goto error_out;
341 }
342 retval = 0;
315error_out: 343error_out:
344 if (retval) {
316 scsi_autopm_put_device(sdp->device); 345 scsi_autopm_put_device(sdp->device);
317sdp_put: 346sdp_put:
318 scsi_device_put(sdp->device); 347 scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
329{ 358{
330 Sg_device *sdp; 359 Sg_device *sdp;
331 Sg_fd *sfp; 360 Sg_fd *sfp;
332 int excl;
333 361
334 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 362 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
335 return -ENXIO; 363 return -ENXIO;
336 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 364 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
337 365
338 excl = sdp->exclude; 366 set_exclude(sdp, 0);
339 sdp->exclude = 0; 367 wake_up_interruptible(&sdp->o_excl_wait);
340 if (excl)
341 up_write(&sdp->o_sem);
342 else
343 up_read(&sdp->o_sem);
344 368
345 scsi_autopm_put_device(sdp->device); 369 scsi_autopm_put_device(sdp->device);
346 kref_put(&sfp->f_ref, sg_remove_sfp); 370 kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1391 disk->first_minor = k; 1415 disk->first_minor = k;
1392 sdp->disk = disk; 1416 sdp->disk = disk;
1393 sdp->device = scsidp; 1417 sdp->device = scsidp;
1394 spin_lock_init(&sdp->sfd_lock);
1395 INIT_LIST_HEAD(&sdp->sfds); 1418 INIT_LIST_HEAD(&sdp->sfds);
1396 init_rwsem(&sdp->o_sem); 1419 init_waitqueue_head(&sdp->o_excl_wait);
1397 sdp->sg_tablesize = queue_max_segments(q); 1420 sdp->sg_tablesize = queue_max_segments(q);
1398 sdp->index = k; 1421 sdp->index = k;
1399 kref_init(&sdp->d_ref); 1422 kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1526 1549
1527 /* Need a write lock to set sdp->detached. */ 1550 /* Need a write lock to set sdp->detached. */
1528 write_lock_irqsave(&sg_index_lock, iflags); 1551 write_lock_irqsave(&sg_index_lock, iflags);
1529 spin_lock(&sdp->sfd_lock);
1530 sdp->detached = 1; 1552 sdp->detached = 1;
1531 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1553 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1532 wake_up_interruptible(&sfp->read_wait); 1554 wake_up_interruptible(&sfp->read_wait);
1533 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1555 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1534 } 1556 }
1535 spin_unlock(&sdp->sfd_lock);
1536 write_unlock_irqrestore(&sg_index_lock, iflags); 1557 write_unlock_irqrestore(&sg_index_lock, iflags);
1537 1558
1538 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1559 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2043 2064
2044 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2065 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2045 if (!sfp) 2066 if (!sfp)
2046 return ERR_PTR(-ENOMEM); 2067 return NULL;
2047 2068
2048 init_waitqueue_head(&sfp->read_wait); 2069 init_waitqueue_head(&sfp->read_wait);
2049 rwlock_init(&sfp->rq_list_lock); 2070 rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
2057 sfp->cmd_q = SG_DEF_COMMAND_Q; 2078 sfp->cmd_q = SG_DEF_COMMAND_Q;
2058 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2079 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2059 sfp->parentdp = sdp; 2080 sfp->parentdp = sdp;
2060 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2081 write_lock_irqsave(&sg_index_lock, iflags);
2061 if (sdp->detached) {
2062 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2063 return ERR_PTR(-ENODEV);
2064 }
2065 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2082 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2066 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2083 write_unlock_irqrestore(&sg_index_lock, iflags);
2067 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2084 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2068 if (unlikely(sg_big_buff != def_reserved_size)) 2085 if (unlikely(sg_big_buff != def_reserved_size))
2069 sg_big_buff = def_reserved_size; 2086 sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
2113 struct sg_device *sdp = sfp->parentdp; 2130 struct sg_device *sdp = sfp->parentdp;
2114 unsigned long iflags; 2131 unsigned long iflags;
2115 2132
2116 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2133 write_lock_irqsave(&sg_index_lock, iflags);
2117 list_del(&sfp->sfd_siblings); 2134 list_del(&sfp->sfd_siblings);
2118 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2135 write_unlock_irqrestore(&sg_index_lock, iflags);
2136 wake_up_interruptible(&sdp->o_excl_wait);
2119 2137
2120 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2138 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2121 schedule_work(&sfp->ew.work); 2139 schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2502 return 0; 2520 return 0;
2503} 2521}
2504 2522
2505/* must be called while holding sg_index_lock and sfd_lock */ 2523/* must be called while holding sg_index_lock */
2506static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2524static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2507{ 2525{
2508 int k, m, new_interface, blen, usg; 2526 int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2587 2605
2588 read_lock_irqsave(&sg_index_lock, iflags); 2606 read_lock_irqsave(&sg_index_lock, iflags);
2589 sdp = it ? sg_lookup_dev(it->index) : NULL; 2607 sdp = it ? sg_lookup_dev(it->index) : NULL;
2590 if (sdp) { 2608 if (sdp && !list_empty(&sdp->sfds)) {
2591 spin_lock(&sdp->sfd_lock); 2609 struct scsi_device *scsidp = sdp->device;
2592 if (!list_empty(&sdp->sfds)) {
2593 struct scsi_device *scsidp = sdp->device;
2594 2610
2595 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2611 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2596 if (sdp->detached) 2612 if (sdp->detached)
2597 seq_printf(s, "detached pending close "); 2613 seq_printf(s, "detached pending close ");
2598 else 2614 else
2599 seq_printf 2615 seq_printf
2600 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2616 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2601 scsidp->host->host_no, 2617 scsidp->host->host_no,
2602 scsidp->channel, scsidp->id, 2618 scsidp->channel, scsidp->id,
2603 scsidp->lun, 2619 scsidp->lun,
2604 scsidp->host->hostt->emulated); 2620 scsidp->host->hostt->emulated);
2605 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2621 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2606 sdp->sg_tablesize, sdp->exclude); 2622 sdp->sg_tablesize, get_exclude(sdp));
2607 sg_proc_debug_helper(s, sdp); 2623 sg_proc_debug_helper(s, sdp);
2608 }
2609 spin_unlock(&sdp->sfd_lock);
2610 } 2624 }
2611 read_unlock_irqrestore(&sg_index_lock, iflags); 2625 read_unlock_irqrestore(&sg_index_lock, iflags);
2612 return 0; 2626 return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index fd7cc566095a..d4ac60b4a56e 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1583 /* Initialize the hardware */ 1583 /* Initialize the hardware */
1584 ret = clk_prepare_enable(clk); 1584 ret = clk_prepare_enable(clk);
1585 if (ret) 1585 if (ret)
1586 goto out_unmap_regs; 1586 goto out_free_irq;
1587 spi_writel(as, CR, SPI_BIT(SWRST)); 1587 spi_writel(as, CR, SPI_BIT(SWRST));
1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1588 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1589 if (as->caps.has_wdrbt) { 1589 if (as->caps.has_wdrbt) {
@@ -1614,6 +1614,7 @@ out_free_dma:
1614 spi_writel(as, CR, SPI_BIT(SWRST)); 1614 spi_writel(as, CR, SPI_BIT(SWRST));
1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ 1615 spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
1616 clk_disable_unprepare(clk); 1616 clk_disable_unprepare(clk);
1617out_free_irq:
1617 free_irq(irq, master); 1618 free_irq(irq, master);
1618out_unmap_regs: 1619out_unmap_regs:
1619 iounmap(as->regs); 1620 iounmap(as->regs);
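
Editor's note: the spi-atmel fix adds an out_free_irq label and points the clk_prepare_enable() failure path at it, so the IRQ requested just beforehand is released again on error. A generic sketch of that error-label ladder, with placeholder setup steps standing in for ioremap/request_irq/clk_prepare_enable:

#include <linux/errno.h>

static int step_a(void) { return 0; }	/* e.g. ioremap */
static void undo_a(void) { }		/* e.g. iounmap */
static int step_b(void) { return 0; }	/* e.g. request_irq */
static void undo_b(void) { }		/* e.g. free_irq */
static int step_c(void) { return 0; }	/* e.g. clk_prepare_enable */

static int example_probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto out_undo_a;	/* only A has succeeded so far */

	ret = step_c();
	if (ret)
		goto out_undo_b;	/* B succeeded too, undo it first */

	return 0;

out_undo_b:
	undo_b();
out_undo_a:
	undo_a();
	return ret;
}
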
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 5655acf55bfe..6416798828e7 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
226 dev_name(&pdev->dev), hw); 226 dev_name(&pdev->dev), hw);
227 if (ret) { 227 if (ret) {
228 dev_err(&pdev->dev, "Can't request IRQ\n"); 228 dev_err(&pdev->dev, "Can't request IRQ\n");
229 clk_put(hw->spi_clk);
230 goto clk_out; 229 goto clk_out;
231 } 230 }
232 231
@@ -247,7 +246,6 @@ err_out:
247 gpio_free(hw->chipselect[i]); 246 gpio_free(hw->chipselect[i]);
248 247
249 spi_master_put(master); 248 spi_master_put(master);
250 kfree(master);
251 249
252 return ret; 250 return ret;
253} 251}
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
263 gpio_free(hw->chipselect[i]); 261 gpio_free(hw->chipselect[i]);
264 262
265 spi_unregister_master(master); 263 spi_unregister_master(master);
266 kfree(master);
267 264
268 return 0; 265 return 0;
269} 266}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 6cd07d13ecab..4e44575bd87a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev)
476 master->bus_num = bus_num; 476 master->bus_num = bus_num;
477 477
478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
479 if (!res) {
480 dev_err(&pdev->dev, "can't get platform resource\n");
481 ret = -EINVAL;
482 goto out_master_put;
483 }
484
485 dspi->base = devm_ioremap_resource(&pdev->dev, res); 479 dspi->base = devm_ioremap_resource(&pdev->dev, res);
486 if (!dspi->base) { 480 if (IS_ERR(dspi->base)) {
487 ret = -EINVAL; 481 ret = PTR_ERR(dspi->base);
488 goto out_master_put; 482 goto out_master_put;
489 } 483 }
490 484
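
Editor's note: the fsl-dspi cleanup drops the hand-rolled NULL checks because devm_ioremap_resource() validates the resource itself and returns an error pointer, which the caller then tests with IS_ERR()/PTR_ERR(). A minimal probe fragment in the same style (device specifics omitted):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* No separate "can't get platform resource" check is needed:
	 * devm_ioremap_resource() copes with a NULL resource and logs
	 * the failure on our behalf. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the mapped registers via base ... */
	return 0;
}
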
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index dbc5e999a1f5..6adf4e35816d 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
522 psc_num = master->bus_num; 522 psc_num = master->bus_num;
523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); 523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
524 clk = devm_clk_get(dev, clk_name); 524 clk = devm_clk_get(dev, clk_name);
525 if (IS_ERR(clk)) 525 if (IS_ERR(clk)) {
526 ret = PTR_ERR(clk);
526 goto free_irq; 527 goto free_irq;
528 }
527 ret = clk_prepare_enable(clk); 529 ret = clk_prepare_enable(clk);
528 if (ret) 530 if (ret)
529 goto free_irq; 531 goto free_irq;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 2eb06ee0b326..c1a50674c1e3 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
546 if (pm_runtime_suspended(&drv_data->pdev->dev)) 546 if (pm_runtime_suspended(&drv_data->pdev->dev))
547 return IRQ_NONE; 547 return IRQ_NONE;
548 548
549 sccr1_reg = read_SSCR1(reg); 549 /*
550 * If the device is not yet in RPM suspended state and we get an
551 * interrupt that is meant for another device, check if status bits
552 * are all set to one. That means that the device is already
553 * powered off.
554 */
550 status = read_SSSR(reg); 555 status = read_SSSR(reg);
556 if (status == ~0)
557 return IRQ_NONE;
558
559 sccr1_reg = read_SSCR1(reg);
551 560
552 /* Ignore possible writes if we don't need to write */ 561 /* Ignore possible writes if we don't need to write */
553 if (!(sccr1_reg & SSCR1_TIE)) 562 if (!(sccr1_reg & SSCR1_TIE))
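
Editor's note: the pxa2xx handler now reads the status register before anything else and returns IRQ_NONE when it reads back as all ones, the value a powered-down block yields on a shared interrupt line. A hedged sketch of that shape; the register accessor is a stand-in for the driver's read_SSSR():

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

static irqreturn_t example_ssp_irq(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;	/* mapped register block */
	u32 status = readl(regs);	/* stand-in for read_SSSR() */

	/* A powered-off block returns all ones, so this interrupt was raised
	 * by another device sharing the line: tell the core we did nothing. */
	if (status == ~0u)
		return IRQ_NONE;

	/* ... acknowledge and service the real status bits ... */
	return IRQ_HANDLED;
}
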
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 512b8893893b..a80376dc3a10 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, 1428 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1429 sdd->regs + S3C64XX_SPI_INT_EN); 1429 sdd->regs + S3C64XX_SPI_INT_EN);
1430 1430
1431 pm_runtime_enable(&pdev->dev);
1432
1431 if (spi_register_master(master)) { 1433 if (spi_register_master(master)) {
1432 dev_err(&pdev->dev, "cannot register SPI master\n"); 1434 dev_err(&pdev->dev, "cannot register SPI master\n");
1433 ret = -EBUSY; 1435 ret = -EBUSY;
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
1440 mem_res, 1442 mem_res,
1441 sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1443 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1442 1444
1443 pm_runtime_enable(&pdev->dev);
1444
1445 return 0; 1445 return 0;
1446 1446
1447err3: 1447err3:
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 0b68cb592fa4..e488a90a98b8 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev)
296 goto error1; 296 goto error1;
297 } 297 }
298 298
299 pm_runtime_enable(&pdev->dev);
300
299 master->num_chipselect = 1; 301 master->num_chipselect = 1;
300 master->bus_num = pdev->id; 302 master->bus_num = pdev->id;
301 master->setup = hspi_setup; 303 master->setup = hspi_setup;
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
309 goto error1; 311 goto error1;
310 } 312 }
311 313
312 pm_runtime_enable(&pdev->dev);
313
314 return 0; 314 return 0;
315 315
316 error1: 316 error1:
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f91bc1fdd895..639ba96adb36 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@ cntrlEnd:
1960 1960
1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); 1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
1962 1962
1963 memset(&DevInfo, 0, sizeof(DevInfo));
1963 DevInfo.MaxRDMBufferSize = BUFFER_4K; 1964 DevInfo.MaxRDMBufferSize = BUFFER_4K;
1964 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; 1965 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
1965 DevInfo.u32RxAlignmentCorrection = 0; 1966 DevInfo.u32RxAlignmentCorrection = 0;
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3ba4c5712dff..853f62b2b1a9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
369{ 369{
370 const struct ni_65xx_board *board = comedi_board(dev); 370 const struct ni_65xx_board *board = comedi_board(dev);
371 struct ni_65xx_private *devpriv = dev->private; 371 struct ni_65xx_private *devpriv = dev->private;
372 unsigned base_bitfield_channel; 372 int base_bitfield_channel;
373 const unsigned max_ports_per_bitfield = 5;
374 unsigned read_bits = 0; 373 unsigned read_bits = 0;
375 unsigned j; 374 int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
375 int port_offset;
376 376
377 base_bitfield_channel = CR_CHAN(insn->chanspec); 377 base_bitfield_channel = CR_CHAN(insn->chanspec);
378 for (j = 0; j < max_ports_per_bitfield; ++j) { 378 for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
379 const unsigned port_offset = 379 port_offset <= last_port_offset; port_offset++) {
380 ni_65xx_port_by_channel(base_bitfield_channel) + j; 380 unsigned port = sprivate(s)->base_port + port_offset;
381 const unsigned port = 381 int base_port_channel = port_offset * ni_65xx_channels_per_port;
382 sprivate(s)->base_port + port_offset;
383 unsigned base_port_channel;
384 unsigned port_mask, port_data, port_read_bits; 382 unsigned port_mask, port_data, port_read_bits;
385 int bitshift; 383 int bitshift = base_port_channel - base_bitfield_channel;
386 if (port >= ni_65xx_total_num_ports(board)) 384
385 if (bitshift >= 32)
387 break; 386 break;
388 base_port_channel = port_offset * ni_65xx_channels_per_port;
389 port_mask = data[0]; 387 port_mask = data[0];
390 port_data = data[1]; 388 port_data = data[1];
391 bitshift = base_port_channel - base_bitfield_channel;
392 if (bitshift >= 32 || bitshift <= -32)
393 break;
394 if (bitshift > 0) { 389 if (bitshift > 0) {
395 port_mask >>= bitshift; 390 port_mask >>= bitshift;
396 port_data >>= bitshift; 391 port_data >>= bitshift;
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index b94a95a597d6..76d5bbd4d93c 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,3 +1,4 @@
1config USB_MSI3101 1config USB_MSI3101
2 tristate "Mirics MSi3101 SDR Dongle" 2 tristate "Mirics MSi3101 SDR Dongle"
3 depends on USB && VIDEO_DEV && VIDEO_V4L2 3 depends on USB && VIDEO_DEV && VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 24c7b70a6cbf..4c3bf776bb20 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
1131 /* Absolute min and max number of buffers available for mmap() */ 1131 /* Absolute min and max number of buffers available for mmap() */
1132 *nbuffers = 32; 1132 *nbuffers = 32;
1133 *nplanes = 1; 1133 *nplanes = 1;
1134 sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */ 1134 /*
1135 * 3, wMaxPacketSize 3x 1024 bytes
1136 * 504, max IQ sample pairs per 1024 frame
1137 * 2, two samples, I and Q
1138 * 4, 32-bit float
1139 */
1140 sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
1135 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n", 1141 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
1136 __func__, *nbuffers, sizes[0]); 1142 __func__, *nbuffers, sizes[0]);
1137 return 0; 1143 return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1657 f->frequency * 625UL / 10UL); 1663 f->frequency * 625UL / 10UL);
1658} 1664}
1659 1665
1660const struct v4l2_ioctl_ops msi3101_ioctl_ops = { 1666static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
1661 .vidioc_querycap = msi3101_querycap, 1667 .vidioc_querycap = msi3101_querycap,
1662 1668
1663 .vidioc_enum_input = msi3101_enum_input, 1669 .vidioc_enum_input = msi3101_enum_input,
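
Editor's note: the msi3101 queue_setup hunk replaces the opaque 3 * 3072 with the derivation now spelled out in its comment: three 1024-byte packets, at most 504 I/Q sample pairs per packet, two samples per pair, four bytes per 32-bit float, so 3 * 504 * 2 * 4 = 12096 bytes, which PAGE_ALIGN() rounds up to 12288 on 4 KiB pages. A quick plain-C check of that arithmetic (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int packets = 3;	/* wMaxPacketSize: 3 x 1024 bytes */
	unsigned int pairs = 504;	/* max I/Q pairs per 1024-byte frame */
	unsigned int samples = 2;	/* I and Q */
	unsigned int bytes = 4;		/* 32-bit float per sample */
	unsigned int page = 4096;	/* assumed PAGE_SIZE */
	unsigned int raw = packets * pairs * samples * bytes;
	unsigned int aligned = (raw + page - 1) / page * page;

	printf("raw=%u aligned=%u\n", raw, aligned);	/* raw=12096 aligned=12288 */
	return 0;
}
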
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ccb64fb0786..6ce0af9977d8 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
155 struct oz_app_hdr *app_hdr; 155 struct oz_app_hdr *app_hdr;
156 struct oz_serial_ctx *ctx; 156 struct oz_serial_ctx *ctx;
157 157
158 if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
159 return -EINVAL;
160
158 spin_lock_bh(&g_cdev.lock); 161 spin_lock_bh(&g_cdev.lock);
159 pd = g_cdev.active_pd; 162 pd = g_cdev.active_pd;
160 if (pd) 163 if (pd)
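
The oz_cdev_write() hunk above adds an up-front length check so a user write can never overrun the element payload buffer. As a rough standalone sketch of that guard (kernel-context C; the size and overhead constants below are placeholders, not the driver's real values):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    #define DEMO_DATA_SIZE    256  /* stand-in for sizeof(ei->data) */
    #define DEMO_HDR_OVERHEAD  16  /* stand-in for sizeof(*elt) + sizeof(*app_hdr) */

    static ssize_t demo_write(struct file *filp, const char __user *buf,
                              size_t count, loff_t *fpos)
    {
            char payload[DEMO_DATA_SIZE];

            /* Reject oversized writes before touching any buffers. */
            if (count > DEMO_DATA_SIZE - DEMO_HDR_OVERHEAD)
                    return -EINVAL;
            if (copy_from_user(payload, buf, count))
                    return -EFAULT;
            return count;
    }
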
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 23db32f07fd5..a10cdb17038b 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
1063 1063
1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt) 1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
1065{ 1065{
1066 struct serial_icounter_struct icount; 1066 struct serial_icounter_struct icount = {};
1067 struct sb_uart_icount cnow; 1067 struct sb_uart_icount cnow;
1068 struct sb_uart_port *port = state->port; 1068 struct sb_uart_port *port = state->port;
1069 1069
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e154d28..7e10dcdc3090 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
570 ltv_t *pLtv; 570 ltv_t *pLtv;
571 bool_t ltvAllocated = FALSE; 571 bool_t ltvAllocated = FALSE;
572 ENCSTRCT sEncryption; 572 ENCSTRCT sEncryption;
573 size_t len;
573 574
574#ifdef USE_WDS 575#ifdef USE_WDS
575 hcf_16 hcfPort = HCF_PORT_0; 576 hcf_16 hcfPort = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
686 break; 687 break;
687 case CFG_CNF_OWN_NAME: 688 case CFG_CNF_OWN_NAME:
688 memset(lp->StationName, 0, sizeof(lp->StationName)); 689 memset(lp->StationName, 0, sizeof(lp->StationName));
689 memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); 690 len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
691 strlcpy(lp->StationName, &pLtv->u.u8[2], len);
690 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); 692 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
691 break; 693 break;
692 case CFG_CNF_LOAD_BALANCING: 694 case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
1783{ 1785{
1784 struct wl_private *lp = wl_priv(dev); 1786 struct wl_private *lp = wl_priv(dev);
1785 unsigned long flags; 1787 unsigned long flags;
1788 size_t len;
1786 int ret = 0; 1789 int ret = 0;
1787 /*------------------------------------------------------------------------*/ 1790 /*------------------------------------------------------------------------*/
1788 1791
@@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
1793 wl_lock(lp, &flags); 1796 wl_lock(lp, &flags);
1794 1797
1795 memset(lp->StationName, 0, sizeof(lp->StationName)); 1798 memset(lp->StationName, 0, sizeof(lp->StationName));
1796 1799 len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
1797 memcpy(lp->StationName, extra, wrqu->data.length); 1800 strlcpy(lp->StationName, extra, len);
1798 1801
1799 /* Commit the adapter parameters */ 1802 /* Commit the adapter parameters */
1800 wl_apply(lp); 1803 wl_apply(lp);
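
Both wlags49_h2 hunks above replace an unbounded memcpy() into a fixed-size name buffer with a length clamped by min_t() plus a NUL-terminating strlcpy(). A standalone sketch of that bounded-copy pattern (kernel-context C; the buffer length is a placeholder for sizeof(lp->StationName)):

    #include <linux/kernel.h>
    #include <linux/string.h>

    #define DEMO_NAME_LEN 32  /* stand-in for the fixed destination size */

    static void demo_set_name(char *dst, const char *src, size_t src_len)
    {
            size_t len;

            memset(dst, 0, DEMO_NAME_LEN);
            /* Never copy more than the destination can hold. */
            len = min_t(size_t, src_len, DEMO_NAME_LEN);
            strlcpy(dst, src, len);
    }

Passing the clamped length as strlcpy()'s size argument mirrors the patch and keeps the result NUL-terminated inside the fixed buffer.
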
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 35b61f7d6c63..38e44b9abf0f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
753 753
754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 754static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
755{ 755{
756 struct iscsi_cmd *cmd; 756 LIST_HEAD(ack_list);
757 struct iscsi_cmd *cmd, *cmd_p;
757 758
758 conn->exp_statsn = exp_statsn; 759 conn->exp_statsn = exp_statsn;
759 760
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
761 return; 762 return;
762 763
763 spin_lock_bh(&conn->cmd_lock); 764 spin_lock_bh(&conn->cmd_lock);
764 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 765 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
765 spin_lock(&cmd->istate_lock); 766 spin_lock(&cmd->istate_lock);
766 if ((cmd->i_state == ISTATE_SENT_STATUS) && 767 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
767 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 768 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
768 cmd->i_state = ISTATE_REMOVE; 769 cmd->i_state = ISTATE_REMOVE;
769 spin_unlock(&cmd->istate_lock); 770 spin_unlock(&cmd->istate_lock);
770 iscsit_add_cmd_to_immediate_queue(cmd, conn, 771 list_move_tail(&cmd->i_conn_node, &ack_list);
771 cmd->i_state);
772 continue; 772 continue;
773 } 773 }
774 spin_unlock(&cmd->istate_lock); 774 spin_unlock(&cmd->istate_lock);
775 } 775 }
776 spin_unlock_bh(&conn->cmd_lock); 776 spin_unlock_bh(&conn->cmd_lock);
777
778 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
779 list_del(&cmd->i_conn_node);
780 iscsit_free_cmd(cmd, false);
781 }
777} 782}
778 783
779static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 784static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
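
The iscsit_ack_from_expstatsn() change above stops freeing commands while conn->cmd_lock is held: matching entries are first moved onto a local list with list_move_tail() under the spinlock, then released after the lock is dropped. A minimal sketch of that collect-then-free pattern (kernel-context C, with placeholder item and lock names):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct demo_item {
            struct list_head node;
            bool done;
    };

    static void demo_reap(struct list_head *active, spinlock_t *lock)
    {
            struct demo_item *it, *tmp;
            LIST_HEAD(reap_list);

            spin_lock_bh(lock);
            list_for_each_entry_safe(it, tmp, active, node) {
                    if (it->done)
                            /* Only unlink and park it; no freeing under the lock. */
                            list_move_tail(&it->node, &reap_list);
            }
            spin_unlock_bh(lock);

            /* Now the potentially expensive teardown can happen lock-free. */
            list_for_each_entry_safe(it, tmp, &reap_list, node) {
                    list_del(&it->node);
                    kfree(it);
            }
    }
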
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 14d1aed5af1d..ef6d836a4d09 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1192,7 +1192,7 @@ get_target:
1192 */ 1192 */
1193alloc_tags: 1193alloc_tags:
1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); 1194 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
1195 tag_num += ISCSIT_EXTRA_TAGS; 1195 tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS;
1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; 1196 tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
1197 1197
1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); 1198 ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index f2de28e178fd..b0cac0c342e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
736 * Fallthrough 736 * Fallthrough
737 */ 737 */
738 case ISCSI_OP_SCSI_TMFUNC: 738 case ISCSI_OP_SCSI_TMFUNC:
739 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 739 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) { 740 if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
741 __iscsit_free_cmd(cmd, true, shutdown); 741 __iscsit_free_cmd(cmd, true, shutdown);
742 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 742 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
752 se_cmd = &cmd->se_cmd; 752 se_cmd = &cmd->se_cmd;
753 __iscsit_free_cmd(cmd, true, shutdown); 753 __iscsit_free_cmd(cmd, true, shutdown);
754 754
755 rc = transport_generic_free_cmd(&cmd->se_cmd, 1); 755 rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
756 if (!rc && shutdown && se_cmd->se_sess) { 756 if (!rc && shutdown && se_cmd->se_sess) {
757 __iscsit_free_cmd(cmd, true, shutdown); 757 __iscsit_free_cmd(cmd, true, shutdown);
758 target_put_sess_cmd(se_cmd->se_sess, se_cmd); 758 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60ac..0f199f6a0738 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
134 * pSCSI Host ID and enable for phba mode 134 * pSCSI Host ID and enable for phba mode
135 */ 135 */
136 sh = scsi_host_lookup(phv->phv_host_id); 136 sh = scsi_host_lookup(phv->phv_host_id);
137 if (IS_ERR(sh)) { 137 if (!sh) {
138 pr_err("pSCSI: Unable to locate SCSI Host for" 138 pr_err("pSCSI: Unable to locate SCSI Host for"
139 " phv_host_id: %d\n", phv->phv_host_id); 139 " phv_host_id: %d\n", phv->phv_host_id);
140 return PTR_ERR(sh); 140 return -EINVAL;
141 } 141 }
142 142
143 phv->phv_lld_host = sh; 143 phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
515 sh = phv->phv_lld_host; 515 sh = phv->phv_lld_host;
516 } else { 516 } else {
517 sh = scsi_host_lookup(pdv->pdv_host_id); 517 sh = scsi_host_lookup(pdv->pdv_host_id);
518 if (IS_ERR(sh)) { 518 if (!sh) {
519 pr_err("pSCSI: Unable to locate" 519 pr_err("pSCSI: Unable to locate"
520 " pdv_host_id: %d\n", pdv->pdv_host_id); 520 " pdv_host_id: %d\n", pdv->pdv_host_id);
521 return PTR_ERR(sh); 521 return -EINVAL;
522 } 522 }
523 } 523 }
524 } else { 524 } else {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6c17295e8d7c..d9b92b2c524d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
263 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 263 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
264 return TCM_INVALID_CDB_FIELD; 264 return TCM_INVALID_CDB_FIELD;
265 } 265 }
266 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
267 if (flags[0] & 0x10) {
268 pr_warn("WRITE SAME with ANCHOR not supported\n");
269 return TCM_INVALID_CDB_FIELD;
270 }
266 /* 271 /*
267 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 272 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
268 * translated into block discard requests within backend code. 273 * translated into block discard requests within backend code.
@@ -349,7 +354,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
349{ 354{
350 struct se_device *dev = cmd->se_dev; 355 struct se_device *dev = cmd->se_dev;
351 356
352 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 357 /*
358 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
359 * within target_complete_ok_work() if the command was successfully
360 * sent to the backend driver.
361 */
362 spin_lock_irq(&cmd->t_state_lock);
363 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
364 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
365 spin_unlock_irq(&cmd->t_state_lock);
366
353 /* 367 /*
354 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 368 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
355 * before the original READ I/O submission. 369 * before the original READ I/O submission.
@@ -363,7 +377,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
363{ 377{
364 struct se_device *dev = cmd->se_dev; 378 struct se_device *dev = cmd->se_dev;
365 struct scatterlist *write_sg = NULL, *sg; 379 struct scatterlist *write_sg = NULL, *sg;
366 unsigned char *buf, *addr; 380 unsigned char *buf = NULL, *addr;
367 struct sg_mapping_iter m; 381 struct sg_mapping_iter m;
368 unsigned int offset = 0, len; 382 unsigned int offset = 0, len;
369 unsigned int nlbas = cmd->t_task_nolb; 383 unsigned int nlbas = cmd->t_task_nolb;
@@ -378,6 +392,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 */ 392 */
379 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 393 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
380 return TCM_NO_SENSE; 394 return TCM_NO_SENSE;
395 /*
396 * Immediately exit + release dev->caw_sem if command has already
397 * been failed with a non-zero SCSI status.
398 */
399 if (cmd->scsi_status) {
400 pr_err("compare_and_write_callback: non zero scsi_status:"
401 " 0x%02x\n", cmd->scsi_status);
402 goto out;
403 }
381 404
382 buf = kzalloc(cmd->data_length, GFP_KERNEL); 405 buf = kzalloc(cmd->data_length, GFP_KERNEL);
383 if (!buf) { 406 if (!buf) {
@@ -508,6 +531,12 @@ sbc_compare_and_write(struct se_cmd *cmd)
508 cmd->transport_complete_callback = NULL; 531 cmd->transport_complete_callback = NULL;
509 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 532 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
510 } 533 }
534 /*
535 * Reset cmd->data_length to individual block_size in order to not
536 * confuse backend drivers that depend on this value matching the
537 * size of the I/O being submitted.
538 */
539 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
511 540
512 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 541 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
513 DMA_FROM_DEVICE); 542 DMA_FROM_DEVICE);

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 84747cc1aac0..81e945eefbbd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess,
236{ 236{
237 int rc; 237 int rc;
238 238
239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); 239 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
240 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
240 if (!se_sess->sess_cmd_map) { 241 if (!se_sess->sess_cmd_map) {
241 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 242 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
242 return -ENOMEM; 243 if (!se_sess->sess_cmd_map) {
244 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
245 return -ENOMEM;
246 }
243 } 247 }
244 248
245 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 249 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
246 if (rc < 0) { 250 if (rc < 0) {
247 pr_err("Unable to init se_sess->sess_tag_pool," 251 pr_err("Unable to init se_sess->sess_tag_pool,"
248 " tag_num: %u\n", tag_num); 252 " tag_num: %u\n", tag_num);
249 kfree(se_sess->sess_cmd_map); 253 if (is_vmalloc_addr(se_sess->sess_cmd_map))
254 vfree(se_sess->sess_cmd_map);
255 else
256 kfree(se_sess->sess_cmd_map);
250 se_sess->sess_cmd_map = NULL; 257 se_sess->sess_cmd_map = NULL;
251 return -ENOMEM; 258 return -ENOMEM;
252 } 259 }
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess)
412{ 419{
413 if (se_sess->sess_cmd_map) { 420 if (se_sess->sess_cmd_map) {
414 percpu_ida_destroy(&se_sess->sess_tag_pool); 421 percpu_ida_destroy(&se_sess->sess_tag_pool);
415 kfree(se_sess->sess_cmd_map); 422 if (is_vmalloc_addr(se_sess->sess_cmd_map))
423 vfree(se_sess->sess_cmd_map);
424 else
425 kfree(se_sess->sess_cmd_map);
416 } 426 }
417 kmem_cache_free(se_sess_cache, se_sess); 427 kmem_cache_free(se_sess_cache, se_sess);
418} 428}
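
transport_alloc_session_tags() above now tries kzalloc() first (with __GFP_NOWARN so a failure stays quiet) and falls back to vzalloc() when a contiguous allocation cannot be satisfied, with is_vmalloc_addr() deciding how to free the buffer later. A small sketch of that fallback pair (kernel-context C; the helper names are made up for illustration):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *demo_alloc_big(size_t size)
    {
            void *p;

            /* Prefer physically contiguous memory, but stay quiet on failure. */
            p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
            if (!p)
                    p = vzalloc(size);  /* fall back to vmalloc space */
            return p;
    }

    static void demo_free_big(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }

Later kernels wrap the same fallback in kvzalloc()/kvfree(); at this point the open-coded pair with is_vmalloc_addr() is what the patch uses.
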
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 4d22e7d2adca..474cd44fac14 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
82 mutex_lock(&g_device_mutex); 82 mutex_lock(&g_device_mutex);
83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
84 84
85 if (!se_dev->dev_attrib.emulate_3pc)
86 continue;
87
85 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 88 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
86 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 89 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
87 90
@@ -298,8 +301,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
298 (unsigned long long)xop->dst_lba); 301 (unsigned long long)xop->dst_lba);
299 302
300 if (dc != 0) { 303 if (dc != 0) {
301 xop->dbl = (desc[29] << 16) & 0xff; 304 xop->dbl = (desc[29] & 0xff) << 16;
302 xop->dbl |= (desc[30] << 8) & 0xff; 305 xop->dbl |= (desc[30] & 0xff) << 8;
303 xop->dbl |= desc[31] & 0xff; 306 xop->dbl |= desc[31] & 0xff;
304 307
305 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); 308 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
357 struct se_cmd se_cmd; 360 struct se_cmd se_cmd;
358 struct xcopy_op *xcopy_op; 361 struct xcopy_op *xcopy_op;
359 struct completion xpt_passthrough_sem; 362 struct completion xpt_passthrough_sem;
363 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
360}; 364};
361 365
362static struct se_port xcopy_pt_port; 366static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
675 679
676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 680 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
677 se_cmd->scsi_status); 681 se_cmd->scsi_status);
678 return 0; 682
683 return (se_cmd->scsi_status) ? -EINVAL : 0;
679} 684}
680 685
681static int target_xcopy_read_source( 686static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
708 (unsigned long long)src_lba, src_sectors, length); 713 (unsigned long long)src_lba, src_sectors, length);
709 714
710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 715 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
711 DMA_FROM_DEVICE, 0, NULL); 716 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
712 xop->src_pt_cmd = xpt_cmd; 717 xop->src_pt_cmd = xpt_cmd;
713 718
714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 719 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
768 (unsigned long long)dst_lba, dst_sectors, length); 773 (unsigned long long)dst_lba, dst_sectors, length);
769 774
770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 775 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
771 DMA_TO_DEVICE, 0, NULL); 776 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
772 xop->dst_pt_cmd = xpt_cmd; 777 xop->dst_pt_cmd = xpt_cmd;
773 778
774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 779 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
884 889
885sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 890sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
886{ 891{
892 struct se_device *dev = se_cmd->se_dev;
887 struct xcopy_op *xop = NULL; 893 struct xcopy_op *xop = NULL;
888 unsigned char *p = NULL, *seg_desc; 894 unsigned char *p = NULL, *seg_desc;
889 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 895 unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
896 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
890 int rc; 897 int rc;
891 unsigned short tdll; 898 unsigned short tdll;
892 899
900 if (!dev->dev_attrib.emulate_3pc) {
901 pr_err("EXTENDED_COPY operation explicitly disabled\n");
902 return TCM_UNSUPPORTED_SCSI_OPCODE;
903 }
904
893 sa = se_cmd->t_task_cdb[1] & 0x1f; 905 sa = se_cmd->t_task_cdb[1] & 0x1f;
894 if (sa != 0x00) { 906 if (sa != 0x00) {
895 pr_err("EXTENDED_COPY(LID4) not supported\n"); 907 pr_err("EXTENDED_COPY(LID4) not supported\n");
896 return TCM_UNSUPPORTED_SCSI_OPCODE; 908 return TCM_UNSUPPORTED_SCSI_OPCODE;
897 } 909 }
898 910
911 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
912 if (!xop) {
913 pr_err("Unable to allocate xcopy_op\n");
914 return TCM_OUT_OF_RESOURCES;
915 }
916 xop->xop_se_cmd = se_cmd;
917
899 p = transport_kmap_data_sg(se_cmd); 918 p = transport_kmap_data_sg(se_cmd);
900 if (!p) { 919 if (!p) {
901 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 920 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
921 kfree(xop);
902 return TCM_OUT_OF_RESOURCES; 922 return TCM_OUT_OF_RESOURCES;
903 } 923 }
904 924
905 list_id = p[0]; 925 list_id = p[0];
906 if (list_id != 0x00) { 926 list_id_usage = (p[1] & 0x18) >> 3;
907 pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 927
908 goto out;
909 }
910 list_id_usage = (p[1] & 0x18);
911 /* 928 /*
912 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 929 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
913 */ 930 */
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
920 goto out; 937 goto out;
921 } 938 }
922 939
923 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
924 if (!xop) {
925 pr_err("Unable to allocate xcopy_op\n");
926 goto out;
927 }
928 xop->xop_se_cmd = se_cmd;
929
930 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 940 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
931 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 941 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
932 tdll, sdll, inline_dl); 942 tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
935 if (rc <= 0) 945 if (rc <= 0)
936 goto out; 946 goto out;
937 947
948 if (xop->src_dev->dev_attrib.block_size !=
949 xop->dst_dev->dev_attrib.block_size) {
950 pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
951 " block_size: %u currently unsupported\n",
952 xop->src_dev->dev_attrib.block_size,
953 xop->dst_dev->dev_attrib.block_size);
954 xcopy_pt_undepend_remotedev(xop);
955 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
956 goto out;
957 }
958
938 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 959 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
939 rc * XCOPY_TARGET_DESC_LEN); 960 rc * XCOPY_TARGET_DESC_LEN);
940 seg_desc = &p[16]; 961 seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
957 if (p) 978 if (p)
958 transport_kunmap_data_sg(se_cmd); 979 transport_kunmap_data_sg(se_cmd);
959 kfree(xop); 980 kfree(xop);
960 return TCM_INVALID_CDB_FIELD; 981 return ret;
961} 982}
962 983
963static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 984static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
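
One of the xcopy fixes above corrects how the 24-bit descriptor byte-length field is assembled: the byte must be masked before the shift, not after, or the upper bytes are lost. A minimal sketch of reading such a big-endian 24-bit field (kernel-context C, with a placeholder helper name):

    #include <linux/types.h>

    /* Assemble a 24-bit big-endian value from three descriptor bytes. */
    static u32 demo_get_be24(const u8 *desc)
    {
            return ((u32)desc[0] << 16) | ((u32)desc[1] << 8) | desc[2];
    }
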
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index f10a6ad37c06..c2301da08ac7 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
310 } 310 }
311 311
312 th_zone = conf->pzone_data; 312 th_zone = conf->pzone_data;
313 if (th_zone->therm_dev)
314 return;
315 313
316 if (th_zone->bind == false) { 314 if (th_zone->bind == false) {
317 for (i = 0; i < th_zone->cool_dev_size; i++) { 315 for (i = 0; i < th_zone->cool_dev_size; i++) {
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index b43afda8acd1..32f38b90c4f6 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
317 317
318 con = readl(data->base + reg->tmu_ctrl); 318 con = readl(data->base + reg->tmu_ctrl);
319 319
320 if (pdata->test_mux)
321 con |= (pdata->test_mux << reg->test_mux_addr_shift);
322
320 if (pdata->reference_voltage) { 323 if (pdata->reference_voltage) {
321 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); 324 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
322 con |= pdata->reference_voltage << reg->buf_vref_sel_shift; 325 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
488 }, 491 },
489 { 492 {
490 .compatible = "samsung,exynos4412-tmu", 493 .compatible = "samsung,exynos4412-tmu",
491 .data = (void *)EXYNOS5250_TMU_DRV_DATA, 494 .data = (void *)EXYNOS4412_TMU_DRV_DATA,
492 }, 495 },
493 { 496 {
494 .compatible = "samsung,exynos5250-tmu", 497 .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
629 if (ret) 632 if (ret)
630 return ret; 633 return ret;
631 634
632 if (pdata->type == SOC_ARCH_EXYNOS || 635 if (pdata->type == SOC_ARCH_EXYNOS4210 ||
633 pdata->type == SOC_ARCH_EXYNOS4210 || 636 pdata->type == SOC_ARCH_EXYNOS4412 ||
634 pdata->type == SOC_ARCH_EXYNOS5440) 637 pdata->type == SOC_ARCH_EXYNOS5250 ||
638 pdata->type == SOC_ARCH_EXYNOS5440)
635 data->soc = pdata->type; 639 data->soc = pdata->type;
636 else { 640 else {
637 ret = -EINVAL; 641 ret = -EINVAL;
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index b364c9eee701..3fb65547e64c 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -41,7 +41,8 @@ enum calibration_mode {
41 41
42enum soc_type { 42enum soc_type {
43 SOC_ARCH_EXYNOS4210 = 1, 43 SOC_ARCH_EXYNOS4210 = 1,
44 SOC_ARCH_EXYNOS, 44 SOC_ARCH_EXYNOS4412,
45 SOC_ARCH_EXYNOS5250,
45 SOC_ARCH_EXYNOS5440, 46 SOC_ARCH_EXYNOS5440,
46}; 47};
47 48
@@ -84,6 +85,7 @@ enum soc_type {
84 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl 85 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
85 reg. 86 reg.
86 * @tmu_ctrl: TMU main controller register. 87 * @tmu_ctrl: TMU main controller register.
88 * @test_mux_addr_shift: shift bits of test mux address.
87 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register. 89 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
88 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register. 90 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
89 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. 91 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
150 u32 triminfo_reload_shift; 152 u32 triminfo_reload_shift;
151 153
152 u32 tmu_ctrl; 154 u32 tmu_ctrl;
155 u32 test_mux_addr_shift;
153 u32 buf_vref_sel_shift; 156 u32 buf_vref_sel_shift;
154 u32 buf_vref_sel_mask; 157 u32 buf_vref_sel_mask;
155 u32 therm_trip_mode_shift; 158 u32 therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
257 * @first_point_trim: temp value of the first point trimming 260 * @first_point_trim: temp value of the first point trimming
258 * @second_point_trim: temp value of the second point trimming 261 * @second_point_trim: temp value of the second point trimming
259 * @default_temp_offset: default temperature offset in case of no trimming 262 * @default_temp_offset: default temperature offset in case of no trimming
 263 * @test_mux: information if SoC supports test MUX
260 * @cal_type: calibration type for temperature 264 * @cal_type: calibration type for temperature
261 * @cal_mode: calibration mode for temperature 265 * @cal_mode: calibration mode for temperature
262 * @freq_clip_table: Table representing frequency reduction percentage. 266 * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
286 u8 first_point_trim; 290 u8 first_point_trim;
287 u8 second_point_trim; 291 u8 second_point_trim;
288 u8 default_temp_offset; 292 u8 default_temp_offset;
293 u8 test_mux;
289 294
290 enum calibration_type cal_type; 295 enum calibration_type cal_type;
291 enum calibration_mode cal_mode; 296 enum calibration_mode cal_mode;
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 9002499c1f69..073c292baa53 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
90}; 90};
91#endif 91#endif
92 92
93#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) 93#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
94static const struct exynos_tmu_registers exynos5250_tmu_registers = { 94static const struct exynos_tmu_registers exynos4412_tmu_registers = {
95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON, 98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT, 99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
101 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
101 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, 102 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
102 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, 103 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
103 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 104 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
128 .emul_time_mask = EXYNOS_EMUL_TIME_MASK, 129 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
129}; 130};
130 131
131#define EXYNOS5250_TMU_DATA \ 132#define EXYNOS4412_TMU_DATA \
132 .threshold_falling = 10, \ 133 .threshold_falling = 10, \
133 .trigger_levels[0] = 85, \ 134 .trigger_levels[0] = 85, \
134 .trigger_levels[1] = 103, \ 135 .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
162 .temp_level = 103, \ 163 .temp_level = 103, \
163 }, \ 164 }, \
164 .freq_tab_count = 2, \ 165 .freq_tab_count = 2, \
165 .type = SOC_ARCH_EXYNOS, \ 166 .registers = &exynos4412_tmu_registers, \
166 .registers = &exynos5250_tmu_registers, \
167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
169 TMU_SUPPORT_EMUL_TIME) 169 TMU_SUPPORT_EMUL_TIME)
170#endif
170 171
172#if defined(CONFIG_SOC_EXYNOS4412)
173struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
174 .tmu_data = {
175 {
176 EXYNOS4412_TMU_DATA,
177 .type = SOC_ARCH_EXYNOS4412,
178 .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
179 },
180 },
181 .tmu_count = 1,
182};
183#endif
184
185#if defined(CONFIG_SOC_EXYNOS5250)
171struct exynos_tmu_init_data const exynos5250_default_tmu_data = { 186struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
172 .tmu_data = { 187 .tmu_data = {
173 { EXYNOS5250_TMU_DATA }, 188 {
189 EXYNOS4412_TMU_DATA,
190 .type = SOC_ARCH_EXYNOS5250,
191 },
174 }, 192 },
175 .tmu_count = 1, 193 .tmu_count = 1,
176}; 194};
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index dc7feb51099b..a1ea19d9e0a6 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -95,6 +95,10 @@
95 95
96#define EXYNOS_MAX_TRIGGER_PER_REG 4 96#define EXYNOS_MAX_TRIGGER_PER_REG 4
97 97
98/* Exynos4412 specific */
99#define EXYNOS4412_MUX_ADDR_VALUE 6
100#define EXYNOS4412_MUX_ADDR_SHIFT 20
101
98/*exynos5440 specific registers*/ 102/*exynos5440 specific registers*/
99#define EXYNOS5440_TMU_S0_7_TRIM 0x000 103#define EXYNOS5440_TMU_S0_7_TRIM 0x000
100#define EXYNOS5440_TMU_S0_7_CTRL 0x020 104#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
138#define EXYNOS4210_TMU_DRV_DATA (NULL) 142#define EXYNOS4210_TMU_DRV_DATA (NULL)
139#endif 143#endif
140 144
141#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)) 145#if defined(CONFIG_SOC_EXYNOS4412)
146extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
147#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
148#else
149#define EXYNOS4412_TMU_DRV_DATA (NULL)
150#endif
151
152#if defined(CONFIG_SOC_EXYNOS5250)
142extern struct exynos_tmu_init_data const exynos5250_default_tmu_data; 153extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
143#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data) 154#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
144#else 155#else
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index eeef0e2498ca..fdb07199d9c2 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
159 159
160 INIT_LIST_HEAD(&hwmon->tz_list); 160 INIT_LIST_HEAD(&hwmon->tz_list);
161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
162 hwmon->device = hwmon_device_register(&tz->device); 162 hwmon->device = hwmon_device_register(NULL);
163 if (IS_ERR(hwmon->device)) { 163 if (IS_ERR(hwmon->device)) {
164 result = PTR_ERR(hwmon->device); 164 result = PTR_ERR(hwmon->device);
165 goto free_mem; 165 goto free_mem;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 4f8b9af54a5a..5a47cc8c8f85 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
110 } else { 110 } else {
111 dev_err(bgp->dev, 111 dev_err(bgp->dev,
112 "Failed to read PCB state. Using defaults\n"); 112 "Failed to read PCB state. Using defaults\n");
113 ret = 0;
113 } 114 }
114 } 115 }
115 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant); 116 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index f36950e4134f..7722cb9d5a80 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
316 int phy_id = topology_physical_package_id(cpu); 316 int phy_id = topology_physical_package_id(cpu);
317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); 317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
318 bool notify = false; 318 bool notify = false;
319 unsigned long flags;
319 320
320 if (!phdev) 321 if (!phdev)
321 return; 322 return;
322 323
323 spin_lock(&pkg_work_lock); 324 spin_lock_irqsave(&pkg_work_lock, flags);
324 ++pkg_work_cnt; 325 ++pkg_work_cnt;
325 if (unlikely(phy_id > max_phy_id)) { 326 if (unlikely(phy_id > max_phy_id)) {
326 spin_unlock(&pkg_work_lock); 327 spin_unlock_irqrestore(&pkg_work_lock, flags);
327 return; 328 return;
328 } 329 }
329 pkg_work_scheduled[phy_id] = 0; 330 pkg_work_scheduled[phy_id] = 0;
330 spin_unlock(&pkg_work_lock); 331 spin_unlock_irqrestore(&pkg_work_lock, flags);
331 332
332 enable_pkg_thres_interrupt(); 333 enable_pkg_thres_interrupt();
333 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 334 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
397 int thres_count; 398 int thres_count;
398 u32 eax, ebx, ecx, edx; 399 u32 eax, ebx, ecx, edx;
399 u8 *temp; 400 u8 *temp;
401 unsigned long flags;
400 402
401 cpuid(6, &eax, &ebx, &ecx, &edx); 403 cpuid(6, &eax, &ebx, &ecx, &edx);
402 thres_count = ebx & 0x07; 404 thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
420 goto err_ret_unlock; 422 goto err_ret_unlock;
421 } 423 }
422 424
423 spin_lock(&pkg_work_lock); 425 spin_lock_irqsave(&pkg_work_lock, flags);
424 if (topology_physical_package_id(cpu) > max_phy_id) 426 if (topology_physical_package_id(cpu) > max_phy_id)
425 max_phy_id = topology_physical_package_id(cpu); 427 max_phy_id = topology_physical_package_id(cpu);
426 temp = krealloc(pkg_work_scheduled, 428 temp = krealloc(pkg_work_scheduled,
427 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); 429 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
428 if (!temp) { 430 if (!temp) {
429 spin_unlock(&pkg_work_lock); 431 spin_unlock_irqrestore(&pkg_work_lock, flags);
430 err = -ENOMEM; 432 err = -ENOMEM;
431 goto err_ret_free; 433 goto err_ret_free;
432 } 434 }
433 pkg_work_scheduled = temp; 435 pkg_work_scheduled = temp;
434 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; 436 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
435 spin_unlock(&pkg_work_lock); 437 spin_unlock_irqrestore(&pkg_work_lock, flags);
436 438
437 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu); 439 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
438 phy_dev_entry->first_cpu = cpu; 440 phy_dev_entry->first_cpu = cpu;
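
The x86_pkg_temp_thermal hunks above convert plain spin_lock()/spin_unlock() on pkg_work_lock to the irqsave variants, so the lock can be taken safely regardless of whether local interrupts are already disabled on the calling path. A minimal sketch of the conversion (kernel-context C; the lock and counter are placeholders):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned long demo_count;

    static void demo_bump(void)
    {
            unsigned long flags;

            /* Save and disable local interrupts while the lock is held. */
            spin_lock_irqsave(&demo_lock, flags);
            demo_count++;
            spin_unlock_irqrestore(&demo_lock, flags);
    }
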
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index e61c36cbb866..c193af6a628f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -636,6 +636,7 @@ struct console xenboot_console = {
636 .name = "xenboot", 636 .name = "xenboot",
637 .write = xenboot_write_console, 637 .write = xenboot_write_console,
638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, 638 .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
639 .index = -1,
639}; 640};
640#endif /* CONFIG_EARLY_PRINTK */ 641#endif /* CONFIG_EARLY_PRINTK */
641 642
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 01bf5eb4f238..7a744b69c3d1 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2183,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2183 2183
2184 if (!input_available_p(tty, 0)) { 2184 if (!input_available_p(tty, 0)) {
2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 2185 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
2186 retval = -EIO; 2186 up_read(&tty->termios_rwsem);
2187 break; 2187 tty_flush_to_ldisc(tty);
2188 } 2188 down_read(&tty->termios_rwsem);
2189 if (tty_hung_up_p(file)) 2189 if (!input_available_p(tty, 0)) {
2190 break; 2190 retval = -EIO;
2191 if (!timeout) 2191 break;
2192 break; 2192 }
2193 if (file->f_flags & O_NONBLOCK) { 2193 } else {
2194 retval = -EAGAIN; 2194 if (tty_hung_up_p(file))
2195 break; 2195 break;
2196 } 2196 if (!timeout)
2197 if (signal_pending(current)) { 2197 break;
2198 retval = -ERESTARTSYS; 2198 if (file->f_flags & O_NONBLOCK) {
2199 break; 2199 retval = -EAGAIN;
2200 } 2200 break;
2201 n_tty_set_room(tty); 2201 }
2202 up_read(&tty->termios_rwsem); 2202 if (signal_pending(current)) {
2203 retval = -ERESTARTSYS;
2204 break;
2205 }
2206 n_tty_set_room(tty);
2207 up_read(&tty->termios_rwsem);
2203 2208
2204 timeout = schedule_timeout(timeout); 2209 timeout = schedule_timeout(timeout);
2205 2210
2206 down_read(&tty->termios_rwsem); 2211 down_read(&tty->termios_rwsem);
2207 continue; 2212 continue;
2213 }
2208 } 2214 }
2209 __set_current_state(TASK_RUNNING); 2215 __set_current_state(TASK_RUNNING);
2210 2216
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d067285a2d20..6b0f75eac8a2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
1499/* 1499/*
1500 * Get ip name usart or uart 1500 * Get ip name usart or uart
1501 */ 1501 */
1502static int atmel_get_ip_name(struct uart_port *port) 1502static void atmel_get_ip_name(struct uart_port *port)
1503{ 1503{
1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1505 int name = UART_GET_IP_NAME(port); 1505 int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
1518 atmel_port->is_usart = false; 1518 atmel_port->is_usart = false;
1519 } else { 1519 } else {
1520 dev_err(port->dev, "Not supported ip name, set to uart\n"); 1520 dev_err(port->dev, "Not supported ip name, set to uart\n");
1521 return -EINVAL;
1522 } 1521 }
1523
1524 return 0;
1525} 1522}
1526 1523
1527/* 1524/*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
2405 /* 2402 /*
2406 * Get port name of usart or uart 2403 * Get port name of usart or uart
2407 */ 2404 */
2408 ret = atmel_get_ip_name(&port->uart); 2405 atmel_get_ip_name(&port->uart);
2409 if (ret < 0)
2410 goto err_add_port;
2411 2406
2412 return 0; 2407 return 0;
2413 2408
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a0ebbc9ce5cd..042aa077b5b3 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
1912 1912
1913 sport->devdata = of_id->data; 1913 sport->devdata = of_id->data;
1914 1914
1915 if (of_device_is_stdout_path(np))
1916 add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
1917
1918 return 0; 1915 return 0;
1919} 1916}
1920#else 1917#else
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 93b697a0de65..15ad6fcda88b 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
561 if (!mmres || !irqres) 561 if (!mmres || !irqres)
562 return -ENODEV; 562 return -ENODEV;
563 563
564 if (np) 564 if (np) {
565 port = of_alias_get_id(np, "serial"); 565 port = of_alias_get_id(np, "serial");
566 if (port >= VT8500_MAX_PORTS) 566 if (port >= VT8500_MAX_PORTS)
567 port = -1; 567 port = -1;
568 else 568 } else {
569 port = -1; 569 port = -1;
570 }
570 571
571 if (port < 0) { 572 if (port < 0) {
572 /* calculate the port id */ 573 /* calculate the port id */
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ba475632c5fa..0e808cf91d97 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
642{ 642{
643 struct uio_device *idev = vma->vm_private_data; 643 struct uio_device *idev = vma->vm_private_data;
644 int mi = uio_find_mem_index(vma); 644 int mi = uio_find_mem_index(vma);
645 struct uio_mem *mem;
645 if (mi < 0) 646 if (mi < 0)
646 return -EINVAL; 647 return -EINVAL;
648 mem = idev->info->mem + mi;
647 649
648 vma->vm_ops = &uio_physical_vm_ops; 650 if (vma->vm_end - vma->vm_start > mem->size)
651 return -EINVAL;
649 652
653 vma->vm_ops = &uio_physical_vm_ops;
650 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 654 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
651 655
656 /*
657 * We cannot use the vm_iomap_memory() helper here,
658 * because vma->vm_pgoff is the map index we looked
659 * up above in uio_find_mem_index(), rather than an
660 * actual page offset into the mmap.
661 *
662 * So we just do the physical mmap without a page
663 * offset.
664 */
652 return remap_pfn_range(vma, 665 return remap_pfn_range(vma,
653 vma->vm_start, 666 vma->vm_start,
654 idev->info->mem[mi].addr >> PAGE_SHIFT, 667 mem->addr >> PAGE_SHIFT,
655 vma->vm_end - vma->vm_start, 668 vma->vm_end - vma->vm_start,
656 vma->vm_page_prot); 669 vma->vm_page_prot);
657} 670}
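
The uio.c hunk above makes uio_mmap_physical() refuse mappings larger than the underlying memory region and then calls remap_pfn_range() from the region start, because vma->vm_pgoff carries the region index rather than a real page offset. A sketch of that shape (kernel-context C; mem_addr and mem_size stand in for the region's address and length):

    #include <linux/mm.h>

    static int demo_mmap_region(struct vm_area_struct *vma,
                                phys_addr_t mem_addr, size_t mem_size)
    {
            /* Refuse to map more than the device region actually covers. */
            if (vma->vm_end - vma->vm_start > mem_size)
                    return -EINVAL;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            /* vm_pgoff is not a byte offset here, so map from the region start. */
            return remap_pfn_range(vma, vma->vm_start,
                                   mem_addr >> PAGE_SHIFT,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }
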
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 042320a6c6c7..d514332ac081 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = {
129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), 129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829),
130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata, 130 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
131 }, 131 },
132 { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ } 132 {
133 /* Intel Clovertrail */
134 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006),
135 .driver_data = (kernel_ulong_t)&penwell_pci_platdata,
136 },
137 { 0 } /* end: all zeroes */
133}; 138};
134MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); 139MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table);
135 140
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6f96795dd20c..64d7a6d9a1ad 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
100{ 100{
101 struct usb_hcd *hcd = ci->hcd; 101 struct usb_hcd *hcd = ci->hcd;
102 102
103 usb_remove_hcd(hcd); 103 if (hcd) {
104 usb_put_hcd(hcd); 104 usb_remove_hcd(hcd);
105 usb_put_hcd(hcd);
106 }
105 if (ci->platdata->reg_vbus) 107 if (ci->platdata->reg_vbus)
106 regulator_disable(ci->platdata->reg_vbus); 108 regulator_disable(ci->platdata->reg_vbus);
107} 109}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 5b44cd47da5b..01fe36273f3b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
97 /* Alcor Micro Corp. Hub */ 97 /* Alcor Micro Corp. Hub */
98 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, 98 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
99 99
100 /* MicroTouch Systems touchscreen */
101 { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
102
100 /* appletouch */ 103 /* appletouch */
101 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 104 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
102 105
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
130 /* Broadcom BCM92035DGROM BT dongle */ 133 /* Broadcom BCM92035DGROM BT dongle */
131 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, 134 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
132 135
136 /* MAYA44USB sound device */
137 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
138
133 /* Action Semiconductor flash disk */ 139 /* Action Semiconductor flash disk */
134 { USB_DEVICE(0x10d6, 0x2200), .driver_info = 140 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
135 USB_QUIRK_STRING_FETCH_255 }, 141 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 0658908d8968..44cf775a8627 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -2256,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c,
2256 data->raw_descs + ret, 2256 data->raw_descs + ret,
2257 (sizeof data->raw_descs) - ret, 2257 (sizeof data->raw_descs) - ret,
2258 __ffs_func_bind_do_descs, func); 2258 __ffs_func_bind_do_descs, func);
2259 if (unlikely(ret < 0))
2260 goto error;
2259 } 2261 }
2260 2262
2261 /* 2263 /*
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index cc9207473dbc..0ac6064aa3b8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = {
2054/* 2054/*
2055 * probe - binds to the platform device 2055 * probe - binds to the platform device
2056 */ 2056 */
2057static int __init pxa25x_udc_probe(struct platform_device *pdev) 2057static int pxa25x_udc_probe(struct platform_device *pdev)
2058{ 2058{
2059 struct pxa25x_udc *dev = &memory; 2059 struct pxa25x_udc *dev = &memory;
2060 int retval, irq; 2060 int retval, irq;
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
2203 pullup_off(); 2203 pullup_off();
2204} 2204}
2205 2205
2206static int __exit pxa25x_udc_remove(struct platform_device *pdev) 2206static int pxa25x_udc_remove(struct platform_device *pdev)
2207{ 2207{
2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2208 struct pxa25x_udc *dev = platform_get_drvdata(pdev);
2209 2209
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev)
2294 2294
2295static struct platform_driver udc_driver = { 2295static struct platform_driver udc_driver = {
2296 .shutdown = pxa25x_udc_shutdown, 2296 .shutdown = pxa25x_udc_shutdown,
2297 .remove = __exit_p(pxa25x_udc_remove), 2297 .probe = pxa25x_udc_probe,
2298 .remove = pxa25x_udc_remove,
2298 .suspend = pxa25x_udc_suspend, 2299 .suspend = pxa25x_udc_suspend,
2299 .resume = pxa25x_udc_resume, 2300 .resume = pxa25x_udc_resume,
2300 .driver = { 2301 .driver = {
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = {
2303 }, 2304 },
2304}; 2305};
2305 2306
2306module_platform_driver_probe(udc_driver, pxa25x_udc_probe); 2307module_platform_driver(udc_driver);
2307 2308
2308MODULE_DESCRIPTION(DRIVER_DESC); 2309MODULE_DESCRIPTION(DRIVER_DESC);
2309MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); 2310MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
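
The pxa25x_udc change above drops the __init/__exit annotations and module_platform_driver_probe() in favour of an ordinary probe/remove pair registered with module_platform_driver(), so the device can be bound and unbound after boot. A bare-bones sketch of the resulting registration style for a kernel of this vintage (driver name and callbacks are placeholders):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;  /* resource setup would go here */
    }

    static int demo_remove(struct platform_device *pdev)
    {
            return 0;  /* teardown would go here */
    }

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .remove = demo_remove,
            .driver = {
                    .name = "demo-udc",
            },
    };
    module_platform_driver(demo_driver);

    MODULE_LICENSE("GPL");
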
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 6bddf1aa2347..a8a99e4748d5 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
543 * FIFO, requests of >512 cause the endpoint to get stuck with a 543 * FIFO, requests of >512 cause the endpoint to get stuck with a
544 * fragment of the end of the transfer in it. 544 * fragment of the end of the transfer in it.
545 */ 545 */
546 if (can_write > 512) 546 if (can_write > 512 && !periodic)
547 can_write = 512; 547 can_write = 512;
548 548
549 /* 549 /*
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 2c76ef1320ea..08ef2829a7e2 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
799 * switchable ports. 799 * switchable ports.
800 */ 800 */
801 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 801 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
802 cpu_to_le32(ports_available)); 802 ports_available);
803 803
804 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 804 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
805 &ports_available); 805 &ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
821 * host. 821 * host.
822 */ 822 */
823 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 823 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
824 cpu_to_le32(ports_available)); 824 ports_available);
825 825
826 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 826 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
827 &ports_available); 827 &ports_available);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 773a6b28c4f1..e8b4c56dcf62 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1157,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1157 t1 = xhci_port_state_to_neutral(t1); 1157 t1 = xhci_port_state_to_neutral(t1);
1158 if (t1 != t2) 1158 if (t1 != t2)
1159 xhci_writel(xhci, t2, port_array[port_index]); 1159 xhci_writel(xhci, t2, port_array[port_index]);
1160
1161 if (hcd->speed != HCD_USB3) {
1162 /* enable remote wake up for USB 2.0 */
1163 __le32 __iomem *addr;
1164 u32 tmp;
1165
1166 /* Get the port power control register address. */
1167 addr = port_array[port_index] + PORTPMSC;
1168 tmp = xhci_readl(xhci, addr);
1169 tmp |= PORT_RWE;
1170 xhci_writel(xhci, tmp, addr);
1171 }
1172 } 1160 }
1173 hcd->state = HC_STATE_SUSPENDED; 1161 hcd->state = HC_STATE_SUSPENDED;
1174 bus_state->next_statechange = jiffies + msecs_to_jiffies(10); 1162 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1247,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
1247 xhci_ring_device(xhci, slot_id); 1235 xhci_ring_device(xhci, slot_id);
1248 } else 1236 } else
1249 xhci_writel(xhci, temp, port_array[port_index]); 1237 xhci_writel(xhci, temp, port_array[port_index]);
1250
1251 if (hcd->speed != HCD_USB3) {
1252 /* disable remote wake up for USB 2.0 */
1253 __le32 __iomem *addr;
1254 u32 tmp;
1255
1256 /* Add one to the port status register address to get
1257 * the port power control register address.
1258 */
1259 addr = port_array[port_index] + PORTPMSC;
1260 tmp = xhci_readl(xhci, addr);
1261 tmp &= ~PORT_RWE;
1262 xhci_writel(xhci, tmp, addr);
1263 }
1264 } 1238 }
1265 1239
1266 (void) xhci_readl(xhci, &xhci->op_regs->command); 1240 (void) xhci_readl(xhci, &xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 236c3aabe940..b8dffd59eb25 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -35,6 +35,9 @@
35#define PCI_VENDOR_ID_ETRON 0x1b6f 35#define PCI_VENDOR_ID_ETRON 0x1b6f
36#define PCI_DEVICE_ID_ASROCK_P67 0x7023 36#define PCI_DEVICE_ID_ASROCK_P67 0x7023
37 37
38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
40
38static const char hcd_name[] = "xhci_hcd"; 41static const char hcd_name[] = "xhci_hcd";
39 42
40/* called after powerup, by probe or system-pm "wakeup" */ 43/* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
69 "QUIRK: Fresco Logic xHC needs configure" 72 "QUIRK: Fresco Logic xHC needs configure"
70 " endpoint cmd after reset endpoint"); 73 " endpoint cmd after reset endpoint");
71 } 74 }
75 if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
76 pdev->revision == 0x4) {
77 xhci->quirks |= XHCI_SLOW_SUSPEND;
78 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
79 "QUIRK: Fresco Logic xHC revision %u"
80 "must be suspended extra slowly",
81 pdev->revision);
82 }
72 /* Fresco Logic confirms: all revisions of this chip do not 83 /* Fresco Logic confirms: all revisions of this chip do not
73 * support MSI, even though some of them claim to in their PCI 84 * support MSI, even though some of them claim to in their PCI
74 * capabilities. 85 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
110 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 121 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
111 xhci->quirks |= XHCI_AVOID_BEI; 122 xhci->quirks |= XHCI_AVOID_BEI;
112 } 123 }
124 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
125 (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
126 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
127 /* Workaround for occasional spurious wakeups from S5 (or
128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS
130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 }
113 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 133 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
114 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
115 xhci->quirks |= XHCI_RESET_ON_RESUME; 135 xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
217 usb_put_hcd(xhci->shared_hcd); 237 usb_put_hcd(xhci->shared_hcd);
218 } 238 }
219 usb_hcd_pci_remove(dev); 239 usb_hcd_pci_remove(dev);
240
241 /* Workaround for spurious wakeups at shutdown with HSW */
242 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
243 pci_set_power_state(dev, PCI_D3hot);
244
220 kfree(xhci); 245 kfree(xhci);
221} 246}
222 247
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1e36dbb48366..6e0d886bcce5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
730 730
731 spin_lock_irq(&xhci->lock); 731 spin_lock_irq(&xhci->lock);
732 xhci_halt(xhci); 732 xhci_halt(xhci);
733 /* Workaround for spurious wakeups at shutdown with HSW */
734 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
735 xhci_reset(xhci);
733 spin_unlock_irq(&xhci->lock); 736 spin_unlock_irq(&xhci->lock);
734 737
735 xhci_cleanup_msix(xhci); 738 xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
737 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 740 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
738 "xhci_shutdown completed - status = %x", 741 "xhci_shutdown completed - status = %x",
739 xhci_readl(xhci, &xhci->op_regs->status)); 742 xhci_readl(xhci, &xhci->op_regs->status));
743
744 /* Yet another workaround for spurious wakeups at shutdown with HSW */
745 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
746 pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
740} 747}
741 748
742#ifdef CONFIG_PM 749#ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
839int xhci_suspend(struct xhci_hcd *xhci) 846int xhci_suspend(struct xhci_hcd *xhci)
840{ 847{
841 int rc = 0; 848 int rc = 0;
849 unsigned int delay = XHCI_MAX_HALT_USEC;
842 struct usb_hcd *hcd = xhci_to_hcd(xhci); 850 struct usb_hcd *hcd = xhci_to_hcd(xhci);
843 u32 command; 851 u32 command;
844 852
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
861 command = xhci_readl(xhci, &xhci->op_regs->command); 869 command = xhci_readl(xhci, &xhci->op_regs->command);
862 command &= ~CMD_RUN; 870 command &= ~CMD_RUN;
863 xhci_writel(xhci, command, &xhci->op_regs->command); 871 xhci_writel(xhci, command, &xhci->op_regs->command);
872
873 /* Some chips from Fresco Logic need an extraordinary delay */
874 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
875
864 if (xhci_handshake(xhci, &xhci->op_regs->status, 876 if (xhci_handshake(xhci, &xhci->op_regs->status,
865 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) { 877 STS_HALT, STS_HALT, delay)) {
866 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); 878 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
867 spin_unlock_irq(&xhci->lock); 879 spin_unlock_irq(&xhci->lock);
868 return -ETIMEDOUT; 880 return -ETIMEDOUT;
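Editor's note: for reference, here is how the scaled halt timeout works out on an affected Fresco Logic PDK (revision 0x4, see the xhci-pci.c hunk at the top of this series). This is only a sketch: the helper name is invented, and it assumes XHCI_MAX_HALT_USEC is 16*1000 microseconds as defined elsewhere in this tree; XHCI_SLOW_SUSPEND is the new bit added in the xhci.h hunk just below.

#define XHCI_MAX_HALT_USEC	(16 * 1000)	/* assumed value: 16 ms */
#define XHCI_SLOW_SUSPEND	(1 << 17)	/* added by this series, see xhci.h below */

/* Timeout handed to xhci_handshake() while waiting for STS_HALT */
static unsigned int xhc_halt_timeout_usec(unsigned int quirks)
{
	unsigned int delay = XHCI_MAX_HALT_USEC;

	/* XHCI_SLOW_SUSPEND stretches the CMD_RUN halt wait 10x: 16 ms -> 160 ms */
	delay *= (quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
	return delay;
}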
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 289fbfbae746..941d5f59e4dc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1548,6 +1548,8 @@ struct xhci_hcd {
1548#define XHCI_COMP_MODE_QUIRK (1 << 14) 1548#define XHCI_COMP_MODE_QUIRK (1 << 14)
1549#define XHCI_AVOID_BEI (1 << 15) 1549#define XHCI_AVOID_BEI (1 << 15)
1550#define XHCI_PLAT (1 << 16) 1550#define XHCI_PLAT (1 << 16)
1551#define XHCI_SLOW_SUSPEND (1 << 17)
1552#define XHCI_SPURIOUS_WAKEUP (1 << 18)
1551 unsigned int num_active_eps; 1553 unsigned int num_active_eps;
1552 unsigned int limit_active_eps; 1554 unsigned int limit_active_eps;
1553 /* There are two roothubs to keep track of bus suspend info for */ 1555 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e2b21c1d9c40..ba5f70f92888 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
246config USB_HSIC_USB3503 246config USB_HSIC_USB3503
247 tristate "USB3503 HSIC to USB20 Driver" 247 tristate "USB3503 HSIC to USB20 Driver"
248 depends on I2C 248 depends on I2C
249 select REGMAP 249 select REGMAP_I2C
250 help 250 help
251 This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver. 251 This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18e877ffe7b7..cd70cc886171 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -922,6 +922,52 @@ static void musb_generic_disable(struct musb *musb)
922} 922}
923 923
924/* 924/*
925 * Program the HDRC to start (enable interrupts, dma, etc.).
926 */
927void musb_start(struct musb *musb)
928{
929 void __iomem *regs = musb->mregs;
930 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
931
932 dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
933
934 /* Set INT enable registers, enable interrupts */
935 musb->intrtxe = musb->epmask;
936 musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
937 musb->intrrxe = musb->epmask & 0xfffe;
938 musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
939 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
940
941 musb_writeb(regs, MUSB_TESTMODE, 0);
942
943 /* put into basic highspeed mode and start session */
944 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
945 | MUSB_POWER_HSENAB
946 /* ENSUSPEND wedges tusb */
947 /* | MUSB_POWER_ENSUSPEND */
948 );
949
950 musb->is_active = 0;
951 devctl = musb_readb(regs, MUSB_DEVCTL);
952 devctl &= ~MUSB_DEVCTL_SESSION;
953
954 /* session started after:
955 * (a) ID-grounded irq, host mode;
956 * (b) vbus present/connect IRQ, peripheral mode;
957 * (c) peripheral initiates, using SRP
958 */
959 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
960 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
961 musb->is_active = 1;
962 } else {
963 devctl |= MUSB_DEVCTL_SESSION;
964 }
965
966 musb_platform_enable(musb);
967 musb_writeb(regs, MUSB_DEVCTL, devctl);
968}
969
970/*
925 * Make the HDRC stop (disable interrupts, etc.); 971 * Make the HDRC stop (disable interrupts, etc.);
926 * reversible by musb_start 972 * reversible by musb_start
927 * called on gadget driver unregister 973 * called on gadget driver unregister
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 65f3917b4fc5..1c5bf75ee8ff 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
503extern const char musb_driver_name[]; 503extern const char musb_driver_name[];
504 504
505extern void musb_stop(struct musb *musb); 505extern void musb_stop(struct musb *musb);
506extern void musb_start(struct musb *musb);
506 507
507extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); 508extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
508extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); 509extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
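Editor's note: with musb_start() moved into musb_core.c and declared here, mode-specific code can bring the controller up itself instead of relying on the host-only virthub path. A minimal illustrative call site (function name invented; it mirrors the musb_gadget.c hunk further down) looks like:

#include "musb_core.h"

/* Illustrative only: any glue or mode-specific file can now kick the
 * controller; called with musb->lock released, as in the gadget-start
 * hunk below. */
static void my_glue_start(struct musb *musb)
{
	musb_start(musb);
}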
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 4047cbb91bac..bd4138d80a48 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev)
535 struct dsps_glue *glue; 535 struct dsps_glue *glue;
536 int ret; 536 int ret;
537 537
538 if (!strcmp(pdev->name, "musb-hdrc"))
539 return -ENODEV;
540
538 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); 541 match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
539 if (!match) { 542 if (!match) {
540 dev_err(&pdev->dev, "fail to get matching of_match struct\n"); 543 dev_err(&pdev->dev, "fail to get matching of_match struct\n");
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9a08679d204d..3671898a4535 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb)
1790 musb->g.max_speed = USB_SPEED_HIGH; 1790 musb->g.max_speed = USB_SPEED_HIGH;
1791 musb->g.speed = USB_SPEED_UNKNOWN; 1791 musb->g.speed = USB_SPEED_UNKNOWN;
1792 1792
1793 MUSB_DEV_MODE(musb);
1794 musb->xceiv->otg->default_a = 0;
1795 musb->xceiv->state = OTG_STATE_B_IDLE;
1796
1793 /* this "gadget" abstracts/virtualizes the controller */ 1797 /* this "gadget" abstracts/virtualizes the controller */
1794 musb->g.name = musb_driver_name; 1798 musb->g.name = musb_driver_name;
1795 musb->g.is_otg = 1; 1799 musb->g.is_otg = 1;
@@ -1855,6 +1859,8 @@ static int musb_gadget_start(struct usb_gadget *g,
1855 musb->xceiv->state = OTG_STATE_B_IDLE; 1859 musb->xceiv->state = OTG_STATE_B_IDLE;
1856 spin_unlock_irqrestore(&musb->lock, flags); 1860 spin_unlock_irqrestore(&musb->lock, flags);
1857 1861
1862 musb_start(musb);
1863
1858 /* REVISIT: funcall to other code, which also 1864 /* REVISIT: funcall to other code, which also
1859 * handles power budgeting ... this way also 1865 * handles power budgeting ... this way also
1860 * ensures HdrcStart is indirectly called. 1866 * ensures HdrcStart is indirectly called.
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index a523950c2b32..d1d6b83aabca 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -44,52 +44,6 @@
44 44
45#include "musb_core.h" 45#include "musb_core.h"
46 46
47/*
48* Program the HDRC to start (enable interrupts, dma, etc.).
49*/
50static void musb_start(struct musb *musb)
51{
52 void __iomem *regs = musb->mregs;
53 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
54
55 dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
56
57 /* Set INT enable registers, enable interrupts */
58 musb->intrtxe = musb->epmask;
59 musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
60 musb->intrrxe = musb->epmask & 0xfffe;
61 musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
62 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
63
64 musb_writeb(regs, MUSB_TESTMODE, 0);
65
66 /* put into basic highspeed mode and start session */
67 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
68 | MUSB_POWER_HSENAB
69 /* ENSUSPEND wedges tusb */
70 /* | MUSB_POWER_ENSUSPEND */
71 );
72
73 musb->is_active = 0;
74 devctl = musb_readb(regs, MUSB_DEVCTL);
75 devctl &= ~MUSB_DEVCTL_SESSION;
76
77 /* session started after:
78 * (a) ID-grounded irq, host mode;
79 * (b) vbus present/connect IRQ, peripheral mode;
80 * (c) peripheral initiates, using SRP
81 */
82 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
83 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
84 musb->is_active = 1;
85 } else {
86 devctl |= MUSB_DEVCTL_SESSION;
87 }
88
89 musb_platform_enable(musb);
90 musb_writeb(regs, MUSB_DEVCTL, devctl);
91}
92
93static void musb_port_suspend(struct musb *musb, bool do_suspend) 47static void musb_port_suspend(struct musb *musb, bool do_suspend)
94{ 48{
95 struct usb_otg *otg = musb->xceiv->otg; 49 struct usb_otg *otg = musb->xceiv->otg;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index b2f29c9aebbf..02799a5efcd4 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
241 241
242/* platform driver interface */ 242/* platform driver interface */
243 243
244static int __init gpio_vbus_probe(struct platform_device *pdev) 244static int gpio_vbus_probe(struct platform_device *pdev)
245{ 245{
246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 246 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
247 struct gpio_vbus_data *gpio_vbus; 247 struct gpio_vbus_data *gpio_vbus;
@@ -349,7 +349,7 @@ err_gpio:
349 return err; 349 return err;
350} 350}
351 351
352static int __exit gpio_vbus_remove(struct platform_device *pdev) 352static int gpio_vbus_remove(struct platform_device *pdev)
353{ 353{
354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); 354 struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); 355 struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = {
398}; 398};
399#endif 399#endif
400 400
401/* NOTE: the gpio-vbus device may *NOT* be hotplugged */
402
403MODULE_ALIAS("platform:gpio-vbus"); 401MODULE_ALIAS("platform:gpio-vbus");
404 402
405static struct platform_driver gpio_vbus_driver = { 403static struct platform_driver gpio_vbus_driver = {
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = {
410 .pm = &gpio_vbus_dev_pm_ops, 408 .pm = &gpio_vbus_dev_pm_ops,
411#endif 409#endif
412 }, 410 },
413 .remove = __exit_p(gpio_vbus_remove), 411 .probe = gpio_vbus_probe,
412 .remove = gpio_vbus_remove,
414}; 413};
415 414
416module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); 415module_platform_driver(gpio_vbus_driver);
417 416
418MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); 417MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
419MODULE_AUTHOR("Philipp Zabel"); 418MODULE_AUTHOR("Philipp Zabel");
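Editor's note: switching from module_platform_driver_probe() to module_platform_driver() is what lets the __init/__exit annotations and the "may NOT be hotplugged" comment go away: the probe callback now lives in the platform_driver itself and can run at any time (re-bind, deferred probe). Roughly, the macro expands to the following; this is an illustrative expansion, not part of the patch:

/* Approximate expansion of module_platform_driver(gpio_vbus_driver) */
static int __init gpio_vbus_driver_init(void)
{
	return platform_driver_register(&gpio_vbus_driver);
}
module_init(gpio_vbus_driver_init);

static void __exit gpio_vbus_driver_exit(void)
{
	platform_driver_unregister(&gpio_vbus_driver);
}
module_exit(gpio_vbus_driver_exit);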
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c45f9c0a1b34..b21d553c245b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = {
904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, 904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
905 /* Crucible Devices */ 905 /* Crucible Devices */
906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, 906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
907 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
907 { } /* Terminating entry */ 908 { } /* Terminating entry */
908}; 909};
909 910
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af461b522..a7019d1e3058 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
1307 * Manufacturer: Crucible Technologies 1307 * Manufacturer: Crucible Technologies
1308 */ 1308 */
1309#define FTDI_CT_COMET_PID 0x8e08 1309#define FTDI_CT_COMET_PID 0x8e08
1310
1311/*
1312 * Product: Z3X Box
1313 * Manufacturer: Smart GSM Team
1314 */
1315#define FTDI_Z3X_PID 0x0011
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1cf6f125f5f0..acaee066b99a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
81 81
82#define HUAWEI_VENDOR_ID 0x12D1 82#define HUAWEI_VENDOR_ID 0x12D1
83#define HUAWEI_PRODUCT_E173 0x140C 83#define HUAWEI_PRODUCT_E173 0x140C
84#define HUAWEI_PRODUCT_E1750 0x1406
84#define HUAWEI_PRODUCT_K4505 0x1464 85#define HUAWEI_PRODUCT_K4505 0x1464
85#define HUAWEI_PRODUCT_K3765 0x1465 86#define HUAWEI_PRODUCT_K3765 0x1465
86#define HUAWEI_PRODUCT_K4605 0x14C6 87#define HUAWEI_PRODUCT_K4605 0x14C6
@@ -450,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
450#define CHANGHONG_VENDOR_ID 0x2077 451#define CHANGHONG_VENDOR_ID 0x2077
451#define CHANGHONG_PRODUCT_CH690 0x7001 452#define CHANGHONG_PRODUCT_CH690 0x7001
452 453
454/* Inovia */
455#define INOVIA_VENDOR_ID 0x20a6
456#define INOVIA_SEW858 0x1105
457
453/* some devices interfaces need special handling due to a number of reasons */ 458/* some devices interfaces need special handling due to a number of reasons */
454enum option_blacklist_reason { 459enum option_blacklist_reason {
455 OPTION_BLACKLIST_NONE = 0, 460 OPTION_BLACKLIST_NONE = 0,
@@ -567,6 +572,8 @@ static const struct usb_device_id option_ids[] = {
567 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
568 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 573 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
569 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, 574 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
575 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
576 .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
570 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 577 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
571 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, 578 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
572 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 579 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
@@ -686,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
686 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, 693 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
687 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, 694 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
688 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, 695 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
696 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
697 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
698 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
699 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
700 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
701 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
702 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
703 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
704 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
705 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
706 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
707 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
708 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
709 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
710 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
711 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
712 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
713 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
714 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
715 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
716 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
717 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
718 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
719 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
720 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
721 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
722 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
723 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
724 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
725 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
726 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
727 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
728 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
729 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
730 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
731 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
732 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
733 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
734 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
735 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
736 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
737 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
738 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
739 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
740 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
741 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
742 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
743 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
747 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
748 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
749 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
750 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
751 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
752 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
753 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
754 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
755 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
756 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
757 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
758 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
759 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
760 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
761 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
762 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
763 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
764 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
765 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
766 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
767 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
768 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
769 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
770 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
771 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
772 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
773 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
774 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
775 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
776 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
777 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
778 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
779 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
780 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
781 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
782 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
783 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
784 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
785 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
786 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
787 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
788 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
789 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
790 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
791 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
792 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
793 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
794 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
795 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
796 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
797 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
798 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
799 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
800 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
801 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
802 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
803 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
804 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
805 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
806 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
807 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
808 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
809 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
810 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
811 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
812 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
813 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
814 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
815 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
816 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
817 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
818 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
819 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
820 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
821 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
822 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
823 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
824 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
825 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
826 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
827 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
828 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
829 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
830 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
831 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
832 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
833 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
834 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
835 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
836 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
837 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
838 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
839 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
840 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
841 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
842 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
843 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
844 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
845 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
846 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
847 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
848 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
849 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
850 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
851 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
852 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
853 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
854 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
855 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
856 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
857 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
858 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
859 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
860 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
861 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
862 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
863 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
864 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
865 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
866 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
867 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
868 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
869 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
870 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
871 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
872 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
873 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
874 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
875 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
876 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
877 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
878 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
879 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
880 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
881 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
882 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
883 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
884 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
885 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
886 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
887 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
888 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
889 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
890 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
891 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
892 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
893 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
894 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
895 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
896 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
897 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
898 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
899 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
900 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
901 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
902 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
903 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
904 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
905 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
906 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
907 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
908 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
909 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
910 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
911 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
689 912
690 913
691 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 914 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1254,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
1254 1477
1255 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1478 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1256 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1479 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1257 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, 1480 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1481 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1482 },
1258 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1483 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1259 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1484 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1260 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1485 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1342,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
1342 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1567 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1343 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1568 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1344 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1569 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1570 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1345 { } /* Terminating entry */ 1571 { } /* Terminating entry */
1346}; 1572};
1347MODULE_DEVICE_TABLE(usb, option_ids); 1573MODULE_DEVICE_TABLE(usb, option_ids);
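Editor's note: the .driver_info blacklists used above (net_intf2_blacklist for the Huawei E1750, net_intf6_blacklist for the Olivetti Olicard 200) keep the option serial driver off the interface that carries the network function, so the WWAN driver can claim it. The sketch below shows what such an entry looks like in this driver; it is paraphrased from context for illustration, not new code in this pull:

struct option_blacklist_info {
	/* bitmask of interface numbers blacklisted for send_setup */
	const unsigned long sendsetup;
	/* bitmask of interface numbers the serial driver must not claim */
	const unsigned long reserved;
};

static const struct option_blacklist_info net_intf6_blacklist = {
	.reserved = BIT(6),	/* interface 6 is the network port */
};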
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bedf8e47713b..1e6de4cd079d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,11 +4,6 @@
4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) 4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
5 * Copyright (C) 2003 IBM Corp. 5 * Copyright (C) 2003 IBM Corp.
6 * 6 *
7 * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
8 * - fixes, improvements and documentation for the baud rate encoding methods
9 * Copyright (C) 2013 Reinhard Max <max@suse.de>
10 * - fixes and improvements for the divisor based baud rate encoding method
11 *
12 * Original driver for 2.2.x by anonymous 7 * Original driver for 2.2.x by anonymous
13 * 8 *
14 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table);
134 129
135 130
136enum pl2303_type { 131enum pl2303_type {
137 type_0, /* H version ? */ 132 type_0, /* don't know the difference between type 0 and */
138 type_1, /* H version ? */ 133 type_1, /* type 1, until someone from prolific tells us... */
139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 134 HX, /* HX version of the pl2303 chip */
140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
141 TB, /* TB version */
142 HX_CLONE, /* Cheap and less functional clone of the HX chip */
143}; 135};
144/*
145 * NOTE: don't know the difference between type 0 and type 1,
146 * until someone from Prolific tells us...
147 * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
148 */
149 136
150struct pl2303_serial_private { 137struct pl2303_serial_private {
151 enum pl2303_type type; 138 enum pl2303_type type;
@@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial)
185{ 172{
186 struct pl2303_serial_private *spriv; 173 struct pl2303_serial_private *spriv;
187 enum pl2303_type type = type_0; 174 enum pl2303_type type = type_0;
188 char *type_str = "unknown (treating as type_0)";
189 unsigned char *buf; 175 unsigned char *buf;
190 176
191 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); 177 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial)
198 return -ENOMEM; 184 return -ENOMEM;
199 } 185 }
200 186
201 if (serial->dev->descriptor.bDeviceClass == 0x02) { 187 if (serial->dev->descriptor.bDeviceClass == 0x02)
202 type = type_0; 188 type = type_0;
203 type_str = "type_0"; 189 else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
204 } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { 190 type = HX;
205 /* 191 else if (serial->dev->descriptor.bDeviceClass == 0x00)
206 * NOTE: The bcdDevice version is the only difference between
207 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
208 */
209 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
210 /* Check if the device is a clone */
211 pl2303_vendor_read(0x9494, 0, serial, buf);
212 /*
213 * NOTE: Not sure if this read is really needed.
214 * The HX returns 0x00, the clone 0x02, but the Windows
215 * driver seems to ignore the value and continues.
216 */
217 pl2303_vendor_write(0x0606, 0xaa, serial);
218 pl2303_vendor_read(0x8686, 0, serial, buf);
219 if (buf[0] != 0xaa) {
220 type = HX_CLONE;
221 type_str = "X/HX clone (limited functionality)";
222 } else {
223 type = HX_TA;
224 type_str = "X/HX/TA";
225 }
226 pl2303_vendor_write(0x0606, 0x00, serial);
227 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
228 == 0x400) {
229 type = HXD_EA_RA_SA;
230 type_str = "HXD/EA/RA/SA";
231 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
232 == 0x500) {
233 type = TB;
234 type_str = "TB";
235 } else {
236 dev_info(&serial->interface->dev,
237 "unknown/unsupported device type\n");
238 kfree(spriv);
239 kfree(buf);
240 return -ENODEV;
241 }
242 } else if (serial->dev->descriptor.bDeviceClass == 0x00
243 || serial->dev->descriptor.bDeviceClass == 0xFF) {
244 type = type_1; 192 type = type_1;
245 type_str = "type_1"; 193 else if (serial->dev->descriptor.bDeviceClass == 0xFF)
246 } 194 type = type_1;
247 dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); 195 dev_dbg(&serial->interface->dev, "device type: %d\n", type);
248 196
249 spriv->type = type; 197 spriv->type = type;
250 usb_set_serial_data(serial, spriv); 198 usb_set_serial_data(serial, spriv);
@@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial)
259 pl2303_vendor_read(0x8383, 0, serial, buf); 207 pl2303_vendor_read(0x8383, 0, serial, buf);
260 pl2303_vendor_write(0, 1, serial); 208 pl2303_vendor_write(0, 1, serial);
261 pl2303_vendor_write(1, 0, serial); 209 pl2303_vendor_write(1, 0, serial);
262 if (type == type_0 || type == type_1) 210 if (type == HX)
263 pl2303_vendor_write(2, 0x24, serial);
264 else
265 pl2303_vendor_write(2, 0x44, serial); 211 pl2303_vendor_write(2, 0x44, serial);
212 else
213 pl2303_vendor_write(2, 0x24, serial);
266 214
267 kfree(buf); 215 kfree(buf);
268 return 0; 216 return 0;
@@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
316 return retval; 264 return retval;
317} 265}
318 266
319static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, 267static void pl2303_encode_baudrate(struct tty_struct *tty,
320 u8 buf[4]) 268 struct usb_serial_port *port,
269 u8 buf[4])
321{ 270{
322 /*
323 * NOTE: Only the values defined in baud_sup are supported !
324 * => if unsupported values are set, the PL2303 uses 9600 baud instead
325 * => HX clones just don't work at unsupported baud rates < 115200 baud,
326 * for baud rates > 115200 they run at 115200 baud
327 */
328 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 271 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
329 4800, 7200, 9600, 14400, 19200, 28800, 38400, 272 4800, 7200, 9600, 14400, 19200, 28800, 38400,
330 57600, 115200, 230400, 460800, 614400, 921600, 273 57600, 115200, 230400, 460800, 500000, 614400,
331 1228800, 2457600, 3000000, 6000000, 12000000 }; 274 921600, 1228800, 2457600, 3000000, 6000000 };
275
276 struct usb_serial *serial = port->serial;
277 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
278 int baud;
279 int i;
280
332 /* 281 /*
333 * NOTE: With the exception of type_0/1 devices, the following 282 * NOTE: Only the values defined in baud_sup are supported!
334 * additional baud rates are supported (tested with HX rev. 3A only): 283 * => if unsupported values are set, the PL2303 seems to use
335 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 284 * 9600 baud (at least my PL2303X always does)
336 * 403200, 806400. (*: not HX and HX clones)
337 *
338 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
339 * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
340 *
341 * As long as we are not using this encoding method for anything else
342 * than the type_0+1, HX and HX clone chips, there is no point in
343 * complicating the code to support them.
344 */ 285 */
345 int i; 286 baud = tty_get_baud_rate(tty);
287 dev_dbg(&port->dev, "baud requested = %d\n", baud);
288 if (!baud)
289 return;
346 290
347 /* Set baudrate to nearest supported value */ 291 /* Set baudrate to nearest supported value */
348 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { 292 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
349 if (baud_sup[i] > baud) 293 if (baud_sup[i] > baud)
350 break; 294 break;
351 } 295 }
296
352 if (i == ARRAY_SIZE(baud_sup)) 297 if (i == ARRAY_SIZE(baud_sup))
353 baud = baud_sup[i - 1]; 298 baud = baud_sup[i - 1];
354 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) 299 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
355 baud = baud_sup[i - 1]; 300 baud = baud_sup[i - 1];
356 else 301 else
357 baud = baud_sup[i]; 302 baud = baud_sup[i];
358 /* Respect the chip type specific baud rate limits */
359 /*
360 * FIXME: as long as we don't know how to distinguish between the
361 * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
362 */
363 if (type == HX_TA)
364 baud = min_t(int, baud, 6000000);
365 else if (type == type_0 || type == type_1)
366 baud = min_t(int, baud, 1228800);
367 else if (type == HX_CLONE)
368 baud = min_t(int, baud, 115200);
369 /* Direct (standard) baud rate encoding method */
370 put_unaligned_le32(baud, buf);
371
372 return baud;
373}
374 303
375static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type, 304 /* type_0, type_1 only support up to 1228800 baud */
376 u8 buf[4]) 305 if (spriv->type != HX)
377{ 306 baud = min_t(int, baud, 1228800);
378 /*
379 * Divisor based baud rate encoding method
380 *
381 * NOTE: HX clones do NOT support this method.
382 * It's not clear if the type_0/1 chips support it.
383 *
384 * divisor = 12MHz * 32 / baudrate = 2^A * B
385 *
386 * with
387 *
388 * A = buf[1] & 0x0e
389 * B = buf[0] + (buf[1] & 0x01) << 8
390 *
391 * Special cases:
392 * => 8 < B < 16: device seems to work not properly
393 * => B <= 8: device uses the max. value B = 512 instead
394 */
395 unsigned int A, B;
396 307
397 /* 308 if (baud <= 115200) {
398 * NOTE: The Windows driver allows maximum baud rates of 110% of the 309 put_unaligned_le32(baud, buf);
399 * specified maximium value.
400 * Quick tests with early (2004) HX (rev. A) chips suggest, that even
401 * higher baud rates (up to the maximum of 24M baud !) are working fine,
402 * but that should really be tested carefully in "real life" scenarios
403 * before removing the upper limit completely.
404 * Baud rates smaller than the specified 75 baud are definitely working
405 * fine.
406 */
407 if (type == type_0 || type == type_1)
408 baud = min_t(int, baud, 1228800 * 1.1);
409 else if (type == HX_TA)
410 baud = min_t(int, baud, 6000000 * 1.1);
411 else if (type == HXD_EA_RA_SA)
412 /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
413 /*
414 * FIXME: as long as we don't know how to distinguish between
415 * these chip variants, allow the max. of these values
416 */
417 baud = min_t(int, baud, 12000000 * 1.1);
418 else if (type == TB)
419 baud = min_t(int, baud, 12000000 * 1.1);
420 /* Determine factors A and B */
421 A = 0;
422 B = 12000000 * 32 / baud; /* 12MHz */
423 B <<= 1; /* Add one bit for rounding */
424 while (B > (512 << 1) && A <= 14) {
425 A += 2;
426 B >>= 2;
427 }
428 if (A > 14) { /* max. divisor = min. baudrate reached */
429 A = 14;
430 B = 512;
431 /* => ~45.78 baud */
432 } else { 310 } else {
433 B = (B + 1) >> 1; /* Round the last bit */
434 }
435 /* Handle special cases */
436 if (B == 512)
437 B = 0; /* also: 1 to 8 */
438 else if (B < 16)
439 /* 311 /*
440 * NOTE: With the current algorithm this happens 312 * Apparently the formula for higher speeds is:
441 * only for A=0 and means that the min. divisor 313 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
442 * (respectively: the max. baudrate) is reached.
443 */ 314 */
444 B = 16; /* => 24 MBaud */ 315 unsigned tmp = 12000000 * 32 / baud;
445 /* Encode the baud rate */ 316 buf[3] = 0x80;
446 buf[3] = 0x80; /* Select divisor encoding method */ 317 buf[2] = 0;
447 buf[2] = 0; 318 buf[1] = (tmp >= 256);
448 buf[1] = (A & 0x0e); /* A */ 319 while (tmp >= 256) {
449 buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ 320 tmp >>= 2;
450 buf[0] = B & 0xff; /* 8 LSBs of B */ 321 buf[1] <<= 1;
451 /* Calculate the actual/resulting baud rate */ 322 }
452 if (B <= 8) 323 buf[0] = tmp;
453 B = 512; 324 }
454 baud = 12000000 * 32 / ((1 << A) * B);
455
456 return baud;
457}
458
459static void pl2303_encode_baudrate(struct tty_struct *tty,
460 struct usb_serial_port *port,
461 enum pl2303_type type,
462 u8 buf[4])
463{
464 int baud;
465 325
466 baud = tty_get_baud_rate(tty);
467 dev_dbg(&port->dev, "baud requested = %d\n", baud);
468 if (!baud)
469 return;
470 /*
471 * There are two methods for setting/encoding the baud rate
472 * 1) Direct method: encodes the baud rate value directly
473 * => supported by all chip types
474 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
475 * => not supported by HX clones (and likely type_0/1 chips)
476 *
477 * NOTE: Although the divisor based baud rate encoding method is much
478 * more flexible, some of the standard baud rate values can not be
479 * realized exactly. But the difference is very small (max. 0.2%) and
480 * the device likely uses the same baud rate generator for both methods
481 * so that there is likley no difference.
482 */
483 if (type == type_0 || type == type_1 || type == HX_CLONE)
484 baud = pl2303_baudrate_encode_direct(baud, type, buf);
485 else
486 baud = pl2303_baudrate_encode_divisor(baud, type, buf);
487 /* Save resulting baud rate */ 326 /* Save resulting baud rate */
488 tty_encode_baud_rate(tty, baud, baud); 327 tty_encode_baud_rate(tty, baud, baud);
489 dev_dbg(&port->dev, "baud set = %d\n", baud); 328 dev_dbg(&port->dev, "baud set = %d\n", baud);
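Editor's note: a worked example of the divisor branch above, matching the "12M * 32 / (2^buf[1]) / buf[0]" comment. Purely illustrative arithmetic; the function name is invented and this code is not part of the patch.

/* Worked example of the divisor encoding for a 460800 baud request. */
static void pl2303_divisor_example(void)
{
	unsigned int baud = 460800;
	unsigned int tmp = 12000000 * 32 / baud;	/* = 833 */
	unsigned char b1 = (tmp >= 256);		/* = 1 */

	while (tmp >= 256) {				/* one pass here */
		tmp >>= 2;				/* 833 -> 208 */
		b1 <<= 1;				/* 1 -> 2 */
	}
	/* buf[] ends up as { 208, 2, 0, 0x80 };
	 * resulting rate = 12M * 32 / (1 << 2) / 208 = 461538 baud,
	 * about 0.16% above the requested 460800. */
}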
@@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
540 dev_dbg(&port->dev, "data bits = %d\n", buf[6]); 379 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
541 } 380 }
542 381
543 /* For reference: buf[0]:buf[3] baud rate value */ 382 /* For reference buf[0]:buf[3] baud rate value */
544 pl2303_encode_baudrate(tty, port, spriv->type, buf); 383 pl2303_encode_baudrate(tty, port, &buf[0]);
545 384
546 /* For reference buf[4]=0 is 1 stop bits */ 385 /* For reference buf[4]=0 is 1 stop bits */
547 /* For reference buf[4]=1 is 1.5 stop bits */ 386 /* For reference buf[4]=1 is 1.5 stop bits */
@@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty,
618 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 457 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
619 458
620 if (C_CRTSCTS(tty)) { 459 if (C_CRTSCTS(tty)) {
621 if (spriv->type == type_0 || spriv->type == type_1) 460 if (spriv->type == HX)
622 pl2303_vendor_write(0x0, 0x41, serial);
623 else
624 pl2303_vendor_write(0x0, 0x61, serial); 461 pl2303_vendor_write(0x0, 0x61, serial);
462 else
463 pl2303_vendor_write(0x0, 0x41, serial);
625 } else { 464 } else {
626 pl2303_vendor_write(0x0, 0x0, serial); 465 pl2303_vendor_write(0x0, 0x0, serial);
627 } 466 }
@@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
658 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
659 int result; 498 int result;
660 499
661 if (spriv->type == type_0 || spriv->type == type_1) { 500 if (spriv->type != HX) {
662 usb_clear_halt(serial->dev, port->write_urb->pipe); 501 usb_clear_halt(serial->dev, port->write_urb->pipe);
663 usb_clear_halt(serial->dev, port->read_urb->pipe); 502 usb_clear_halt(serial->dev, port->read_urb->pipe);
664 } else { 503 } else {
@@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 672 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 673 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
835 0, NULL, 0, 100); 674 0, NULL, 0, 100);
836 /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
837 if (result) 675 if (result)
838 dev_err(&port->dev, "error sending break = %d\n", result); 676 dev_err(&port->dev, "error sending break = %d\n", result);
839} 677}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 760b78560f67..c9a35697ebe9 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
193 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
194 { } /* terminator */ 195 { } /* terminator */
195}; 196};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 94d75edef77f..18509e6c21ab 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
211 /* 211 /*
212 * Many devices do not respond properly to READ_CAPACITY_16. 212 * Many devices do not respond properly to READ_CAPACITY_16.
213 * Tell the SCSI layer to try READ_CAPACITY_10 first. 213 * Tell the SCSI layer to try READ_CAPACITY_10 first.
214 * However some USB 3.0 drive enclosures return capacity
215 * modulo 2TB. Those must use READ_CAPACITY_16
214 */ 216 */
215 sdev->try_rc_10_first = 1; 217 if (!(us->fflags & US_FL_NEEDS_CAP16))
218 sdev->try_rc_10_first = 1;
216 219
217 /* assume SPC3 or latter devices support sense size > 18 */ 220 /* assume SPC3 or latter devices support sense size > 18 */
218 if (sdev->scsi_level > SCSI_SPC_2) 221 if (sdev->scsi_level > SCSI_SPC_2)
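Editor's note: the "capacity modulo 2TB" remark follows from READ CAPACITY(10) carrying only a 32-bit last-LBA. Illustrative arithmetic only (numbers and helper name are mine, not from the patch), for a bridge that truncates instead of signalling that READ CAPACITY(16) is required:

#include <linux/types.h>

/* A ~3 TB disk has 5860533168 512-byte sectors; truncated to 32 bits
 * that becomes 1565565872 sectors, i.e. ~0.8 TB reported -- exactly the
 * real size minus 2 TiB. */
static u64 reported_bytes_from_rc10(u64 real_sectors)
{
	u32 truncated = (u32)real_sectors;

	return (u64)truncated * 512;
}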
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c015f2c16729..de32cfa5bfa6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1926 US_FL_IGNORE_RESIDUE ), 1926 US_FL_IGNORE_RESIDUE ),
1927 1927
1928/* Reported by Oliver Neukum <oneukum@suse.com> */
1929UNUSUAL_DEV( 0x174c, 0x55aa, 0x0100, 0x0100,
1930 "ASMedia",
1931 "AS2105",
1932 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1933 US_FL_NEEDS_CAP16),
1934
1928/* Reported by Jesse Feddema <jdfeddema@gmail.com> */ 1935/* Reported by Jesse Feddema <jdfeddema@gmail.com> */
1929UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, 1936UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
1930 "Yarvik", 1937 "Yarvik",
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a9807dea3887..4fb7a8f83c8a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
545 long npage; 545 long npage;
546 int ret = 0, prot = 0; 546 int ret = 0, prot = 0;
547 uint64_t mask; 547 uint64_t mask;
548 struct vfio_dma *dma = NULL;
549 unsigned long pfn;
548 550
549 end = map->iova + map->size; 551 end = map->iova + map->size;
550 552
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
587 } 589 }
588 590
589 for (iova = map->iova; iova < end; iova += size, vaddr += size) { 591 for (iova = map->iova; iova < end; iova += size, vaddr += size) {
590 struct vfio_dma *dma = NULL;
591 unsigned long pfn;
592 long i; 592 long i;
593 593
594 /* Pin a contiguous chunk of memory */ 594 /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
597 if (npage <= 0) { 597 if (npage <= 0) {
598 WARN_ON(!npage); 598 WARN_ON(!npage);
599 ret = (int)npage; 599 ret = (int)npage;
600 break; 600 goto out;
601 } 601 }
602 602
603 /* Verify pages are not already mapped */ 603 /* Verify pages are not already mapped */
604 for (i = 0; i < npage; i++) { 604 for (i = 0; i < npage; i++) {
605 if (iommu_iova_to_phys(iommu->domain, 605 if (iommu_iova_to_phys(iommu->domain,
606 iova + (i << PAGE_SHIFT))) { 606 iova + (i << PAGE_SHIFT))) {
607 vfio_unpin_pages(pfn, npage, prot, true);
608 ret = -EBUSY; 607 ret = -EBUSY;
609 break; 608 goto out_unpin;
610 } 609 }
611 } 610 }
612 611
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
616 if (ret) { 615 if (ret) {
617 if (ret != -EBUSY || 616 if (ret != -EBUSY ||
618 map_try_harder(iommu, iova, pfn, npage, prot)) { 617 map_try_harder(iommu, iova, pfn, npage, prot)) {
619 vfio_unpin_pages(pfn, npage, prot, true); 618 goto out_unpin;
620 break;
621 } 619 }
622 } 620 }
623 621
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
672 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 670 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
673 if (!dma) { 671 if (!dma) {
674 iommu_unmap(iommu->domain, iova, size); 672 iommu_unmap(iommu->domain, iova, size);
675 vfio_unpin_pages(pfn, npage, prot, true);
676 ret = -ENOMEM; 673 ret = -ENOMEM;
677 break; 674 goto out_unpin;
678 } 675 }
679 676
680 dma->size = size; 677 dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
685 } 682 }
686 } 683 }
687 684
688 if (ret) { 685 WARN_ON(ret);
689 struct vfio_dma *tmp; 686 mutex_unlock(&iommu->lock);
690 iova = map->iova; 687 return ret;
691 size = map->size; 688
692 while ((tmp = vfio_find_dma(iommu, iova, size))) { 689out_unpin:
693 int r = vfio_remove_dma_overlap(iommu, iova, 690 vfio_unpin_pages(pfn, npage, prot, true);
694 &size, tmp); 691
695 if (WARN_ON(r || !size)) 692out:
696 break; 693 iova = map->iova;
697 } 694 size = map->size;
695 while ((dma = vfio_find_dma(iommu, iova, size))) {
696 int r = vfio_remove_dma_overlap(iommu, iova,
697 &size, dma);
698 if (WARN_ON(r || !size))
699 break;
698 } 700 }
699 701
700 mutex_unlock(&iommu->lock); 702 mutex_unlock(&iommu->lock);
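Editor's note: the rework above replaces several open-coded vfio_unpin_pages()/break sequences with a single unwind path. The generic shape of that idiom, reduced to a self-contained sketch with invented names (this is not VFIO's API; the real code walks the installed mappings with vfio_find_dma() instead of an index), is:

/* Illustrative only: single-exit error unwinding with goto labels. */
static int pin_chunk(int i)		{ return 0; }	/* stand-ins for the real ops */
static int install_chunk(int i)		{ return 0; }
static void unpin_chunk(int i)		{ }
static void remove_chunk(int i)		{ }

static int map_all(int nchunks)
{
	int ret = 0;
	int i;

	for (i = 0; i < nchunks; i++) {
		ret = pin_chunk(i);
		if (ret)
			goto out;		/* nothing pinned for chunk i */
		ret = install_chunk(i);
		if (ret)
			goto out_unpin;		/* chunk i pinned but not installed */
	}
	return 0;

out_unpin:
	unpin_chunk(i);				/* undo only the chunk just pinned */
out:
	while (--i >= 0)
		remove_chunk(i);		/* tear down what was already installed */
	return ret;
}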
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 592b31698fc8..e663921eebb6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
728 } 728 }
729 se_sess = tv_nexus->tvn_se_sess; 729 se_sess = tv_nexus->tvn_se_sess;
730 730
731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); 731 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732 if (tag < 0) {
733 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734 return ERR_PTR(-ENOMEM);
735 }
736
732 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; 737 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
733 sg = cmd->tvc_sgl; 738 sg = cmd->tvc_sgl;
734 pages = cmd->tvc_upages; 739 pages = cmd->tvc_upages;
@@ -1051,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1051 if (data_direction != DMA_NONE) { 1056 if (data_direction != DMA_NONE) {
1052 ret = vhost_scsi_map_iov_to_sgl(cmd, 1057 ret = vhost_scsi_map_iov_to_sgl(cmd,
1053 &vq->iov[data_first], data_num, 1058 &vq->iov[data_first], data_num,
1054 data_direction == DMA_TO_DEVICE); 1059 data_direction == DMA_FROM_DEVICE);
1055 if (unlikely(ret)) { 1060 if (unlikely(ret)) {
1056 vq_err(vq, "Failed to map iov to sgl\n"); 1061 vq_err(vq, "Failed to map iov to sgl\n");
1057 goto err_free; 1062 goto err_free;
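Editor's note: two fixes are visible in this hunk: percpu_ida_alloc() is now called with GFP_ATOMIC and its failure is handled, and the "write" flag passed towards the page pinning code is derived from DMA_FROM_DEVICE rather than DMA_TO_DEVICE. A minimal sketch of why the latter matters (illustrative; the helper name is invented, not the driver's own):

#include <linux/types.h>
#include <linux/dma-direction.h>

/* The "write" flag that eventually reaches get_user_pages() means
 * "these pages will be written to", which is true when data flows from
 * the device into guest memory -- i.e. DMA_FROM_DEVICE. */
static bool iov_needs_write_access(enum dma_data_direction dir)
{
	return dir == DMA_FROM_DEVICE;
}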
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a54ccdc4d661..22ad85242e5b 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
 	struct au1100fb_device *fbdev;
-	unsigned int len;
-	unsigned long start=0, off;
 
 	fbdev = to_au1100fb_device(fbi);
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot)) {
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static struct fb_ops au1100fb_ops =
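
Note: this au1100fb change (and the matching au1200fb change that follows) drops the hand-rolled offset/length validation and the io_remap_pfn_range() call in favour of vm_iomap_memory(), which checks the requested vma against the physical window and performs the remap itself. A sketch of what such a handler reduces to; "example_fb_device" is a hypothetical stand-in for the driver's private struct:

/* Sketch only: an fb mmap handler built on vm_iomap_memory().  The helper
 * rejects a vma whose offset or size falls outside [fb_phys, fb_phys + fb_len)
 * and then does the io_remap_pfn_range() internally. */
static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct example_fb_device *fbdev = info->par;	/* hypothetical type */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
}
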
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 301224ecc950..1d02897d17f2 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
  * method mainly to allow the use of the TLB streaming flag (CCA=6)
  */
 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
 {
-	unsigned int len;
-	unsigned long start=0, off;
 	struct au1200fb_device *fbdev = info->par;
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-	return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				  vma->vm_end - vma->vm_start,
-				  vma->vm_page_prot);
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c7c64f18773d..fa932c2f7d97 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
 	sl = dev_to_w1_slave(dev);
 	fops = sl->family->fops;
 
+	if (!fops)
+		return 0;
+
 	switch (action) {
 	case BUS_NOTIFY_ADD_DEVICE:
 		/* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	atomic_set(&sl->refcnt, 0);
 	init_completion(&sl->released);
 
+	/* slave modules need to be loaded in a context with unlocked mutex */
+	mutex_unlock(&dev->mutex);
 	request_module("w1-family-0x%0x", rn->family);
+	mutex_lock(&dev->mutex);
 
 	spin_lock(&w1_flock);
 	f = w1_family_registered(rn->family);
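
Note: the second w1 hunk drops dev->mutex across request_module(). The call blocks until the family module has finished loading, and the added comment spells out why that has to happen with the mutex released: the module's init path presumably comes back into the w1 core and needs the same master mutex, so holding it across the load risks deadlock. A self-contained illustration of the unlock-call-relock pattern, with a pthread mutex standing in for the kernel mutex and made-up helper names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the work request_module() triggers: it needs dev_mutex too. */
static void load_family_module(void)
{
	pthread_mutex_lock(&dev_mutex);
	puts("family registered");
	pthread_mutex_unlock(&dev_mutex);
}

static void attach_slave(void)
{
	pthread_mutex_lock(&dev_mutex);

	/* ... slave setup done under the lock ... */

	/* Drop the lock around the blocking call that re-takes it,
	 * mirroring the mutex_unlock()/mutex_lock() pair in the patch. */
	pthread_mutex_unlock(&dev_mutex);
	load_family_module();
	pthread_mutex_lock(&dev_mutex);

	/* ... continue with the lock held ... */
	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	attach_slave();
	return 0;
}
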
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 5be5e3d14f79..19f3c3fc65f4 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev,
 		return -ENODEV;
 	}
 
+	/*
+	 * Ignore all auxilary iLO devices with the following PCI ID
+	 */
+	if (dev->subsystem_device == 0x1979)
+		return -ENODEV;
+
 	if (pci_enable_device(dev)) {
 		dev_warn(&dev->dev,
 			"Not possible to enable PCI Device: 0x%x:0x%x.\n",
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 491419e0772a..5c3d4df63e68 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -35,7 +35,7 @@
 #define KEMPLD_WDT_STAGE_TIMEOUT(x)	(0x1b + (x) * 4)
 #define KEMPLD_WDT_STAGE_CFG(x)		(0x18 + (x))
 #define STAGE_CFG_GET_PRESCALER(x)	(((x) & 0x30) >> 4)
-#define STAGE_CFG_SET_PRESCALER(x)	(((x) & 0x30) << 4)
+#define STAGE_CFG_SET_PRESCALER(x)	(((x) & 0x3) << 4)
 #define STAGE_CFG_PRESCALER_MASK	0x30
 #define STAGE_CFG_ACTION_MASK		0x7
 #define STAGE_CFG_ASSERT		(1 << 3)
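
Note: the STAGE_CFG_SET_PRESCALER() fix is easiest to see with numbers. The prescaler field sits in bits 5:4 (STAGE_CFG_PRESCALER_MASK is 0x30), so SET has to mask the 2-bit input and then shift it into place. The old macro masked with 0x30 first, so every in-range prescaler value (0..3) was ANDed to zero and never reached the register field. A quick userspace check of both versions:

#include <assert.h>
#include <stdio.h>

#define STAGE_CFG_GET_PRESCALER(x)	(((x) & 0x30) >> 4)
#define STAGE_CFG_SET_PRESCALER(x)	(((x) & 0x3) << 4)	/* fixed version */
#define OLD_SET_PRESCALER(x)		(((x) & 0x30) << 4)	/* pre-patch version */

int main(void)
{
	for (unsigned int p = 0; p <= 3; p++) {
		/* the fixed macro round-trips through the bits 5:4 field ... */
		assert(STAGE_CFG_GET_PRESCALER(STAGE_CFG_SET_PRESCALER(p)) == p);
		/* ... while the old macro dropped every in-range value */
		printf("p=%u old=0x%02x new=0x%02x\n",
		       p, OLD_SET_PRESCALER(p), STAGE_CFG_SET_PRESCALER(p));
	}
	return 0;
}
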
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 1f94b42764aa..f6caa77151c7 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = {
 	.set_timeout = sunxi_wdt_set_timeout,
 };
 
-static int __init sunxi_wdt_probe(struct platform_device *pdev)
+static int sunxi_wdt_probe(struct platform_device *pdev)
 {
 	struct sunxi_wdt_dev *sunxi_wdt;
 	struct resource *res;
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __exit sunxi_wdt_remove(struct platform_device *pdev)
+static int sunxi_wdt_remove(struct platform_device *pdev)
 {
 	struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
 
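
Note: dropping __init/__exit from sunxi_wdt_probe()/sunxi_wdt_remove() matters because a platform driver's probe and remove callbacks can run long after module init, for example on deferred probe or an unbind/bind through sysfs, by which point .init.text has already been freed (and __exit code may never have been kept at all for built-in drivers). A minimal sketch of the usual declaration with plain, unannotated probe/remove; the bodies and the rest of the driver are elided:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Sketch: callbacks referenced from the driver struct must live in regular
 * .text, since they can be called at any time after init. */
static int sunxi_wdt_probe(struct platform_device *pdev)
{
	/* ... */
	return 0;
}

static int sunxi_wdt_remove(struct platform_device *pdev)
{
	/* ... */
	return 0;
}

static struct platform_driver sunxi_wdt_driver = {
	.probe	= sunxi_wdt_probe,
	.remove	= sunxi_wdt_remove,
	.driver	= {
		.name	= "sunxi-wdt",
	},
};

module_platform_driver(sunxi_wdt_driver);
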
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 42913f131dc2..c9b0c627fe7e 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
 
 	case WDIOC_GETSTATUS:
 	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
+		error = put_user(0, p);
+		break;
 
 	case WDIOC_KEEPALIVE:
 		ts72xx_wdt_kick(wdt);
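
Note: the ts72xx ioctl switch runs with the device lock held, so returning straight out of the WDIOC_GETSTATUS/WDIOC_GETBOOTSTATUS case skipped the unlock at the bottom of the function; storing the put_user() result and breaking lets the common exit path drop the lock. A sketch of that shape (example_lock and the function name are stand-ins, not the driver's real identifiers):

/* Sketch: inside a locked ioctl handler, every case funnels to one exit so
 * the lock is always dropped; returning from a case would leak it. */
static DEFINE_MUTEX(example_lock);	/* hypothetical lock */

static long example_wdt_ioctl(unsigned int cmd, int __user *p)
{
	int error = 0;

	mutex_lock(&example_lock);

	switch (cmd) {
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		error = put_user(0, p);	/* don't "return" here */
		break;
	default:
		error = -ENOTTY;
		break;
	}

	mutex_unlock(&example_lock);
	return error;
}
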