author    Dave Airlie <airlied@redhat.com>    2014-01-19 19:21:54 -0500
committer Dave Airlie <airlied@redhat.com>    2014-01-19 19:21:54 -0500
commit    cfd72a4c2089aa3938f37281a34d6eb3306d5fd8 (patch)
tree      e63f6df423aeb59d1ea5f7af3597d6718e75c335 /drivers
parent    9354eafd893f45320a37da360e1728104e49cc2f (diff)
parent    0d9d349d8788d30f3fc3bb39279c370f94d9dbec (diff)
Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
drm-intel-next-2014-01-10:
- final bits for runtime D3 on Haswell from Paul (now enabled fully)
- parse the backlight modulation freq information in the VBT from Jani (but not yet used)
- more watermark improvements from Ville for ilk-ivb and bdw
- bugfixes for fastboot from Jesse
- watermark fix for i830M (but not yet everything)
- vlv vga hotplug w/a (Imre)
- piles of other small improvements, cleanups and fixes all over

Note that the pull request includes a backmerge of the last drm-fixes pulled into Linus' tree - things were getting a bit too messy. So the shortlog also contains a bunch of patches from Linus' tree. Please yell if you want me to frob it for you a bit.

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (609 commits)
  drm/i915/bdw: make sure south port interrupts are enabled properly v2
  drm/i915: Include more information in disabled hotplug interrupt warning
  drm/i915: Only complain about a rogue hotplug IRQ after disabling
  drm/i915: Only WARN about a stuck hotplug irq ONCE
  drm/i915: s/hotplugt_status_gen4/hotplug_status_g4x/
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 1
-rw-r--r-- drivers/acpi/ac.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 1
-rw-r--r-- drivers/acpi/apei/Kconfig | 1
-rw-r--r-- drivers/acpi/apei/erst.c | 1
-rw-r--r-- drivers/acpi/battery.c | 22
-rw-r--r-- drivers/acpi/bus.c | 10
-rw-r--r-- drivers/ata/ahci.c | 21
-rw-r--r-- drivers/ata/ahci_imx.c | 3
-rw-r--r-- drivers/ata/libata-core.c | 19
-rw-r--r-- drivers/ata/libata-scsi.c | 21
-rw-r--r-- drivers/ata/sata_sis.c | 4
-rw-r--r-- drivers/block/null_blk.c | 112
-rw-r--r-- drivers/block/skd_main.c | 4
-rw-r--r-- drivers/bluetooth/ath3k.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 1
-rw-r--r-- drivers/char/tpm/tpm_ppi.c | 15
-rw-r--r-- drivers/clk/clk-divider.c | 2
-rw-r--r-- drivers/clk/clk-s2mps11.c | 6
-rw-r--r-- drivers/clk/samsung/clk-exynos-audss.c | 10
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 14
-rw-r--r-- drivers/clocksource/Kconfig | 1
-rw-r--r-- drivers/clocksource/clksrc-of.c | 1
-rw-r--r-- drivers/clocksource/dw_apb_timer_of.c | 7
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 3
-rw-r--r-- drivers/clocksource/time-armada-370-xp.c | 10
-rw-r--r-- drivers/cpufreq/cpufreq.c | 104
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 8
-rw-r--r-- drivers/cpuidle/cpuidle-calxeda.c | 2
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r-- drivers/dma/Kconfig | 7
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 4
-rw-r--r-- drivers/dma/dmaengine.c | 4
-rw-r--r-- drivers/dma/dmatest.c | 8
-rw-r--r-- drivers/dma/fsldma.c | 31
-rw-r--r-- drivers/dma/ioat/dma.c | 11
-rw-r--r-- drivers/dma/mv_xor.c | 101
-rw-r--r-- drivers/dma/pl330.c | 5
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 27
-rw-r--r-- drivers/dma/txx9dmac.c | 1
-rw-r--r-- drivers/firewire/sbp2.c | 1
-rw-r--r-- drivers/firmware/Makefile | 1
-rw-r--r-- drivers/firmware/efi/Kconfig | 6
-rw-r--r-- drivers/firmware/efi/Makefile | 2
-rw-r--r-- drivers/firmware/efi/efi-pstore.c | 1
-rw-r--r-- drivers/gpio/gpio-msm-v2.c | 4
-rw-r--r-- drivers/gpio/gpio-rcar.c | 3
-rw-r--r-- drivers/gpio/gpio-twl4030.c | 15
-rw-r--r-- drivers/gpu/drm/armada/armada_drm.h | 1
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 7
-rw-r--r-- drivers/gpu/drm/armada/armada_fbdev.c | 20
-rw-r--r-- drivers/gpu/drm/armada/armada_gem.c | 7
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 8
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 2
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 6
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 80
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 22
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 50
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 156
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1051
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 23
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 50
-rw-r--r-- drivers/gpu/drm/nouveau/core/core/subdev.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/base.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 87
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 12
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
-rw-r--r-- drivers/idle/intel_idle.c | 17
-rw-r--r-- drivers/iio/adc/ad7887.c | 16
-rw-r--r-- drivers/iio/imu/adis16400_core.c | 7
-rw-r--r-- drivers/iio/light/cm36651.c | 2
-rw-r--r-- drivers/infiniband/core/iwcm.c | 11
-rw-r--r-- drivers/infiniband/core/uverbs.h | 10
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 17
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 27
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 78
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 3
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 22
-rw-r--r-- drivers/input/input.c | 4
-rw-r--r-- drivers/input/touchscreen/zforce_ts.c | 21
-rw-r--r-- drivers/irqchip/irq-renesas-intc-irqpin.c | 8
-rw-r--r-- drivers/isdn/hisax/hfc_pci.c | 4
-rw-r--r-- drivers/isdn/hisax/telespci.c | 4
-rw-r--r-- drivers/leds/leds-lp5521.c | 12
-rw-r--r-- drivers/leds/leds-lp5523.c | 12
-rw-r--r-- drivers/md/bcache/alloc.c | 2
-rw-r--r-- drivers/md/bcache/bcache.h | 12
-rw-r--r-- drivers/md/bcache/btree.c | 27
-rw-r--r-- drivers/md/bcache/movinggc.c | 21
-rw-r--r-- drivers/md/bcache/super.c | 2
-rw-r--r-- drivers/md/bcache/sysfs.c | 50
-rw-r--r-- drivers/md/bcache/util.c | 8
-rw-r--r-- drivers/md/bcache/util.h | 2
-rw-r--r-- drivers/md/bcache/writeback.c | 53
-rw-r--r-- drivers/mfd/rtsx_pcr.c | 10
-rw-r--r-- drivers/mtd/maps/pxa2xx-flash.c | 2
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 45
-rw-r--r-- drivers/net/bonding/bond_main.c | 3
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 3
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 3
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 51
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 28
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 94
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 254
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 28
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 21
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 59
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 35
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 103
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 73
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 3
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 33
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 54
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 65
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 60
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 3
-rw-r--r-- drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r-- drivers/net/hamradio/yam.c | 1
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 21
-rw-r--r-- drivers/net/macvlan.c | 26
-rw-r--r-- drivers/net/phy/phy.c | 6
-rw-r--r-- drivers/net/team/team.c | 3
-rw-r--r-- drivers/net/tun.c | 3
-rw-r--r-- drivers/net/usb/Kconfig | 6
-rw-r--r-- drivers/net/usb/dm9601.c | 44
-rw-r--r-- drivers/net/usb/hso.c | 13
-rw-r--r-- drivers/net/usb/mcs7830.c | 19
-rw-r--r-- drivers/net/virtio_net.c | 11
-rw-r--r-- drivers/net/vxlan.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9002_mac.c | 52
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/drv.c | 10
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r-- drivers/net/wireless/mwifiex/main.c | 3
-rw-r--r-- drivers/net/wireless/rtlwifi/pci.c | 4
-rw-r--r-- drivers/net/xen-netback/common.h | 19
-rw-r--r-- drivers/net/xen-netback/interface.c | 11
-rw-r--r-- drivers/net/xen-netback/netback.c | 21
-rw-r--r-- drivers/of/Kconfig | 2
-rw-r--r-- drivers/of/address.c | 8
-rw-r--r-- drivers/of/fdt.c | 12
-rw-r--r-- drivers/of/irq.c | 5
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 30
-rw-r--r-- drivers/pci/pci-acpi.c | 21
-rw-r--r-- drivers/phy/Kconfig | 4
-rw-r--r-- drivers/phy/phy-core.c | 26
-rw-r--r-- drivers/pinctrl/pinctrl-baytrail.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/sh_pfc.h | 2
-rw-r--r-- drivers/power/Kconfig | 1
-rw-r--r-- drivers/power/power_supply_core.c | 12
-rw-r--r-- drivers/powercap/intel_rapl.c | 13
-rw-r--r-- drivers/regulator/s2mps11.c | 2
-rw-r--r-- drivers/s390/char/tty3270.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 10
-rw-r--r-- drivers/staging/bcm/Bcmnet.c | 3
-rw-r--r-- drivers/staging/comedi/drivers.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/8255_pci.c | 15
-rw-r--r-- drivers/staging/iio/magnetometer/hmc5843.c | 7
-rw-r--r-- drivers/staging/imx-drm/imx-drm-core.c | 40
-rw-r--r-- drivers/staging/imx-drm/imx-tve.c | 9
-rw-r--r-- drivers/staging/imx-drm/ipu-v3/ipu-common.c | 32
-rw-r--r-- drivers/staging/netlogic/xlr_net.c | 3
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 27
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 6
-rw-r--r-- drivers/target/target_core_device.c | 5
-rw-r--r-- drivers/target/target_core_file.c | 8
-rw-r--r-- drivers/target/target_core_file.h | 5
-rw-r--r-- drivers/target/target_core_tpg.c | 10
-rw-r--r-- drivers/tty/n_tty.c | 7
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 8
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 2
-rw-r--r-- drivers/tty/tty_ldsem.c | 16
-rw-r--r-- drivers/usb/chipidea/core.c | 4
-rw-r--r-- drivers/usb/chipidea/host.c | 3
-rw-r--r-- drivers/usb/chipidea/udc.c | 3
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 8
-rw-r--r-- drivers/usb/dwc3/core.c | 8
-rw-r--r-- drivers/usb/host/ohci-at91.c | 26
-rw-r--r-- drivers/usb/host/xhci-pci.c | 7
-rw-r--r-- drivers/usb/phy/Kconfig | 4
-rw-r--r-- drivers/usb/phy/phy-tegra-usb.c | 2
-rw-r--r-- drivers/usb/phy/phy-twl6030-usb.c | 3
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/usb/serial/zte_ev.c | 3
-rw-r--r-- drivers/virtio/virtio_balloon.c | 2
-rw-r--r-- drivers/xen/balloon.c | 63
-rw-r--r-- drivers/xen/grant-table.c | 3
-rw-r--r-- drivers/xen/privcmd.c | 9
271 files changed, 2980 insertions, 2302 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
 config ACPI_EXTLOG
         tristate "Extended Error Log support"
         depends on X86_MCE && X86_LOCAL_APIC
-        select EFI
         select UEFI_CPER
         default n
         help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e3797165..3c2e4aa529c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
                 goto end;
 
         result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-                        ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+                        ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
         if (result) {
                 power_supply_unregister(&ac->charger);
                 goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
                 return -EINVAL;
 
         acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-                        ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+                        ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
 
         ac = platform_get_drvdata(pdev);
         if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
162 { "80860F14", (unsigned long)&byt_sdio_dev_desc }, 162 { "80860F14", (unsigned long)&byt_sdio_dev_desc },
163 { "80860F41", (unsigned long)&byt_i2c_dev_desc }, 163 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
164 { "INT33B2", }, 164 { "INT33B2", },
165 { "INT33FC", },
165 166
166 { "INT3430", (unsigned long)&lpt_dev_desc }, 167 { "INT3430", (unsigned long)&lpt_dev_desc },
167 { "INT3431", (unsigned long)&lpt_dev_desc }, 168 { "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)" 2 bool "ACPI Platform Error Interface (APEI)"
3 select MISC_FILESYSTEMS 3 select MISC_FILESYSTEMS
4 select PSTORE 4 select PSTORE
5 select EFI
6 select UEFI_CPER 5 select UEFI_CPER
7 depends on X86 6 depends on X86
8 help 7 help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 static struct pstore_info erst_info = {
         .owner          = THIS_MODULE,
         .name           = "erst",
+        .flags          = PSTORE_FLAGS_FRAGILE,
         .open           = erst_open_pstore,
         .close          = erst_close_pstore,
         .read           = erst_reader,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1aceda8b8..5876a49dfd38 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
 MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");
 
+static int battery_bix_broken_package;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
                 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
                 return -ENODEV;
         }
-        if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+
+        if (battery_bix_broken_package)
+                result = extract_package(battery, buffer.pointer,
+                                extended_info_offsets + 1,
+                                ARRAY_SIZE(extended_info_offsets) - 1);
+        else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
                 result = extract_package(battery, buffer.pointer,
                                 extended_info_offsets,
                                 ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
         return 0;
 }
 
+static struct dmi_system_id bat_dmi_table[] = {
+        {
+                .ident = "NEC LZ750/LS",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+                },
+        },
+        {},
+};
+
 static int acpi_battery_add(struct acpi_device *device)
 {
         int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
         if (acpi_disabled)
                 return;
+
+        if (dmi_check_system(bat_dmi_table))
+                battery_bix_broken_package = 1;
         acpi_bus_register_driver(&acpi_battery_driver);
 }
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72e25f8..0710004055c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 }
 EXPORT_SYMBOL(acpi_bus_get_private_data);
 
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+        struct acpi_device *adev = NULL;
+
+        acpi_bus_get_device(handle, &adev);
+        if (adev)
+                adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
 static void acpi_print_osc_error(acpi_handle handle,
         struct acpi_osc_context *context, char *error)
 {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e9506338..e3a92a6da39a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
                 .driver_data = board_ahci_yes_fbs },        /* 88se9128 */
         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
                 .driver_data = board_ahci_yes_fbs },        /* 88se9125 */
+        { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+                         PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+                .driver_data = board_ahci_yes_fbs },        /* 88se9170 */
         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
                 .driver_data = board_ahci_yes_fbs },        /* 88se9172 */
         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (rc)
                 return rc;
 
-        /* AHCI controllers often implement SFF compatible interface.
-         * Grab all PCI BARs just in case.
-         */
-        rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
-        if (rc == -EBUSY)
-                pcim_pin_device(pdev);
-        if (rc)
-                return rc;
-
         if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
             (pdev->device == 0x2652 || pdev->device == 0x2653)) {
                 u8 map;
@@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 }
         }
 
+        /* AHCI controllers often implement SFF compatible interface.
+         * Grab all PCI BARs just in case.
+         */
+        rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+        if (rc == -EBUSY)
+                pcim_pin_device(pdev);
+        if (rc)
+                return rc;
+
         hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
         if (!hpriv)
                 return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
         /*
          * set PHY Paremeters, two steps to configure the GPR13,
          * one write for rest of parameters, mask of first write
-         * is 0x07fffffd, and the other one write for setting
+         * is 0x07ffffff, and the other one write for setting
          * the mpll_clk_en.
          */
         regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
                         | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
                         | IMX6Q_GPR13_SATA_TX_BOOST_MASK
                         | IMX6Q_GPR13_SATA_TX_LVL_MASK
+                        | IMX6Q_GPR13_SATA_MPLL_CLK_EN
                         | IMX6Q_GPR13_SATA_TX_EDGE_RATE
                         , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
                         | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b93678bbcd..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2150 err_mask); 2150 err_mask);
2151 } else { 2151 } else {
2152 u8 *cmds = dev->ncq_send_recv_cmds;
2153
2152 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2154 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2153 memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, 2155 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2154 ATA_LOG_NCQ_SEND_RECV_SIZE); 2156
2157 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2158 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2159 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2160 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2161 }
2155 } 2162 }
2156 } 2163 }
2157 2164
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4156 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4163 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4157 ATA_HORKAGE_FIRMWARE_WARN }, 4164 ATA_HORKAGE_FIRMWARE_WARN },
4158 4165
4166 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
4167 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4168
4159 /* Blacklist entries taken from Silicon Image 3124/3132 4169 /* Blacklist entries taken from Silicon Image 3124/3132
4160 Windows driver .inf file - also several Linux problem reports */ 4170 Windows driver .inf file - also several Linux problem reports */
4161 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4171 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4202 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4212 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4203 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4213 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4204 4214
4215 /* devices that don't properly handle queued TRIM commands */
4216 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4217 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4218
4205 /* End Marker */ 4219 /* End Marker */
4206 { } 4220 { }
4207}; 4221};
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
6519 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6533 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6534 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6521 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6535 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6536 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6522 }; 6537 };
6523 char *start = *cur, *p = *cur; 6538 char *start = *cur, *p = *cur;
6524 char *id, *val, *endp; 6539 char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556d347c..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
                 return;
         }
 
+        /*
+         * XXX - UGLY HACK
+         *
+         * The block layer suspend/resume path is fundamentally broken due
+         * to freezable kthreads and workqueue and may deadlock if a block
+         * device gets removed while resume is in progress.  I don't know
+         * what the solution is short of removing freezable kthreads and
+         * workqueues altogether.
+         *
+         * The following is an ugly hack to avoid kicking off device
+         * removal while freezer is active.  This is a joke but does avoid
+         * this particular deadlock scenario.
+         *
+         * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+         * http://marc.info/?l=linux-kernel&m=138695698516487
+         */
+#ifdef CONFIG_FREEZER
+        while (pm_freezing)
+                msleep(10);
+#endif
+
         DPRINTK("ENTER\n");
         mutex_lock(&ap->scsi_scan_mutex);
 
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca0989b14..1ad2f62d34b9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
         .id_table               = sis_pci_tbl,
         .probe                  = sis_init_one,
         .remove                 = ata_pci_remove_one,
+#ifdef CONFIG_PM
+        .suspend                = ata_pci_device_suspend,
+        .resume                 = ata_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template sis_sht = {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
         NULL_Q_MQ       = 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-                                hctx_index);
+        int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+        int tip = (reg->nr_hw_queues % nr_online_nodes);
+        int node = 0, i, n;
+
+        /*
+         * Split submit queues evenly wrt to the number of nodes. If uneven,
+         * fill the first buckets with one extra, until the rest is filled with
+         * no extra.
+         */
+        for (i = 0, n = 1; i < hctx_index; i++, n++) {
+                if (n % b_size == 0) {
+                        n = 0;
+                        node++;
+
+                        tip--;
+                        if (!tip)
+                                b_size = reg->nr_hw_queues / nr_online_nodes;
+                }
+        }
+
+        /*
+         * A node might not be online, therefore map the relative node id to the
+         * real node id.
+         */
+        for_each_online_node(n) {
+                if (!node)
+                        break;
+                node--;
+        }
+
+        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
         kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+        BUG_ON(!nullb);
+        BUG_ON(!nq);
+
+        init_waitqueue_head(&nq->wait);
+        nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                           unsigned int index)
 {
         struct nullb *nullb = data;
         struct nullb_queue *nq = &nullb->queues[index];
 
-        init_waitqueue_head(&nq->wait);
-        nq->queue_depth = nullb->queue_depth;
-        nullb->nr_queues++;
         hctx->driver_data = nq;
+        null_init_queue(nullb, nq);
+        nullb->nr_queues++;
 
         return 0;
 }
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
         list_del_init(&nullb->list);
 
         del_gendisk(nullb->disk);
-        if (queue_mode == NULL_Q_MQ)
-                blk_mq_free_queue(nullb->q);
-        else
-                blk_cleanup_queue(nullb->q);
+        blk_cleanup_queue(nullb->q);
         put_disk(nullb->disk);
         kfree(nullb);
 }
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq)
 
         nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
         if (!nq->cmds)
-                return 1;
+                return -ENOMEM;
 
         tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
         nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
         if (!nq->tag_map) {
                 kfree(nq->cmds);
-                return 1;
+                return -ENOMEM;
         }
 
         for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-        struct nullb_queue *nq;
-        int i;
-
-        nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+                                                                GFP_KERNEL);
         if (!nullb->queues)
-                return 1;
+                return -ENOMEM;
 
         nullb->nr_queues = 0;
         nullb->queue_depth = hw_queue_depth;
 
-        if (queue_mode == NULL_Q_MQ)
-                return 0;
+        return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+        struct nullb_queue *nq;
+        int i, ret = 0;
 
         for (i = 0; i < submit_queues; i++) {
                 nq = &nullb->queues[i];
-                init_waitqueue_head(&nq->wait);
-                nq->queue_depth = hw_queue_depth;
-                if (setup_commands(nq))
-                        break;
+
+                null_init_queue(nullb, nq);
+
+                ret = setup_commands(nq);
+                if (ret)
+                        goto err_queue;
                 nullb->nr_queues++;
         }
 
-        if (i == submit_queues)
-                return 0;
-
+        return 0;
+err_queue:
         cleanup_queues(nullb);
-        return 1;
+        return ret;
 }
 
 static int null_add_dev(void)
@@ -518,11 +557,13 @@ static int null_add_dev(void)
         } else if (queue_mode == NULL_Q_BIO) {
                 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                 blk_queue_make_request(nullb->q, null_queue_bio);
+                init_driver_queues(nullb);
         } else {
                 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                 if (nullb->q)
                         blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+                init_driver_queues(nullb);
         }
 
         if (!nullb->q)
@@ -534,10 +575,7 @@ static int null_add_dev(void)
         disk = nullb->disk = alloc_disk_node(1, home_node);
         if (!disk) {
 queue_fail:
-                if (queue_mode == NULL_Q_MQ)
-                        blk_mq_free_queue(nullb->q);
-                else
-                        blk_cleanup_queue(nullb->q);
+                blk_cleanup_queue(nullb->q);
                 cleanup_queues(nullb);
 err:
                 kfree(nullb);
@@ -579,7 +617,13 @@ static int __init null_init(void)
         }
 #endif
 
-        if (submit_queues > nr_cpu_ids)
+        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+                if (submit_queues < nr_online_nodes) {
+                        pr_warn("null_blk: submit_queues param is set to %u.",
+                                                        nr_online_nodes);
+                        submit_queues = nr_online_nodes;
+                }
+        } else if (submit_queues > nr_cpu_ids)
                 submit_queues = nr_cpu_ids;
         else if (!submit_queues)
                 submit_queues = 1;
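
The node-bucketing loop added to null_alloc_hctx() above is easier to follow in isolation. Below is a minimal stand-alone sketch of the same splitting scheme; hctx_to_node() is a hypothetical helper with the queue and node counts passed in rather than taken from blk_mq_reg and nr_online_nodes. Queues are split evenly across nodes, and when the division is uneven the first buckets each take one extra queue, so 8 queues on 3 nodes gives buckets of 3, 3 and 2.

#include <stdio.h>

/* Sketch of the bucketing used by null_alloc_hctx(): split nr_queues
 * across nr_nodes so the first (nr_queues % nr_nodes) buckets hold one
 * extra queue. Returns the relative node id that hctx_index lands in. */
static int hctx_to_node(int hctx_index, int nr_queues, int nr_nodes)
{
        int b_size = (nr_queues + nr_nodes - 1) / nr_nodes; /* DIV_ROUND_UP */
        int tip = nr_queues % nr_nodes;
        int node = 0, i, n;

        for (i = 0, n = 1; i < hctx_index; i++, n++) {
                if (n % b_size == 0) {
                        n = 0;
                        node++;
                        tip--;
                        if (!tip)
                                b_size = nr_queues / nr_nodes;
                }
        }
        return node;
}

int main(void)
{
        int i;

        /* 8 queues over 3 nodes: hctx 0-2 -> node 0, 3-5 -> node 1, 6-7 -> node 2 */
        for (i = 0; i < 8; i++)
                printf("hctx %d -> node %d\n", i, hctx_to_node(i, 8, 3));
        return 0;
}

The driver then maps this relative node id to a real online node id with for_each_online_node(), since node numbering may have holes.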
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
         }
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
         switch (state) {
         case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
         }
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
         switch (state) {
         case SKD_REQ_STATE_IDLE:
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb318f6..dceb85f8d9a8 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
         { USB_DEVICE(0x0CF3, 0xE004) },
         { USB_DEVICE(0x0CF3, 0xE005) },
         { USB_DEVICE(0x0930, 0x0219) },
+        { USB_DEVICE(0x0930, 0x0220) },
         { USB_DEVICE(0x0489, 0xe057) },
         { USB_DEVICE(0x13d3, 0x3393) },
         { USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
         { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f2d2df..3980fd18f6ea 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
         { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc65601..e1f3337a0cf9 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
 static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
                                 void **return_value)
 {
-        acpi_status status;
+        acpi_status status = AE_OK;
         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-        status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-        if (strstr(buffer.pointer, context) != NULL) {
-                *return_value = handle;
+
+        if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
+                if (strstr(buffer.pointer, context) != NULL) {
+                        *return_value = handle;
+                        status = AE_CTRL_TERMINATE;
+                }
                 kfree(buffer.pointer);
-                return AE_CTRL_TERMINATE;
         }
-        return AE_OK;
+
+        return status;
 }
40 43
41static inline void ppi_assign_params(union acpi_object params[4], 44static inline void ppi_assign_params(union acpi_object params[4],
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e44fba..5543b7df8e16 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
         return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
 {
         if (divider->flags & CLK_DIVIDER_ONE_BASED)
                 return div;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
         struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
         int ret;
 
-        ret = regmap_update_bits(s2mps11->iodev->regmap,
+        ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
                                  S2MPS11_REG_RTC_CTRL,
                                  s2mps11->mask, s2mps11->mask);
         if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
         struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
         int ret;
 
-        ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+        ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
                            s2mps11->mask, ~s2mps11->mask);
 
         if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
                 s2mps11_clk->hw.init = &s2mps11_clks_init[i];
                 s2mps11_clk->mask = 1 << i;
 
-                ret = regmap_read(s2mps11_clk->iodev->regmap,
+                ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
                                   S2MPS11_REG_RTC_CTRL, &val);
                 if (ret < 0)
                         goto err_reg;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aaede2b..68e515d093d8 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
 #define ASS_CLK_DIV             0x4
 #define ASS_CLK_GATE            0x8
 
+/* list of all parent clock list */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
 static unsigned long reg_save[][2] = {
         {ASS_CLK_SRC,  0},
         {ASS_CLK_DIV,  0},
         {ASS_CLK_GATE, 0},
 };
 
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
 static int exynos_audss_clk_suspend(void)
 {
         int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50c5f28..1a7c1b929c69 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
 #define SRC_TOP1                0xc214
 #define SRC_CAM                 0xc220
 #define SRC_TV                  0xc224
-#define SRC_MFC                 0xcc28
+#define SRC_MFC                 0xc228
 #define SRC_G3D                 0xc22c
 #define E4210_SRC_IMAGE         0xc230
 #define SRC_LCD0                0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf32343c9f9..e52359cf9b6f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
 #define MPLL_LOCK               0x4000
 #define MPLL_CON0               0x4100
 #define SRC_CORE1               0x4204
+#define GATE_IP_ACP             0x8800
 #define CPLL_LOCK               0x10020
 #define EPLL_LOCK               0x10030
 #define VPLL_LOCK               0x10040
@@ -75,7 +76,6 @@
 #define SRC_CDREX               0x20200
 #define PLL_DIV2_SEL            0x20a24
 #define GATE_IP_DISP1           0x10928
-#define GATE_IP_ACP             0x10000
 
 /* list of PLLs to be registered */
 enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
         spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
         hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
         tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
-        wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+        wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
+        smmu_mdma0,
 
         /* mux clocks */
         mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
         GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
         GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
         GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
-        GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
-        GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+        GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
+        GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
         GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
         GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
         GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
         GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
         GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
         GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
-        GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+        GATE(sysreg, "sysreg", "aclk66",
+                        GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
         GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
         GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
         GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
         GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
         GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
         GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+        GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
+        GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
 };
 
 static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56962db..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
 config CLKSRC_EFM32
         bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
         depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+        select CLKSRC_MMIO
         default ARCH_EFM32
         help
           Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
 
                 init_func = match->data;
                 init_func(np);
-                of_node_put(np);
         }
 }
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
 
 static u64 read_sched_clock(void)
 {
-        return __raw_readl(sched_io_base);
+        return ~__raw_readl(sched_io_base);
 }
 
 static const struct of_device_id sptimer_ids[] __initconst = {
         { .compatible = "picochip,pc3x2-rtc" },
-        { .compatible = "snps,dw-apb-timer-sp" },
         { /* Sentinel */ },
 };
 
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
         num_called++;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
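
One note on the read_sched_clock() change above: the DW APB timer is a down-counter that starts at 0xffffffff, while sched_clock() expects a monotonically increasing value, so the raw register value is returned bitwise-inverted. A minimal stand-alone illustration (to_up_count() is a hypothetical helper, not driver code):

#include <stdint.h>
#include <stdio.h>

/* A 32-bit down-counter reads 0xffffffff at start and decreases; its
 * bitwise complement starts at 0 and increases, which is the shape of
 * value sched_clock() needs. */
static uint32_t to_up_count(uint32_t raw)
{
        return ~raw;
}

int main(void)
{
        printf("%u\n", to_up_count(0xffffffffu)); /* prints 0 */
        printf("%u\n", to_up_count(0xfffffffdu)); /* prints 2 */
        return 0;
}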
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
         writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
                timer_base + TIMER_CTL_REG(0));
 
+        /* Make sure timer is stopped before playing with interrupts */
+        sun4i_clkevt_time_stop(0);
+
         ret = setup_irq(irq, &sun4i_timer_irq);
         if (ret)
                 pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
         ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
         /*
-         * Set scale and timer for sched_clock.
-         */
-        sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
-        /*
          * Setup free-running clocksource timer (interrupts
          * disabled).
          */
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
         timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
                 TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
+        /*
+         * Set scale and timer for sched_clock.
+         */
+        sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
         clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
                               "armada_370_xp_clocksource",
                               timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..8d19f7c06010 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
         int ret = 0;
 
         memcpy(&new_policy, policy, sizeof(*policy));
+
+        /* Use the default policy if its valid. */
+        if (cpufreq_driver->setpolicy)
+                cpufreq_parse_governor(policy->governor->name,
+                                        &new_policy.policy, NULL);
+
         /* assure that the starting sequence is run in cpufreq_set_policy */
         policy->governor = NULL;
 
         /* set default policy */
         ret = cpufreq_set_policy(policy, &new_policy);
-        policy->user_policy.policy = policy->policy;
-        policy->user_policy.governor = policy->governor;
-
         if (ret) {
                 pr_debug("setting policy failed\n");
                 if (cpufreq_driver->exit)
@@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
845 848
846#ifdef CONFIG_HOTPLUG_CPU 849#ifdef CONFIG_HOTPLUG_CPU
847static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 850static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
848 unsigned int cpu, struct device *dev, 851 unsigned int cpu, struct device *dev)
849 bool frozen)
850{ 852{
851 int ret = 0; 853 int ret = 0;
852 unsigned long flags; 854 unsigned long flags;
@@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
877 } 879 }
878 } 880 }
879 881
880 /* Don't touch sysfs links during light-weight init */ 882 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
881 if (!frozen)
882 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
883
884 return ret;
885} 883}
886#endif 884#endif
887 885
@@ -926,6 +924,27 @@ err_free_policy:
926 return NULL; 924 return NULL;
927} 925}
928 926
927static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
928{
929 struct kobject *kobj;
930 struct completion *cmp;
931
932 down_read(&policy->rwsem);
933 kobj = &policy->kobj;
934 cmp = &policy->kobj_unregister;
935 up_read(&policy->rwsem);
936 kobject_put(kobj);
937
938 /*
939 * We need to make sure that the underlying kobj is
940 * actually not referenced anymore by anybody before we
941 * proceed with unloading.
942 */
943 pr_debug("waiting for dropping of refcount\n");
944 wait_for_completion(cmp);
945 pr_debug("wait complete\n");
946}
947
929static void cpufreq_policy_free(struct cpufreq_policy *policy) 948static void cpufreq_policy_free(struct cpufreq_policy *policy)
930{ 949{
931 free_cpumask_var(policy->related_cpus); 950 free_cpumask_var(policy->related_cpus);
@@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
986 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { 1005 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
987 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { 1006 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
988 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1007 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
989 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); 1008 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
990 up_read(&cpufreq_rwsem); 1009 up_read(&cpufreq_rwsem);
991 return ret; 1010 return ret;
992 } 1011 }
@@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
994 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1013 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
995#endif 1014#endif
996 1015
997 if (frozen) 1016 /*
998 /* Restore the saved policy when doing light-weight init */ 1017 * Restore the saved policy when doing light-weight init and fall back
999 policy = cpufreq_policy_restore(cpu); 1018 * to the full init if that fails.
1000 else 1019 */
1020 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1021 if (!policy) {
1022 frozen = false;
1001 policy = cpufreq_policy_alloc(); 1023 policy = cpufreq_policy_alloc();
1002 1024 if (!policy)
1003 if (!policy) 1025 goto nomem_out;
1004 goto nomem_out; 1026 }
1005
1006 1027
1007 /* 1028 /*
1008 * In the resume path, since we restore a saved policy, the assignment 1029 * In the resume path, since we restore a saved policy, the assignment
@@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1047 */ 1068 */
1048 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1069 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1049 1070
1050 policy->user_policy.min = policy->min; 1071 if (!frozen) {
1051 policy->user_policy.max = policy->max; 1072 policy->user_policy.min = policy->min;
1073 policy->user_policy.max = policy->max;
1074 }
1052 1075
1053 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1076 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1054 CPUFREQ_START, policy); 1077 CPUFREQ_START, policy);
@@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1079 1102
1080 cpufreq_init_policy(policy); 1103 cpufreq_init_policy(policy);
1081 1104
1105 if (!frozen) {
1106 policy->user_policy.policy = policy->policy;
1107 policy->user_policy.governor = policy->governor;
1108 }
1109
1082 kobject_uevent(&policy->kobj, KOBJ_ADD); 1110 kobject_uevent(&policy->kobj, KOBJ_ADD);
1083 up_read(&cpufreq_rwsem); 1111 up_read(&cpufreq_rwsem);
1084 1112
@@ -1096,7 +1124,13 @@ err_get_freq:
1096 if (cpufreq_driver->exit) 1124 if (cpufreq_driver->exit)
1097 cpufreq_driver->exit(policy); 1125 cpufreq_driver->exit(policy);
1098err_set_policy_cpu: 1126err_set_policy_cpu:
1127 if (frozen) {
1128 /* Do not leave stale fallback data behind. */
1129 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1130 cpufreq_policy_put_kobj(policy);
1131 }
1099 cpufreq_policy_free(policy); 1132 cpufreq_policy_free(policy);
1133
1100nomem_out: 1134nomem_out:
1101 up_read(&cpufreq_rwsem); 1135 up_read(&cpufreq_rwsem);
1102 1136
@@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1118} 1152}
1119 1153
1120static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, 1154static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1121 unsigned int old_cpu, bool frozen) 1155 unsigned int old_cpu)
1122{ 1156{
1123 struct device *cpu_dev; 1157 struct device *cpu_dev;
1124 int ret; 1158 int ret;
@@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1126 /* first sibling now owns the new sysfs dir */ 1160 /* first sibling now owns the new sysfs dir */
1127 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); 1161 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1128 1162
1129 /* Don't touch sysfs files during light-weight tear-down */
1130 if (frozen)
1131 return cpu_dev->id;
1132
1133 sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 1163 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1134 ret = kobject_move(&policy->kobj, &cpu_dev->kobj); 1164 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1135 if (ret) { 1165 if (ret) {
@@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1196 if (!frozen) 1226 if (!frozen)
1197 sysfs_remove_link(&dev->kobj, "cpufreq"); 1227 sysfs_remove_link(&dev->kobj, "cpufreq");
1198 } else if (cpus > 1) { 1228 } else if (cpus > 1) {
1199 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1229 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1200 if (new_cpu >= 0) { 1230 if (new_cpu >= 0) {
1201 update_policy_cpu(policy, new_cpu); 1231 update_policy_cpu(policy, new_cpu);
1202 1232
@@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1218 int ret; 1248 int ret;
1219 unsigned long flags; 1249 unsigned long flags;
1220 struct cpufreq_policy *policy; 1250 struct cpufreq_policy *policy;
1221 struct kobject *kobj;
1222 struct completion *cmp;
1223 1251
1224 read_lock_irqsave(&cpufreq_driver_lock, flags); 1252 read_lock_irqsave(&cpufreq_driver_lock, flags);
1225 policy = per_cpu(cpufreq_cpu_data, cpu); 1253 policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1249 } 1277 }
1250 } 1278 }
1251 1279
1252 if (!frozen) { 1280 if (!frozen)
1253 down_read(&policy->rwsem); 1281 cpufreq_policy_put_kobj(policy);
1254 kobj = &policy->kobj;
1255 cmp = &policy->kobj_unregister;
1256 up_read(&policy->rwsem);
1257 kobject_put(kobj);
1258
1259 /*
1260 * We need to make sure that the underlying kobj is
1261 * actually not referenced anymore by anybody before we
1262 * proceed with unloading.
1263 */
1264 pr_debug("waiting for dropping of refcount\n");
1265 wait_for_completion(cmp);
1266 pr_debug("wait complete\n");
1267 }
1268 1282
1269 /* 1283 /*
1270 * Perform the ->exit() even during light-weight tear-down, 1284 * Perform the ->exit() even during light-weight tear-down,
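
The new cpufreq_policy_put_kobj() captures the standard kobject teardown: drop the last reference, then block on a completion that the release callback signals, so the backing memory is reclaimed only after all sysfs users are gone. A generic sketch of that pattern, assuming an object that embeds both:

    #include <linux/kobject.h>
    #include <linux/completion.h>

    struct my_obj {
    	struct kobject kobj;
    	struct completion released;
    };

    static void my_obj_release(struct kobject *kobj)
    {
    	struct my_obj *o = container_of(kobj, struct my_obj, kobj);

    	complete(&o->released);		/* final reference just dropped */
    }

    static struct kobj_type my_ktype = {
    	.release = my_obj_release,
    };

    static void my_obj_put_and_wait(struct my_obj *o)
    {
    	kobject_put(&o->kobj);
    	/* don't free or unload until the release callback has run */
    	wait_for_completion(&o->released);
    }
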
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae36961..d51f17ed691e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
581} 581}
582 582
583#define ICPU(model, policy) \ 583#define ICPU(model, policy) \
584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } 584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
585 (unsigned long)&policy }
585 586
586static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 587static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
587 ICPU(0x2a, core_params), 588 ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
614 cpu = all_cpu_data[cpunum]; 615 cpu = all_cpu_data[cpunum];
615 616
616 intel_pstate_get_cpu_pstates(cpu); 617 intel_pstate_get_cpu_pstates(cpu);
618 if (!cpu->pstate.current_pstate) {
619 all_cpu_data[cpunum] = NULL;
620 kfree(cpu);
621 return -ENODATA;
622 }
617 623
618 cpu->cpu = cpunum; 624 cpu->cpu = cpunum;
619 625
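
The ICPU change narrows the match: binding now also requires the APERF/MPERF feature bit, since the driver samples those counters, and init bails out when no valid P-state could be read. A sketch of feature-qualified CPU matching (the model number is purely illustrative):

    #include <linux/mod_devicetable.h>
    #include <asm/cpu_device_id.h>
    #include <asm/cpufeature.h>

    static const struct x86_cpu_id my_cpu_ids[] = {
    	/* family 6, model 0x2a, and the APERF/MPERF counters must exist */
    	{ X86_VENDOR_INTEL, 6, 0x2a, X86_FEATURE_APERFMPERF, 0 },
    	{}
    };

    static int __init my_driver_init(void)
    {
    	if (!x86_match_cpu(my_cpu_ids))
    		return -ENODEV;		/* wrong CPU or missing feature */
    	return 0;
    }
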
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 36795639df0d..6e51114057d0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
65 .state_count = 2, 65 .state_count = 2,
66}; 66};
67 67
68static int __init calxeda_cpuidle_probe(struct platform_device *pdev) 68static int calxeda_cpuidle_probe(struct platform_device *pdev)
69{ 69{
70 return cpuidle_register(&calxeda_idle_driver, NULL); 70 return cpuidle_register(&calxeda_idle_driver, NULL);
71} 71}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01eac33..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
1410static int __init ixp_module_init(void) 1410static int __init ixp_module_init(void)
1411{ 1411{
1412 int num = ARRAY_SIZE(ixp4xx_algos); 1412 int num = ARRAY_SIZE(ixp4xx_algos);
1413 int i, err ; 1413 int i, err;
1414 1414
1415 pdev = platform_device_register_full(&ixp_dev_info); 1415 pdev = platform_device_register_full(&ixp_dev_info);
1416 if (IS_ERR(pdev)) 1416 if (IS_ERR(pdev))
1417 return PTR_ERR(pdev); 1417 return PTR_ERR(pdev);
1418 1418
1419 dev = &pdev->dev;
1420
1421 spin_lock_init(&desc_lock); 1419 spin_lock_init(&desc_lock);
1422 spin_lock_init(&emerg_lock); 1420 spin_lock_init(&emerg_lock);
1423 1421
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
62 tristate "Intel I/OAT DMA support" 62 tristate "Intel I/OAT DMA support"
63 depends on PCI && X86 63 depends on PCI && X86
64 select DMA_ENGINE 64 select DMA_ENGINE
65 select DMA_ENGINE_RAID
65 select DCA 66 select DCA
66 help 67 help
67 Enable support for the Intel(R) I/OAT DMA engine present 68 Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
112 bool "Marvell XOR engine support" 113 bool "Marvell XOR engine support"
113 depends on PLAT_ORION 114 depends on PLAT_ORION
114 select DMA_ENGINE 115 select DMA_ENGINE
116 select DMA_ENGINE_RAID
115 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 117 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
116 ---help--- 118 ---help---
117 Enable support for the Marvell XOR engine. 119 Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
187 tristate "AMCC PPC440SPe ADMA support" 189 tristate "AMCC PPC440SPe ADMA support"
188 depends on 440SPe || 440SP 190 depends on 440SPe || 440SP
189 select DMA_ENGINE 191 select DMA_ENGINE
192 select DMA_ENGINE_RAID
190 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL 193 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
191 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 194 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
192 help 195 help
@@ -352,6 +355,7 @@ config NET_DMA
352 bool "Network: TCP receive copy offload" 355 bool "Network: TCP receive copy offload"
353 depends on DMA_ENGINE && NET 356 depends on DMA_ENGINE && NET
354 default (INTEL_IOATDMA || FSL_DMA) 357 default (INTEL_IOATDMA || FSL_DMA)
358 depends on BROKEN
355 help 359 help
356 This enables the use of DMA engines in the network stack to 360 This enables the use of DMA engines in the network stack to
357 offload receive copy-to-user operations, freeing CPU cycles. 361 offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
377 Simple DMA test client. Say N unless you're debugging a 381 Simple DMA test client. Say N unless you're debugging a
378 DMA Device driver. 382 DMA Device driver.
379 383
384config DMA_ENGINE_RAID
385 bool
386
380endif 387endif
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
347{ 347{
348 return &chan->dev->device; 348 return &chan->dev->device;
349} 349}
350static struct device *chan2parent(struct dma_chan *chan)
351{
352 return chan->dev->device.parent;
353}
354 350
355#if defined(VERBOSE_DEBUG) 351#if defined(VERBOSE_DEBUG)
356static void vdbg_dump_regs(struct at_dma_chan *atchan) 352static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24095ff8a93b..ed610b497518 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -967,7 +967,7 @@ struct dmaengine_unmap_pool {
967#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } 967#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
968static struct dmaengine_unmap_pool unmap_pool[] = { 968static struct dmaengine_unmap_pool unmap_pool[] = {
969 __UNMAP_POOL(2), 969 __UNMAP_POOL(2),
970 #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) 970 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
971 __UNMAP_POOL(16), 971 __UNMAP_POOL(16),
972 __UNMAP_POOL(128), 972 __UNMAP_POOL(128),
973 __UNMAP_POOL(256), 973 __UNMAP_POOL(256),
@@ -1109,7 +1109,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1109 dma_cookie_t cookie; 1109 dma_cookie_t cookie;
1110 unsigned long flags; 1110 unsigned long flags;
1111 1111
1112 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); 1112 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
1113 if (!unmap) 1113 if (!unmap)
1114 return -ENOMEM; 1114 return -ENOMEM;
1115 1115
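
Two independent fixes sit in this dmaengine.c diff: the large unmap pools are only built when a RAID-capable driver selects DMA_ENGINE_RAID, and the pg-to-pg copy path allocates its unmap data with GFP_NOWAIT because its callers may not sleep. A sketch of the allocation side:

    #include <linux/dmaengine.h>

    static int my_prep_copy(struct device *dev)
    {
    	struct dmaengine_unmap_data *unmap;

    	/* fast path that must not block: fail fast and let the caller
    	 * fall back (e.g. to a plain CPU copy) */
    	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
    	if (!unmap)
    		return -ENOMEM;

    	/* ... map the two pages, prep and submit the descriptor ... */

    	dmaengine_unmap_put(unmap);
    	return 0;
    }
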
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
539 539
540 um->len = params->buf_size; 540 um->len = params->buf_size;
541 for (i = 0; i < src_cnt; i++) { 541 for (i = 0; i < src_cnt; i++) {
542 unsigned long buf = (unsigned long) thread->srcs[i]; 542 void *buf = thread->srcs[i];
543 struct page *pg = virt_to_page(buf); 543 struct page *pg = virt_to_page(buf);
544 unsigned pg_off = buf & ~PAGE_MASK; 544 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
545 545
546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off, 546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
547 um->len, DMA_TO_DEVICE); 547 um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
560 dsts = &um->addr[src_cnt]; 560 dsts = &um->addr[src_cnt];
561 for (i = 0; i < dst_cnt; i++) { 561 for (i = 0; i < dst_cnt; i++) {
562 unsigned long buf = (unsigned long) thread->dsts[i]; 562 void *buf = thread->dsts[i];
563 struct page *pg = virt_to_page(buf); 563 struct page *pg = virt_to_page(buf);
564 unsigned pg_off = buf & ~PAGE_MASK; 564 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
565 565
566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, 566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
567 DMA_BIDIRECTIONAL); 567 DMA_BIDIRECTIONAL);
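
The dmatest cleanup keeps the buffer pointer a void * and casts only for the offset arithmetic. A sketch of splitting a kernel virtual address into page plus offset for dma_map_page(), assuming the buffer came from kmalloc() and is therefore physically contiguous:

    #include <linux/mm.h>
    #include <linux/dma-mapping.h>

    static dma_addr_t my_map_buf(struct device *dev, void *buf, size_t len)
    {
    	struct page *pg = virt_to_page(buf);
    	unsigned int pg_off = (unsigned long)buf & ~PAGE_MASK;

    	/* pg_off could equally be offset_in_page(buf) */
    	return dma_map_page(dev, pg, pg_off, len, DMA_TO_DEVICE);
    }
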
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
86 hw->count = CPU_TO_DMA(chan, count, 32); 86 hw->count = CPU_TO_DMA(chan, count, 32);
87} 87}
88 88
89static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
90{
91 return DMA_TO_CPU(chan, desc->hw.count, 32);
92}
93
94static void set_desc_src(struct fsldma_chan *chan, 89static void set_desc_src(struct fsldma_chan *chan,
95 struct fsl_dma_ld_hw *hw, dma_addr_t src) 90 struct fsl_dma_ld_hw *hw, dma_addr_t src)
96{ 91{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
101 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 96 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
102} 97}
103 98
104static dma_addr_t get_desc_src(struct fsldma_chan *chan,
105 struct fsl_desc_sw *desc)
106{
107 u64 snoop_bits;
108
109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
110 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
111 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
112}
113
114static void set_desc_dst(struct fsldma_chan *chan, 99static void set_desc_dst(struct fsldma_chan *chan,
115 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 100 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
116{ 101{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 106 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
122} 107}
123 108
124static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
125 struct fsl_desc_sw *desc)
126{
127 u64 snoop_bits;
128
129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
130 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
131 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
132}
133
134static void set_desc_next(struct fsldma_chan *chan, 109static void set_desc_next(struct fsldma_chan *chan,
135 struct fsl_dma_ld_hw *hw, dma_addr_t next) 110 struct fsl_dma_ld_hw *hw, dma_addr_t next)
136{ 111{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
408 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 383 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
409 struct fsl_desc_sw *child; 384 struct fsl_desc_sw *child;
410 unsigned long flags; 385 unsigned long flags;
411 dma_cookie_t cookie; 386 dma_cookie_t cookie = -EINVAL;
412 387
413 spin_lock_irqsave(&chan->desc_lock, flags); 388 spin_lock_irqsave(&chan->desc_lock, flags);
414 389
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
854 struct fsl_desc_sw *desc) 829 struct fsl_desc_sw *desc)
855{ 830{
856 struct dma_async_tx_descriptor *txd = &desc->async_tx; 831 struct dma_async_tx_descriptor *txd = &desc->async_tx;
857 struct device *dev = chan->common.device->dev;
858 dma_addr_t src = get_desc_src(chan, desc);
859 dma_addr_t dst = get_desc_dst(chan, desc);
860 u32 len = get_desc_cnt(chan, desc);
861 832
862 /* Run the link descriptor callback function */ 833 /* Run the link descriptor callback function */
863 if (txd->callback) { 834 if (txd->callback) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c777607c..87529181efcc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
817 } 817 }
818 818
819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
820 if (dma_mapping_error(dev, dma_src)) {
821 dev_err(dev, "mapping src buffer failed\n");
822 goto free_resources;
823 }
820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 824 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
825 if (dma_mapping_error(dev, dma_dest)) {
826 dev_err(dev, "mapping dest buffer failed\n");
827 goto unmap_src;
828 }
821 flags = DMA_PREP_INTERRUPT; 829 flags = DMA_PREP_INTERRUPT;
822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 830 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
823 IOAT_TEST_SIZE, flags); 831 IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
855 } 863 }
856 864
857unmap_dma: 865unmap_dma:
858 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
859 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 866 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
867unmap_src:
868 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
860free_resources: 869free_resources:
861 dma->device_free_chan_resources(dma_chan); 870 dma->device_free_chan_resources(dma_chan);
862out: 871out:
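
The ioat self-test now checks both streaming mappings and unwinds in reverse order, so each failure path releases exactly what was already mapped. The shape of the pattern, as a standalone sketch:

    #include <linux/dma-mapping.h>

    static int my_dma_test(struct device *dev, void *src, void *dst, size_t len)
    {
    	dma_addr_t dma_src, dma_dst;
    	int err = -ENOMEM;

    	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dev, dma_src))
    		return err;

    	dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, dma_dst))
    		goto unmap_src;

    	/* ... run and verify the transfer, set err accordingly ... */
    	err = 0;

    	dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
    unmap_src:
    	dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
    	return err;
    }
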
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
54 hw_desc->desc_command = (1 << 31); 54 hw_desc->desc_command = (1 << 31);
55} 55}
56 56
57static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58{
59 struct mv_xor_desc *hw_desc = desc->hw_desc;
60 return hw_desc->phy_dest_addr;
61}
62
63static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 57static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
64 u32 byte_count) 58 u32 byte_count)
65{ 59{
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
787/* 781/*
788 * Perform a transaction to verify the HW works. 782 * Perform a transaction to verify the HW works.
789 */ 783 */
790#define MV_XOR_TEST_SIZE 2000
791 784
792static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) 785static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
793{ 786{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
797 struct dma_chan *dma_chan; 790 struct dma_chan *dma_chan;
798 dma_cookie_t cookie; 791 dma_cookie_t cookie;
799 struct dma_async_tx_descriptor *tx; 792 struct dma_async_tx_descriptor *tx;
793 struct dmaengine_unmap_data *unmap;
800 int err = 0; 794 int err = 0;
801 795
802 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 796 src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
803 if (!src) 797 if (!src)
804 return -ENOMEM; 798 return -ENOMEM;
805 799
806 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 800 dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
807 if (!dest) { 801 if (!dest) {
808 kfree(src); 802 kfree(src);
809 return -ENOMEM; 803 return -ENOMEM;
810 } 804 }
811 805
812 /* Fill in src buffer */ 806 /* Fill in src buffer */
813 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 807 for (i = 0; i < PAGE_SIZE; i++)
814 ((u8 *) src)[i] = (u8)i; 808 ((u8 *) src)[i] = (u8)i;
815 809
816 dma_chan = &mv_chan->dmachan; 810 dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
819 goto out; 813 goto out;
820 } 814 }
821 815
822 dest_dma = dma_map_single(dma_chan->device->dev, dest, 816 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
823 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 817 if (!unmap) {
818 err = -ENOMEM;
819 goto free_resources;
820 }
821
822 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
823 PAGE_SIZE, DMA_TO_DEVICE);
824 unmap->to_cnt = 1;
825 unmap->addr[0] = src_dma;
824 826
825 src_dma = dma_map_single(dma_chan->device->dev, src, 827 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
826 MV_XOR_TEST_SIZE, DMA_TO_DEVICE); 828 PAGE_SIZE, DMA_FROM_DEVICE);
829 unmap->from_cnt = 1;
830 unmap->addr[1] = dest_dma;
831
832 unmap->len = PAGE_SIZE;
827 833
828 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 834 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
829 MV_XOR_TEST_SIZE, 0); 835 PAGE_SIZE, 0);
830 cookie = mv_xor_tx_submit(tx); 836 cookie = mv_xor_tx_submit(tx);
831 mv_xor_issue_pending(dma_chan); 837 mv_xor_issue_pending(dma_chan);
832 async_tx_ack(tx); 838 async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
841 } 847 }
842 848
843 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 849 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
844 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 850 PAGE_SIZE, DMA_FROM_DEVICE);
845 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 851 if (memcmp(src, dest, PAGE_SIZE)) {
846 dev_err(dma_chan->device->dev, 852 dev_err(dma_chan->device->dev,
847 "Self-test copy failed compare, disabling\n"); 853 "Self-test copy failed compare, disabling\n");
848 err = -ENODEV; 854 err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
850 } 856 }
851 857
852free_resources: 858free_resources:
859 dmaengine_unmap_put(unmap);
853 mv_xor_free_chan_resources(dma_chan); 860 mv_xor_free_chan_resources(dma_chan);
854out: 861out:
855 kfree(src); 862 kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
867 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 874 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
868 dma_addr_t dest_dma; 875 dma_addr_t dest_dma;
869 struct dma_async_tx_descriptor *tx; 876 struct dma_async_tx_descriptor *tx;
877 struct dmaengine_unmap_data *unmap;
870 struct dma_chan *dma_chan; 878 struct dma_chan *dma_chan;
871 dma_cookie_t cookie; 879 dma_cookie_t cookie;
872 u8 cmp_byte = 0; 880 u8 cmp_byte = 0;
873 u32 cmp_word; 881 u32 cmp_word;
874 int err = 0; 882 int err = 0;
883 int src_count = MV_XOR_NUM_SRC_TEST;
875 884
876 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 885 for (src_idx = 0; src_idx < src_count; src_idx++) {
877 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 886 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
878 if (!xor_srcs[src_idx]) { 887 if (!xor_srcs[src_idx]) {
879 while (src_idx--) 888 while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
890 } 899 }
891 900
892 /* Fill in src buffers */ 901 /* Fill in src buffers */
893 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 902 for (src_idx = 0; src_idx < src_count; src_idx++) {
894 u8 *ptr = page_address(xor_srcs[src_idx]); 903 u8 *ptr = page_address(xor_srcs[src_idx]);
895 for (i = 0; i < PAGE_SIZE; i++) 904 for (i = 0; i < PAGE_SIZE; i++)
896 ptr[i] = (1 << src_idx); 905 ptr[i] = (1 << src_idx);
897 } 906 }
898 907
899 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) 908 for (src_idx = 0; src_idx < src_count; src_idx++)
900 cmp_byte ^= (u8) (1 << src_idx); 909 cmp_byte ^= (u8) (1 << src_idx);
901 910
902 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 911 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
910 goto out; 919 goto out;
911 } 920 }
912 921
922 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
923 GFP_KERNEL);
924 if (!unmap) {
925 err = -ENOMEM;
926 goto free_resources;
927 }
928
913 /* test xor */ 929 /* test xor */
914 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 930 for (i = 0; i < src_count; i++) {
915 DMA_FROM_DEVICE); 931 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
932 0, PAGE_SIZE, DMA_TO_DEVICE);
933 dma_srcs[i] = unmap->addr[i];
934 unmap->to_cnt++;
935 }
916 936
917 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) 937 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
918 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 938 DMA_FROM_DEVICE);
919 0, PAGE_SIZE, DMA_TO_DEVICE); 939 dest_dma = unmap->addr[src_count];
940 unmap->from_cnt = 1;
941 unmap->len = PAGE_SIZE;
920 942
921 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 943 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
922 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); 944 src_count, PAGE_SIZE, 0);
923 945
924 cookie = mv_xor_tx_submit(tx); 946 cookie = mv_xor_tx_submit(tx);
925 mv_xor_issue_pending(dma_chan); 947 mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
948 } 970 }
949 971
950free_resources: 972free_resources:
973 dmaengine_unmap_put(unmap);
951 mv_xor_free_chan_resources(dma_chan); 974 mv_xor_free_chan_resources(dma_chan);
952out: 975out:
953 src_idx = MV_XOR_NUM_SRC_TEST; 976 src_idx = src_count;
954 while (src_idx--) 977 while (src_idx--)
955 __free_page(xor_srcs[src_idx]); 978 __free_page(xor_srcs[src_idx]);
956 __free_page(dest); 979 __free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
1176 int i = 0; 1199 int i = 0;
1177 1200
1178 for_each_child_of_node(pdev->dev.of_node, np) { 1201 for_each_child_of_node(pdev->dev.of_node, np) {
1202 struct mv_xor_chan *chan;
1179 dma_cap_mask_t cap_mask; 1203 dma_cap_mask_t cap_mask;
1180 int irq; 1204 int irq;
1181 1205
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
1193 goto err_channel_add; 1217 goto err_channel_add;
1194 } 1218 }
1195 1219
1196 xordev->channels[i] = 1220 chan = mv_xor_channel_add(xordev, pdev, i,
1197 mv_xor_channel_add(xordev, pdev, i, 1221 cap_mask, irq);
1198 cap_mask, irq); 1222 if (IS_ERR(chan)) {
1199 if (IS_ERR(xordev->channels[i])) { 1223 ret = PTR_ERR(chan);
1200 ret = PTR_ERR(xordev->channels[i]);
1201 xordev->channels[i] = NULL;
1202 irq_dispose_mapping(irq); 1224 irq_dispose_mapping(irq);
1203 goto err_channel_add; 1225 goto err_channel_add;
1204 } 1226 }
1205 1227
1228 xordev->channels[i] = chan;
1206 i++; 1229 i++;
1207 } 1230 }
1208 } else if (pdata && pdata->channels) { 1231 } else if (pdata && pdata->channels) {
1209 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1232 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1210 struct mv_xor_channel_data *cd; 1233 struct mv_xor_channel_data *cd;
1234 struct mv_xor_chan *chan;
1211 int irq; 1235 int irq;
1212 1236
1213 cd = &pdata->channels[i]; 1237 cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
1222 goto err_channel_add; 1246 goto err_channel_add;
1223 } 1247 }
1224 1248
1225 xordev->channels[i] = 1249 chan = mv_xor_channel_add(xordev, pdev, i,
1226 mv_xor_channel_add(xordev, pdev, i, 1250 cd->cap_mask, irq);
1227 cd->cap_mask, irq); 1251 if (IS_ERR(chan)) {
1228 if (IS_ERR(xordev->channels[i])) { 1252 ret = PTR_ERR(chan);
1229 ret = PTR_ERR(xordev->channels[i]);
1230 goto err_channel_add; 1253 goto err_channel_add;
1231 } 1254 }
1255
1256 xordev->channels[i] = chan;
1232 } 1257 }
1233 } 1258 }
1234 1259
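
The mv_xor self-tests switch to dmaengine_unmap_data, which records every mapping so a single dmaengine_unmap_put() releases them all, and to PAGE_SIZE buffers so page-granular mapping is exact. A trimmed sketch of the memcpy case:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static int my_memcpy_test(struct dma_chan *chan, void *src, void *dst)
    {
    	struct device *dev = chan->device->dev;
    	struct dmaengine_unmap_data *unmap;

    	unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
    	if (!unmap)
    		return -ENOMEM;

    	unmap->addr[0] = dma_map_page(dev, virt_to_page(src), 0,
    				      PAGE_SIZE, DMA_TO_DEVICE);
    	unmap->to_cnt = 1;
    	unmap->addr[1] = dma_map_page(dev, virt_to_page(dst), 0,
    				      PAGE_SIZE, DMA_FROM_DEVICE);
    	unmap->from_cnt = 1;
    	unmap->len = PAGE_SIZE;

    	/* ... prep, submit and poll the copy ... */

    	dmaengine_unmap_put(unmap);	/* unmaps both pages, drops the struct */
    	return 0;
    }
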
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2492 2492
2493static inline void _init_desc(struct dma_pl330_desc *desc) 2493static inline void _init_desc(struct dma_pl330_desc *desc)
2494{ 2494{
2495 desc->pchan = NULL;
2496 desc->req.x = &desc->px; 2495 desc->req.x = &desc->px;
2497 desc->req.token = desc; 2496 desc->req.token = desc;
2498 desc->rqcfg.swap = SWAP_NO; 2497 desc->rqcfg.swap = SWAP_NO;
2499 desc->rqcfg.privileged = 0;
2500 desc->rqcfg.insnaccess = 0;
2501 desc->rqcfg.scctl = SCCTRL0; 2498 desc->rqcfg.scctl = SCCTRL0;
2502 desc->rqcfg.dcctl = DCCTRL0; 2499 desc->rqcfg.dcctl = DCCTRL0;
2503 desc->req.cfg = &desc->rqcfg; 2500 desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2517 if (!pdmac) 2514 if (!pdmac)
2518 return 0; 2515 return 0;
2519 2516
2520 desc = kmalloc(count * sizeof(*desc), flg); 2517 desc = kcalloc(count, sizeof(*desc), flg);
2521 if (!desc) 2518 if (!desc)
2522 return 0; 2519 return 0;
2523 2520
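
kcalloc() zeroes the array and rejects count * size overflow, which is also what lets _init_desc() stop clearing individual fields by hand. A tiny sketch with a hypothetical descriptor type:

    #include <linux/slab.h>

    struct my_desc {
    	int dummy;	/* hypothetical payload */
    };

    static struct my_desc *my_alloc_descs(int count, gfp_t flags)
    {
    	/* kmalloc(count * sizeof(*desc), flags) can silently overflow;
    	 * kcalloc() checks the multiplication and returns zeroed memory */
    	return kcalloc(count, sizeof(struct my_desc), flags);
    }
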
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
533} 533}
534 534
535/** 535/**
536 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
537 */
538static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
539 int value, unsigned long flags)
540{
541 struct dma_cdb *hw_desc = desc->hw_desc;
542
543 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
544 desc->hw_next = NULL;
545 desc->src_cnt = 1;
546 desc->dst_cnt = 1;
547
548 if (flags & DMA_PREP_INTERRUPT)
549 set_bit(PPC440SPE_DESC_INT, &desc->flags);
550 else
551 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
552
553 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
554 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
555 hw_desc->opc = DMA_CDB_OPC_DFILL128;
556}
557
558/**
559 * ppc440spe_desc_set_src_addr - set source address into the descriptor 536 * ppc440spe_desc_set_src_addr - set source address into the descriptor
560 */ 537 */
561static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, 538static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1504 struct ppc440spe_adma_chan *chan, 1481 struct ppc440spe_adma_chan *chan,
1505 dma_cookie_t cookie) 1482 dma_cookie_t cookie)
1506{ 1483{
1507 int i;
1508
1509 BUG_ON(desc->async_tx.cookie < 0); 1484 BUG_ON(desc->async_tx.cookie < 0);
1510 if (desc->async_tx.cookie > 0) { 1485 if (desc->async_tx.cookie > 0) {
1511 cookie = desc->async_tx.cookie; 1486 cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
3898 ppc440spe_adma_prep_dma_interrupt; 3873 ppc440spe_adma_prep_dma_interrupt;
3899 } 3874 }
3900 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " 3875 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
3901 "( %s%s%s%s%s%s%s)\n", 3876 "( %s%s%s%s%s%s)\n",
3902 dev_name(adev->dev), 3877 dev_name(adev->dev),
3903 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", 3878 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
3904 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", 3879 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
406 dma_async_tx_callback callback; 406 dma_async_tx_callback callback;
407 void *param; 407 void *param;
408 struct dma_async_tx_descriptor *txd = &desc->txd; 408 struct dma_async_tx_descriptor *txd = &desc->txd;
409 struct txx9dmac_slave *ds = dc->chan.private;
410 409
411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 410 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
412 txd->cookie, desc); 411 txd->cookie, desc);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056458a3..281029daf98c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
1623 .cmd_per_lun = 1, 1623 .cmd_per_lun = 1,
1624 .can_queue = 1, 1624 .can_queue = 1,
1625 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1625 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1626 .no_write_same = 1,
1627}; 1626};
1628 1627
1629MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 1628MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
14 14
15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ 15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
16obj-$(CONFIG_EFI) += efi/ 16obj-$(CONFIG_EFI) += efi/
17obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
36 backend for pstore by default. This setting can be overridden 36 backend for pstore by default. This setting can be overridden
37 using the efivars module's pstore_disable parameter. 37 using the efivars module's pstore_disable parameter.
38 38
39config UEFI_CPER
40 def_bool n
41
42endmenu 39endmenu
40
41config UEFI_CPER
42 bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# Makefile for linux kernel 2# Makefile for linux kernel
3# 3#
4obj-y += efi.o vars.o 4obj-$(CONFIG_EFI) += efi.o vars.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o 6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
7obj-$(CONFIG_UEFI_CPER) += cper.o 7obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd426f21b..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
356static struct pstore_info efi_pstore_info = { 356static struct pstore_info efi_pstore_info = {
357 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
358 .name = "efi", 358 .name = "efi",
359 .flags = PSTORE_FLAGS_FRAGILE,
359 .open = efi_pstore_open, 360 .open = efi_pstore_open,
360 .close = efi_pstore_close, 361 .close = efi_pstore_close,
361 .read = efi_pstore_read, 362 .read = efi_pstore_read,
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300973db..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
252 252
253 spin_lock_irqsave(&tlmm_lock, irq_flags); 253 spin_lock_irqsave(&tlmm_lock, irq_flags);
254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); 254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
255 clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 255 clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
256 __clear_bit(gpio, msm_gpio.enabled_irqs); 256 __clear_bit(gpio, msm_gpio.enabled_irqs);
257 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 257 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
258} 258}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
264 264
265 spin_lock_irqsave(&tlmm_lock, irq_flags); 265 spin_lock_irqsave(&tlmm_lock, irq_flags);
266 __set_bit(gpio, msm_gpio.enabled_irqs); 266 __set_bit(gpio, msm_gpio.enabled_irqs);
267 set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 267 set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); 268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
269 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 269 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
270} 270}
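
The gpio-msm fix is a units bug: INTR_RAW_STATUS_EN and INTR_ENABLE are bit numbers, so ORing them raw built the wrong register mask; BIT() turns each into a mask first. An illustration with made-up bit positions:

    #include <linux/bitops.h>

    #define MY_RAW_STATUS_EN	4	/* bit number (hypothetical) */
    #define MY_ENABLE		0	/* bit number (hypothetical) */

    static u32 my_irq_mask(void)
    {
    	/* wrong: MY_RAW_STATUS_EN | MY_ENABLE           == 0x04 (one bit) */
    	/* right: BIT(MY_RAW_STATUS_EN) | BIT(MY_ENABLE) == 0x11 (two bits) */
    	return BIT(MY_RAW_STATUS_EN) | BIT(MY_ENABLE);
    }
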
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a30567a..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
169 u32 pending; 169 u32 pending;
170 unsigned int offset, irqs_handled = 0; 170 unsigned int offset, irqs_handled = 0;
171 171
172 while ((pending = gpio_rcar_read(p, INTDT))) { 172 while ((pending = gpio_rcar_read(p, INTDT) &
173 gpio_rcar_read(p, INTMSK))) {
173 offset = __ffs(pending); 174 offset = __ffs(pending);
174 gpio_rcar_write(p, INTCLR, BIT(offset)); 175 gpio_rcar_write(p, INTCLR, BIT(offset));
175 generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); 176 generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
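
gpio-rcar now services only interrupts that are both pending and unmasked, so a line masked after it latched is no longer dispatched. A sketch of the loop, with hypothetical STATUS/MASK/CLEAR offsets:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/bitops.h>

    #define MY_STATUS	0x00	/* hypothetical register offsets */
    #define MY_MASK	0x04
    #define MY_CLEAR	0x08

    struct my_gpio {
    	void __iomem *base;
    	struct irq_domain *domain;
    };

    static irqreturn_t my_gpio_irq(int irq, void *dev_id)
    {
    	struct my_gpio *p = dev_id;
    	unsigned int offset, handled = 0;
    	u32 pending;

    	/* handle only lines that are pending AND still enabled */
    	while ((pending = readl(p->base + MY_STATUS) &
    			  readl(p->base + MY_MASK))) {
    		offset = __ffs(pending);
    		writel(BIT(offset), p->base + MY_CLEAR); /* ack this line */
    		generic_handle_irq(irq_find_mapping(p->domain, offset));
    		handled++;
    	}
    	return handled ? IRQ_HANDLED : IRQ_NONE;
    }
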
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6577b9..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
300 if (offset < TWL4030_GPIO_MAX) 300 if (offset < TWL4030_GPIO_MAX)
301 ret = twl4030_set_gpio_direction(offset, 1); 301 ret = twl4030_set_gpio_direction(offset, 1);
302 else 302 else
303 ret = -EINVAL; 303 ret = -EINVAL; /* LED outputs can't be set as input */
304 304
305 if (!ret) 305 if (!ret)
306 priv->direction &= ~BIT(offset); 306 priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) 354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
355{ 355{
356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); 356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
357 int ret = -EINVAL; 357 int ret = 0;
358 358
359 mutex_lock(&priv->mutex); 359 mutex_lock(&priv->mutex);
360 if (offset < TWL4030_GPIO_MAX) 360 if (offset < TWL4030_GPIO_MAX) {
361 ret = twl4030_set_gpio_direction(offset, 0); 361 ret = twl4030_set_gpio_direction(offset, 0);
362 if (ret) {
363 mutex_unlock(&priv->mutex);
364 return ret;
365 }
366 }
367
368 /*
369 * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
370 */
362 371
363 priv->direction |= BIT(offset); 372 priv->direction |= BIT(offset);
364 mutex_unlock(&priv->mutex); 373 mutex_unlock(&priv->mutex);
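
The twl4030 rework also illustrates the early-return-under-lock rule: once a mutex is held, every exit path has to release it before returning. A hedged generic sketch with hypothetical helpers:

    #include <linux/mutex.h>
    #include <linux/bitops.h>

    #define MY_GPIO_MAX	18	/* hypothetical bound */

    struct my_chip {
    	struct mutex lock;
    	u32 direction;
    };

    static int my_set_direction(unsigned offset, int in)
    {
    	return 0;	/* stub for the real register write */
    }

    static int my_direction_out(struct my_chip *chip, unsigned offset)
    {
    	int ret;

    	mutex_lock(&chip->lock);
    	if (offset < MY_GPIO_MAX) {
    		ret = my_set_direction(offset, 0);
    		if (ret) {
    			mutex_unlock(&chip->lock);	/* never leak the lock */
    			return ret;
    		}
    	}
    	chip->direction |= BIT(offset);
    	mutex_unlock(&chip->lock);
    	return 0;
    }
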
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; 103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
104 104
105int armada_fbdev_init(struct drm_device *); 105int armada_fbdev_init(struct drm_device *);
106void armada_fbdev_lastclose(struct drm_device *);
106void armada_fbdev_fini(struct drm_device *); 107void armada_fbdev_fini(struct drm_device *);
107 108
108int armada_overlay_plane_create(struct drm_device *, unsigned long); 109int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 069f64533ac3..acf3a36c9ebc 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -322,6 +322,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
322 DRM_UNLOCKED), 322 DRM_UNLOCKED),
323}; 323};
324 324
325static void armada_drm_lastclose(struct drm_device *dev)
326{
327 armada_fbdev_lastclose(dev);
328}
329
325static const struct file_operations armada_drm_fops = { 330static const struct file_operations armada_drm_fops = {
326 .owner = THIS_MODULE, 331 .owner = THIS_MODULE,
327 .llseek = no_llseek, 332 .llseek = no_llseek,
@@ -338,7 +343,7 @@ static struct drm_driver armada_drm_driver = {
338 .open = NULL, 343 .open = NULL,
339 .preclose = NULL, 344 .preclose = NULL,
340 .postclose = NULL, 345 .postclose = NULL,
341 .lastclose = NULL, 346 .lastclose = armada_drm_lastclose,
342 .unload = armada_drm_unload, 347 .unload = armada_drm_unload,
343 .get_vblank_counter = drm_vblank_count, 348 .get_vblank_counter = drm_vblank_count,
344 .enable_vblank = armada_drm_enable_vblank, 349 .enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); 105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); 106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
107 107
108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", 108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
109 dfb->fb.width, dfb->fb.height, 109 dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
110 dfb->fb.bits_per_pixel, obj->phys_addr); 110 (unsigned long long)obj->phys_addr);
111 111
112 return 0; 112 return 0;
113 113
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
177 return ret; 177 return ret;
178} 178}
179 179
180void armada_fbdev_lastclose(struct drm_device *dev)
181{
182 struct armada_private *priv = dev->dev_private;
183
184 drm_modeset_lock_all(dev);
185 if (priv->fbdev)
186 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
187 drm_modeset_unlock_all(dev);
188}
189
180void armada_fbdev_fini(struct drm_device *dev) 190void armada_fbdev_fini(struct drm_device *dev)
181{ 191{
182 struct armada_private *priv = dev->dev_private; 192 struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
192 framebuffer_release(info); 202 framebuffer_release(info);
193 } 203 }
194 204
205 drm_fb_helper_fini(fbh);
206
195 if (fbh->fb) 207 if (fbh->fb)
196 fbh->fb->funcs->destroy(fbh->fb); 208 fbh->fb->funcs->destroy(fbh->fb);
197 209
198 drm_fb_helper_fini(fbh);
199
200 priv->fbdev = NULL; 210 priv->fbdev = NULL;
201 } 211 }
202} 212}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
172 obj->dev_addr = obj->linear->start; 172 obj->dev_addr = obj->linear->start;
173 } 173 }
174 174
175 DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", 175 DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
176 obj, obj->phys_addr, obj->dev_addr); 176 (unsigned long long)obj->phys_addr,
177 (unsigned long long)obj->dev_addr);
177 178
178 return 0; 179 return 0;
179} 180}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
557 * refcount on the gem object itself. 558 * refcount on the gem object itself.
558 */ 559 */
559 drm_gem_object_reference(obj); 560 drm_gem_object_reference(obj);
560 dma_buf_put(buf);
561 return obj; 561 return obj;
562 } 562 }
563 } 563 }
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
573 } 573 }
574 574
575 dobj->obj.import_attach = attach; 575 dobj->obj.import_attach = attach;
576 get_dma_buf(buf);
576 577
577 /* 578 /*
578 * Don't call dma_buf_map_attachment() here - it maps the 579 * Don't call dma_buf_map_attachment() here - it maps the
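
The two armada_gem hunks are one refcount fix: on self-import the caller keeps its own dma-buf reference (so the dma_buf_put() was wrong), and on a real import the driver holds a pointer to the buffer, so it must take its own reference with get_dma_buf(). A sketch of the import side, with a hypothetical allocator:

    #include <drm/drmP.h>
    #include <linux/dma-buf.h>
    #include <linux/err.h>

    struct my_gem_object {
    	struct drm_gem_object base;
    };

    /* hypothetical allocator, defined elsewhere */
    static struct my_gem_object *my_gem_alloc(struct drm_device *dev, size_t size);

    static struct drm_gem_object *my_prime_import(struct drm_device *dev,
    					      struct dma_buf *buf)
    {
    	struct dma_buf_attachment *attach;
    	struct my_gem_object *obj;

    	attach = dma_buf_attach(buf, dev->dev);
    	if (IS_ERR(attach))
    		return ERR_CAST(attach);

    	obj = my_gem_alloc(dev, buf->size);
    	if (!obj) {
    		dma_buf_detach(buf, attach);
    		return ERR_PTR(-ENOMEM);
    	}

    	obj->base.import_attach = attach;
    	get_dma_buf(buf);	/* we keep a pointer, so keep a reference */
    	return &obj->base;
    }
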
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5f4234..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) 68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
69/* Force reduced-blanking timings for detailed modes */ 69/* Force reduced-blanking timings for detailed modes */
70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) 70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
71/* Force 8bpc */
72#define EDID_QUIRK_FORCE_8BPC (1 << 8)
71 73
72struct detailed_mode_closure { 74struct detailed_mode_closure {
73 struct drm_connector *connector; 75 struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
128 130
129 /* Medion MD 30217 PG */ 131 /* Medion MD 30217 PG */
130 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, 132 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
133
134 /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
135 { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
131}; 136};
132 137
133/* 138/*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3435 3440
3436 drm_add_display_info(edid, &connector->display_info); 3441 drm_add_display_info(edid, &connector->display_info);
3437 3442
3443 if (quirks & EDID_QUIRK_FORCE_8BPC)
3444 connector->display_info.bpc = 8;
3445
3438 return num_modes; 3446 return num_modes;
3439} 3447}
3440EXPORT_SYMBOL(drm_add_edid_modes); 3448EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1041 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1042 mode->status = pmode->status; 1042 mode->status = pmode->status;
1043 /* Merge type bits together */ 1043 /* Merge type bits together */
1044 mode->type = pmode->type; 1044 mode->type |= pmode->type;
1045 list_del(&pmode->head); 1045 list_del(&pmode->head);
1046 drm_mode_destroy(connector->dev, pmode); 1046 drm_mode_destroy(connector->dev, pmode);
1047 break; 1047 break;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index a4a5c6ac110a..98a33c580ca1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -564,11 +564,11 @@ err_unload:
564 if (dev->driver->unload) 564 if (dev->driver->unload)
565 dev->driver->unload(dev); 565 dev->driver->unload(dev);
566err_primary_node: 566err_primary_node:
567 drm_put_minor(dev->primary); 567 drm_unplug_minor(dev->primary);
568err_render_node: 568err_render_node:
569 drm_put_minor(dev->render); 569 drm_unplug_minor(dev->render);
570err_control_node: 570err_control_node:
571 drm_put_minor(dev->control); 571 drm_unplug_minor(dev->control);
572out_unlock: 572out_unlock:
573 mutex_unlock(&drm_global_mutex); 573 mutex_unlock(&drm_global_mutex);
574 return ret; 574 return ret;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41838eaa799c..da682cbcb806 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,7 +4,6 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6i915-y := i915_drv.o i915_dma.o i915_irq.o \ 6i915-y := i915_drv.o i915_dma.o i915_irq.o \
7 i915_debugfs.o \
8 i915_gpu_error.o \ 7 i915_gpu_error.o \
9 i915_suspend.o \ 8 i915_suspend.o \
10 i915_gem.o \ 9 i915_gem.o \
@@ -55,6 +54,8 @@ i915-$(CONFIG_ACPI) += intel_acpi.o
55 54
56i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o 55i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
57 56
57i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
58
58obj-$(CONFIG_DRM_I915) += i915.o 59obj-$(CONFIG_DRM_I915) += i915.o
59 60
60CFLAGS_i915_trace_points.o := -I$(src) 61CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6badc1596ceb..b2b46c52294c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,8 +40,6 @@
40#include <drm/i915_drm.h> 40#include <drm/i915_drm.h>
41#include "i915_drv.h" 41#include "i915_drv.h"
42 42
43#if defined(CONFIG_DEBUG_FS)
44
45enum { 43enum {
46 ACTIVE_LIST, 44 ACTIVE_LIST,
47 INACTIVE_LIST, 45 INACTIVE_LIST,
@@ -406,16 +404,26 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
406 seq_putc(m, '\n'); 404 seq_putc(m, '\n');
407 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 405 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
408 struct file_stats stats; 406 struct file_stats stats;
407 struct task_struct *task;
409 408
410 memset(&stats, 0, sizeof(stats)); 409 memset(&stats, 0, sizeof(stats));
411 idr_for_each(&file->object_idr, per_file_stats, &stats); 410 idr_for_each(&file->object_idr, per_file_stats, &stats);
411 /*
412 * Although we have a valid reference on file->pid, that does
413 * not guarantee that the task_struct who called get_pid() is
414 * still alive (e.g. get_pid(current) => fork() => exit()).
415 * Therefore, we need to protect this ->comm access using RCU.
416 */
417 rcu_read_lock();
418 task = pid_task(file->pid, PIDTYPE_PID);
412 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", 419 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
413 get_pid_task(file->pid, PIDTYPE_PID)->comm, 420 task ? task->comm : "<unknown>",
414 stats.count, 421 stats.count,
415 stats.total, 422 stats.total,
416 stats.active, 423 stats.active,
417 stats.inactive, 424 stats.inactive,
418 stats.unbound); 425 stats.unbound);
426 rcu_read_unlock();
419 } 427 }
420 428
421 mutex_unlock(&dev->struct_mutex); 429 mutex_unlock(&dev->struct_mutex);
@@ -1170,6 +1178,50 @@ static int ironlake_drpc_info(struct seq_file *m)
1170 return 0; 1178 return 0;
1171} 1179}
1172 1180
1181static int vlv_drpc_info(struct seq_file *m)
1182{
1183
1184 struct drm_info_node *node = (struct drm_info_node *) m->private;
1185 struct drm_device *dev = node->minor->dev;
1186 struct drm_i915_private *dev_priv = dev->dev_private;
1187 u32 rpmodectl1, rcctl1;
1188 unsigned fw_rendercount = 0, fw_mediacount = 0;
1189
1190 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1191 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1192
1193 seq_printf(m, "Video Turbo Mode: %s\n",
1194 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1195 seq_printf(m, "Turbo enabled: %s\n",
1196 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1197 seq_printf(m, "HW control enabled: %s\n",
1198 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1199 seq_printf(m, "SW control enabled: %s\n",
1200 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1201 GEN6_RP_MEDIA_SW_MODE));
1202 seq_printf(m, "RC6 Enabled: %s\n",
1203 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1204 GEN6_RC_CTL_EI_MODE(1))));
1205 seq_printf(m, "Render Power Well: %s\n",
1206 (I915_READ(VLV_GTLC_PW_STATUS) &
1207 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1208 seq_printf(m, "Media Power Well: %s\n",
1209 (I915_READ(VLV_GTLC_PW_STATUS) &
1210 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1211
1212 spin_lock_irq(&dev_priv->uncore.lock);
1213 fw_rendercount = dev_priv->uncore.fw_rendercount;
1214 fw_mediacount = dev_priv->uncore.fw_mediacount;
1215 spin_unlock_irq(&dev_priv->uncore.lock);
1216
1217 seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
1218 seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
1219
1220
1221 return 0;
1222}
1223
1224
1173static int gen6_drpc_info(struct seq_file *m) 1225static int gen6_drpc_info(struct seq_file *m)
1174{ 1226{
1175 1227
@@ -1275,7 +1327,9 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
1275 struct drm_info_node *node = (struct drm_info_node *) m->private; 1327 struct drm_info_node *node = (struct drm_info_node *) m->private;
1276 struct drm_device *dev = node->minor->dev; 1328 struct drm_device *dev = node->minor->dev;
1277 1329
1278 if (IS_GEN6(dev) || IS_GEN7(dev)) 1330 if (IS_VALLEYVIEW(dev))
1331 return vlv_drpc_info(m);
1332 else if (IS_GEN6(dev) || IS_GEN7(dev))
1279 return gen6_drpc_info(m); 1333 return gen6_drpc_info(m);
1280 else 1334 else
1281 return ironlake_drpc_info(m); 1335 return ironlake_drpc_info(m);
@@ -1287,7 +1341,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1287 struct drm_device *dev = node->minor->dev; 1341 struct drm_device *dev = node->minor->dev;
1288 drm_i915_private_t *dev_priv = dev->dev_private; 1342 drm_i915_private_t *dev_priv = dev->dev_private;
1289 1343
1290 if (!I915_HAS_FBC(dev)) { 1344 if (!HAS_FBC(dev)) {
1291 seq_puts(m, "FBC unsupported on this chipset\n"); 1345 seq_puts(m, "FBC unsupported on this chipset\n");
1292 return 0; 1346 return 0;
1293 } 1347 }
@@ -1349,7 +1403,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
1349 return 0; 1403 return 0;
1350 } 1404 }
1351 1405
1352 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1406 if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
1353 seq_puts(m, "enabled\n"); 1407 seq_puts(m, "enabled\n");
1354 else 1408 else
1355 seq_puts(m, "disabled\n"); 1409 seq_puts(m, "disabled\n");
@@ -2117,8 +2171,8 @@ static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2117 info->dev = dev; 2171 info->dev = dev;
2118 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2172 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2119 &i915_pipe_crc_fops); 2173 &i915_pipe_crc_fops);
2120 if (IS_ERR(ent)) 2174 if (!ent)
2121 return PTR_ERR(ent); 2175 return -ENOMEM;
2122 2176
2123 return drm_add_fake_info_node(minor, ent, info); 2177 return drm_add_fake_info_node(minor, ent, info);
2124} 2178}
@@ -3133,8 +3187,8 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
3133 S_IRUSR, 3187 S_IRUSR,
3134 root, dev, 3188 root, dev,
3135 &i915_forcewake_fops); 3189 &i915_forcewake_fops);
3136 if (IS_ERR(ent)) 3190 if (!ent)
3137 return PTR_ERR(ent); 3191 return -ENOMEM;
3138 3192
3139 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 3193 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
3140} 3194}
@@ -3151,8 +3205,8 @@ static int i915_debugfs_create(struct dentry *root,
3151 S_IRUGO | S_IWUSR, 3205 S_IRUGO | S_IWUSR,
3152 root, dev, 3206 root, dev,
3153 fops); 3207 fops);
3154 if (IS_ERR(ent)) 3208 if (!ent)
3155 return PTR_ERR(ent); 3209 return -ENOMEM;
3156 3210
3157 return drm_add_fake_info_node(minor, ent, fops); 3211 return drm_add_fake_info_node(minor, ent, fops);
3158} 3212}
@@ -3282,5 +3336,3 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
3282 drm_debugfs_remove_files(info_list, 1, minor); 3336 drm_debugfs_remove_files(info_list, 1, minor);
3283 } 3337 }
3284} 3338}
3285
3286#endif /* CONFIG_DEBUG_FS */
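The new vlv_drpc_info() above reads both forcewake counters inside a single uncore.lock critical section, so the render and media counts are reported as one consistent snapshot. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int fw_rendercount, fw_mediacount;

    /* Copy both counters under one lock so the pair is self-consistent. */
    static void snapshot_forcewake(unsigned int *render, unsigned int *media)
    {
            pthread_mutex_lock(&uncore_lock);
            *render = fw_rendercount;
            *media = fw_mediacount;
            pthread_mutex_unlock(&uncore_lock);
    }

    int main(void)
    {
            unsigned int r, m;

            snapshot_forcewake(&r, &m);
            printf("Forcewake Render Count = %u\n", r);
            printf("Forcewake Media Count = %u\n", m);
            return 0;
    }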
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 750918c779c8..e177d021c444 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -85,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
85 drm_i915_private_t *dev_priv = dev->dev_private; 85 drm_i915_private_t *dev_priv = dev->dev_private;
86 struct drm_i915_master_private *master_priv; 86 struct drm_i915_master_private *master_priv;
87 87
88 /*
89 * The dri breadcrumb update races against the drm master disappearing.
90 * Instead of trying to fix this (this is by far not the only ums issue)
91 * just don't do the update in kms mode.
92 */
93 if (drm_core_check_feature(dev, DRIVER_MODESET))
94 return;
95
88 if (dev->primary->master) { 96 if (dev->primary->master) {
89 master_priv = dev->primary->master->driver_priv; 97 master_priv = dev->primary->master->driver_priv;
90 if (master_priv->sarea_priv) 98 if (master_priv->sarea_priv)
@@ -1405,7 +1413,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1405 master->driver_priv = NULL; 1413 master->driver_priv = NULL;
1406} 1414}
1407 1415
1408#ifdef CONFIG_DRM_I915_FBDEV 1416#if IS_ENABLED(CONFIG_FB)
1409static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 1417static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1410{ 1418{
1411 struct apertures_struct *ap; 1419 struct apertures_struct *ap;
@@ -1496,16 +1504,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1496 spin_lock_init(&dev_priv->uncore.lock); 1504 spin_lock_init(&dev_priv->uncore.lock);
1497 spin_lock_init(&dev_priv->mm.object_stat_lock); 1505 spin_lock_init(&dev_priv->mm.object_stat_lock);
1498 mutex_init(&dev_priv->dpio_lock); 1506 mutex_init(&dev_priv->dpio_lock);
1499 mutex_init(&dev_priv->rps.hw_lock);
1500 mutex_init(&dev_priv->modeset_restore_lock); 1507 mutex_init(&dev_priv->modeset_restore_lock);
1501 1508
1502 mutex_init(&dev_priv->pc8.lock); 1509 intel_pm_setup(dev);
1503 dev_priv->pc8.requirements_met = false;
1504 dev_priv->pc8.gpu_idle = false;
1505 dev_priv->pc8.irqs_disabled = false;
1506 dev_priv->pc8.enabled = false;
1507 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1508 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1509 1510
1510 intel_display_crc_init(dev); 1511 intel_display_crc_init(dev);
1511 1512
@@ -1609,7 +1610,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1609 } 1610 }
1610 1611
1611 intel_irq_init(dev); 1612 intel_irq_init(dev);
1612 intel_pm_init(dev);
1613 intel_uncore_sanitize(dev); 1613 intel_uncore_sanitize(dev);
1614 1614
1615 /* Try to make sure MCHBAR is enabled before poking at it */ 1615 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1855,8 +1855,10 @@ void i915_driver_lastclose(struct drm_device * dev)
1855 1855
1856void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1856void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1857{ 1857{
1858 mutex_lock(&dev->struct_mutex);
1858 i915_gem_context_close(dev, file_priv); 1859 i915_gem_context_close(dev, file_priv);
1859 i915_gem_release(dev, file_priv); 1860 i915_gem_release(dev, file_priv);
1861 mutex_unlock(&dev->struct_mutex);
1860} 1862}
1861 1863
1862void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1864void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
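The breadcrumb change above fixes the race by not entering the UMS-only path at all under KMS, a plain guard clause rather than extra locking. A toy reduction of that shape (the struct and flag are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_device { bool modeset; };

    static void update_breadcrumb(struct toy_device *dev)
    {
            /* KMS: skip the UMS-only update instead of racing the master. */
            if (dev->modeset)
                    return;
            puts("updating dri1 breadcrumb");
    }

    int main(void)
    {
            struct toy_device kms = { .modeset = true };
            struct toy_device ums = { .modeset = false };

            update_breadcrumb(&kms);  /* prints nothing */
            update_breadcrumb(&ums);  /* does the legacy update */
            return 0;
    }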
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 74516930de7a..43245b3fd2a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -480,12 +480,12 @@ check_next:
480bool i915_semaphore_is_enabled(struct drm_device *dev) 480bool i915_semaphore_is_enabled(struct drm_device *dev)
481{ 481{
482 if (INTEL_INFO(dev)->gen < 6) 482 if (INTEL_INFO(dev)->gen < 6)
483 return 0; 483 return false;
484 484
485 /* Until we get further testing... */ 485 /* Until we get further testing... */
486 if (IS_GEN8(dev)) { 486 if (IS_GEN8(dev)) {
487 WARN_ON(!i915_preliminary_hw_support); 487 WARN_ON(!i915_preliminary_hw_support);
488 return 0; 488 return false;
489 } 489 }
490 490
491 if (i915_semaphores >= 0) 491 if (i915_semaphores >= 0)
@@ -497,7 +497,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
497 return false; 497 return false;
498#endif 498#endif
499 499
500 return 1; 500 return true;
501} 501}
502 502
503static int i915_drm_freeze(struct drm_device *dev) 503static int i915_drm_freeze(struct drm_device *dev)
@@ -657,6 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
657 intel_modeset_init_hw(dev); 657 intel_modeset_init_hw(dev);
658 658
659 drm_modeset_lock_all(dev); 659 drm_modeset_lock_all(dev);
660 drm_mode_config_reset(dev);
660 intel_modeset_setup_hw_state(dev, true); 661 intel_modeset_setup_hw_state(dev, true);
661 drm_modeset_unlock_all(dev); 662 drm_modeset_unlock_all(dev);
662 663
@@ -919,6 +920,9 @@ static int i915_runtime_suspend(struct device *device)
919 920
920 DRM_DEBUG_KMS("Suspending device\n"); 921 DRM_DEBUG_KMS("Suspending device\n");
921 922
923 i915_gem_release_all_mmaps(dev_priv);
924
925 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
922 dev_priv->pm.suspended = true; 926 dev_priv->pm.suspended = true;
923 intel_opregion_notify_adapter(dev, PCI_D3cold); 927 intel_opregion_notify_adapter(dev, PCI_D3cold);
924 928
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ae2c80c1981b..ff6f870d6621 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1184,6 +1184,11 @@ struct intel_vbt_data {
1184 int edp_bpp; 1184 int edp_bpp;
1185 struct edp_power_seq edp_pps; 1185 struct edp_power_seq edp_pps;
1186 1186
1187 struct {
1188 u16 pwm_freq_hz;
1189 bool active_low_pwm;
1190 } backlight;
1191
1187 /* MIPI DSI */ 1192 /* MIPI DSI */
1188 struct { 1193 struct {
1189 u16 panel_id; 1194 u16 panel_id;
@@ -1210,7 +1215,7 @@ struct intel_wm_level {
1210 uint32_t fbc_val; 1215 uint32_t fbc_val;
1211}; 1216};
1212 1217
1213struct hsw_wm_values { 1218struct ilk_wm_values {
1214 uint32_t wm_pipe[3]; 1219 uint32_t wm_pipe[3];
1215 uint32_t wm_lp[3]; 1220 uint32_t wm_lp[3];
1216 uint32_t wm_lp_spr[3]; 1221 uint32_t wm_lp_spr[3];
@@ -1396,7 +1401,6 @@ typedef struct drm_i915_private {
1396 1401
1397 /* overlay */ 1402 /* overlay */
1398 struct intel_overlay *overlay; 1403 struct intel_overlay *overlay;
1399 unsigned int sprite_scaling_enabled;
1400 1404
1401 /* backlight registers and fields in struct intel_panel */ 1405 /* backlight registers and fields in struct intel_panel */
1402 spinlock_t backlight_lock; 1406 spinlock_t backlight_lock;
@@ -1517,7 +1521,7 @@ typedef struct drm_i915_private {
1517 uint16_t cur_latency[5]; 1521 uint16_t cur_latency[5];
1518 1522
1519 /* current hardware state */ 1523 /* current hardware state */
1520 struct hsw_wm_values hw; 1524 struct ilk_wm_values hw;
1521 } wm; 1525 } wm;
1522 1526
1523 struct i915_package_c8 pc8; 1527 struct i915_package_c8 pc8;
@@ -1840,7 +1844,7 @@ struct drm_i915_file_private {
1840 1844
1841#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 1845#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1842#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1846#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1843#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1847#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1844 1848
1845#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) 1849#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
1846 1850
@@ -1848,7 +1852,7 @@ struct drm_i915_file_private {
1848#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1852#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1849#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1853#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
1850#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ 1854#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
1851#define HAS_RUNTIME_PM(dev) false 1855#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
1852 1856
1853#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1857#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1854#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1858#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1933,9 +1937,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
1933void i915_handle_error(struct drm_device *dev, bool wedged); 1937void i915_handle_error(struct drm_device *dev, bool wedged);
1934 1938
1935extern void intel_irq_init(struct drm_device *dev); 1939extern void intel_irq_init(struct drm_device *dev);
1936extern void intel_pm_init(struct drm_device *dev);
1937extern void intel_hpd_init(struct drm_device *dev); 1940extern void intel_hpd_init(struct drm_device *dev);
1938extern void intel_pm_init(struct drm_device *dev);
1939 1941
1940extern void intel_uncore_sanitize(struct drm_device *dev); 1942extern void intel_uncore_sanitize(struct drm_device *dev);
1941extern void intel_uncore_early_sanitize(struct drm_device *dev); 1943extern void intel_uncore_early_sanitize(struct drm_device *dev);
@@ -2015,6 +2017,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
2015int __must_check i915_vma_unbind(struct i915_vma *vma); 2017int __must_check i915_vma_unbind(struct i915_vma *vma);
2016int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); 2018int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
2017int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2019int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2020void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2018void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2021void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2019void i915_gem_lastclose(struct drm_device *dev); 2022void i915_gem_lastclose(struct drm_device *dev);
2020 2023
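The I915_HAS_FBC() to HAS_FBC() rename keeps every feature test funnelled through the per-device INTEL_INFO() table. A compilable reduction of that macro style (the device-info values below are invented):

    #include <stdbool.h>
    #include <stdio.h>

    struct intel_device_info { int gen; bool has_fbc; };
    struct toy_drm_device { const struct intel_device_info *info; };

    /* Feature checks read static per-platform data, never probe hardware. */
    #define INTEL_INFO(dev) ((dev)->info)
    #define HAS_FBC(dev)    (INTEL_INFO(dev)->has_fbc)
    #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)

    int main(void)
    {
            static const struct intel_device_info info = { .gen = 7, .has_fbc = true };
            struct toy_drm_device dev = { .info = &info };

            printf("FBC: %d, FW_BLC: %d\n", HAS_FBC(&dev), HAS_FW_BLC(&dev));
            return 0;
    }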
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2be904c704e9..32636a470367 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,6 +1465,22 @@ out:
1465 return ret; 1465 return ret;
1466} 1466}
1467 1467
1468void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1469{
1470 struct i915_vma *vma;
1471
1472 /*
1473 * Only the global gtt is relevant for gtt memory mappings, so restrict
1474 * list traversal to objects bound into the global address space. Note
1475 * that the active list should be empty, but better safe than sorry.
1476 */
1477 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1478 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1479 i915_gem_release_mmap(vma->obj);
1480 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1481 i915_gem_release_mmap(vma->obj);
1482}
1483
1468/** 1484/**
1469 * i915_gem_release_mmap - remove physical page mappings 1485 * i915_gem_release_mmap - remove physical page mappings
1470 * @obj: obj in question 1486 * @obj: obj in question
@@ -2354,15 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2354 kfree(request); 2370 kfree(request);
2355} 2371}
2356 2372
2357static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, 2373static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2358 struct intel_ring_buffer *ring) 2374 struct intel_ring_buffer *ring)
2359{ 2375{
2360 u32 completed_seqno; 2376 u32 completed_seqno = ring->get_seqno(ring, false);
2361 u32 acthd; 2377 u32 acthd = intel_ring_get_active_head(ring);
2378 struct drm_i915_gem_request *request;
2379
2380 list_for_each_entry(request, &ring->request_list, list) {
2381 if (i915_seqno_passed(completed_seqno, request->seqno))
2382 continue;
2362 2383
2363 acthd = intel_ring_get_active_head(ring); 2384 i915_set_reset_status(ring, request, acthd);
2364 completed_seqno = ring->get_seqno(ring, false); 2385 }
2386}
2365 2387
2388static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2389 struct intel_ring_buffer *ring)
2390{
2366 while (!list_empty(&ring->request_list)) { 2391 while (!list_empty(&ring->request_list)) {
2367 struct drm_i915_gem_request *request; 2392 struct drm_i915_gem_request *request;
2368 2393
@@ -2370,9 +2395,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2370 struct drm_i915_gem_request, 2395 struct drm_i915_gem_request,
2371 list); 2396 list);
2372 2397
2373 if (request->seqno > completed_seqno)
2374 i915_set_reset_status(ring, request, acthd);
2375
2376 i915_gem_free_request(request); 2398 i915_gem_free_request(request);
2377 } 2399 }
2378 2400
@@ -2414,8 +2436,16 @@ void i915_gem_reset(struct drm_device *dev)
2414 struct intel_ring_buffer *ring; 2436 struct intel_ring_buffer *ring;
2415 int i; 2437 int i;
2416 2438
2439 /*
2440 * Before we free the objects from the requests, we need to inspect
2441 * them for finding the guilty party. As the requests only borrow
2442 * their reference to the objects, the inspection must be done first.
2443 */
2444 for_each_ring(ring, dev_priv, i)
2445 i915_gem_reset_ring_status(dev_priv, ring);
2446
2417 for_each_ring(ring, dev_priv, i) 2447 for_each_ring(ring, dev_priv, i)
2418 i915_gem_reset_ring_lists(dev_priv, ring); 2448 i915_gem_reset_ring_cleanup(dev_priv, ring);
2419 2449
2420 i915_gem_cleanup_ringbuffer(dev); 2450 i915_gem_cleanup_ringbuffer(dev);
2421 2451
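The reset path above is split into a read-only pass that assigns blame across every ring, followed by a destructive cleanup pass, because requests only borrow their references to the objects. The same two-phase shape in miniature (toy request records, not the driver's structs):

    #include <stdio.h>

    struct toy_request { unsigned int seqno; int guilty; };

    /* Phase 1: pure inspection, nothing is freed or unlinked yet. */
    static void mark_guilty(struct toy_request *reqs, int n, unsigned int completed)
    {
            for (int i = 0; i < n; i++)
                    reqs[i].guilty = reqs[i].seqno > completed;
    }

    /* Phase 2: destructive cleanup, safe because inspection is done. */
    static void free_requests(struct toy_request *reqs, int n)
    {
            for (int i = 0; i < n; i++)
                    printf("freeing seqno %u (guilty=%d)\n",
                           reqs[i].seqno, reqs[i].guilty);
    }

    int main(void)
    {
            struct toy_request reqs[] = { { 1, 0 }, { 2, 0 }, { 3, 0 } };

            mark_guilty(reqs, 3, 1);
            free_requests(reqs, 3);
            return 0;
    }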
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 41877045a1a0..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -345,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
345{ 345{
346 struct drm_i915_file_private *file_priv = file->driver_priv; 346 struct drm_i915_file_private *file_priv = file->driver_priv;
347 347
348 mutex_lock(&dev->struct_mutex);
349 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 348 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
350 idr_destroy(&file_priv->context_idr); 349 idr_destroy(&file_priv->context_idr);
351 mutex_unlock(&dev->struct_mutex);
352} 350}
353 351
354static struct i915_hw_context * 352static struct i915_hw_context *
@@ -421,11 +419,21 @@ static int do_switch(struct i915_hw_context *to)
421 if (ret) 419 if (ret)
422 return ret; 420 return ret;
423 421
424 /* Clear this page out of any CPU caches for coherent swap-in/out. Note 422 /*
423 * Pin can switch back to the default context if we end up calling into
424 * evict_everything - as a last ditch gtt defrag effort that also
425 * switches to the default context. Hence we need to reload from here.
426 */
427 from = ring->last_context;
428
429 /*
430 * Clear this page out of any CPU caches for coherent swap-in/out. Note
425 * that thanks to write = false in this call and us not setting any gpu 431 * that thanks to write = false in this call and us not setting any gpu
426 * write domains when putting a context object onto the active list 432 * write domains when putting a context object onto the active list
427 * (when switching away from it), this won't block. 433 * (when switching away from it), this won't block.
428 * XXX: We need a real interface to do this instead of trickery. */ 434 *
435 * XXX: We need a real interface to do this instead of trickery.
436 */
429 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 437 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
430 if (ret) { 438 if (ret) {
431 i915_gem_object_unpin(to->obj); 439 i915_gem_object_unpin(to->obj);
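Paired with the i915_dma.c hunk earlier, struct_mutex handling moves out of i915_gem_context_close() and up into the single caller, so preclose runs both teardown steps inside one critical section. A pthread sketch of that hoist (the function bodies are placeholders):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Callees now assume the caller already holds struct_mutex. */
    static void context_close(void) { puts("closing contexts"); }
    static void gem_release(void)   { puts("releasing gem state"); }

    static void driver_preclose(void)
    {
            pthread_mutex_lock(&struct_mutex);
            context_close();
            gem_release();
            pthread_mutex_unlock(&struct_mutex);
    }

    int main(void)
    {
            driver_preclose();
            return 0;
    }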
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
88 } else 88 } else
89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
90 90
91search_again:
91 /* First see if there is a large enough contiguous idle region... */ 92 /* First see if there is a large enough contiguous idle region... */
92 list_for_each_entry(vma, &vm->inactive_list, mm_list) { 93 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
93 if (mark_free(vma, &unwind_list)) 94 if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
115 list_del_init(&vma->exec_list); 116 list_del_init(&vma->exec_list);
116 } 117 }
117 118
118 /* We expect the caller to unpin, evict all and try again, or give up. 119 /* Can we unpin some objects such as idle hw contexts,
119 * So calling i915_gem_evict_vm() is unnecessary. 120 * or pending flips?
120 */ 121 */
121 return -ENOSPC; 122 ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
123 if (ret)
124 return ret;
125
126 /* Only idle the GPU and repeat the search once */
127 i915_gem_retire_requests(dev);
128 nonblocking = true;
129 goto search_again;
122 130
123found: 131found:
124 /* drm_mm doesn't allow any other operations while 132
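i915_gem_evict_something() no longer gives up with -ENOSPC on the first failed scan: unless the caller is nonblocking it idles the GPU, retires requests and repeats the search exactly once. A compilable skeleton of that retry-once control flow (the space search and idle step are stubbed out):

    #include <stdbool.h>
    #include <stdio.h>

    static bool try_find_space(bool idled)
    {
            return idled; /* toy stand-in for the drm_mm scan */
    }

    static int evict_something(bool nonblocking)
    {
            bool idled = false;

    search_again:
            if (try_find_space(idled))
                    return 0;
            if (nonblocking)
                    return -1;      /* -ENOSPC in the driver */
            idled = true;           /* i915_gpu_idle() + retire requests */
            nonblocking = true;     /* repeat the search only once */
            goto search_again;
    }

    int main(void)
    {
            printf("blocking search: %d\n", evict_something(false));
            printf("nonblocking search: %d\n", evict_something(true));
            return 0;
    }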
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87652fafeb49..8d795626a25e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
93{ 93{
94 struct drm_i915_gem_object *obj; 94 struct drm_i915_gem_object *obj;
95 struct list_head objects; 95 struct list_head objects;
96 int i, ret = 0; 96 int i, ret;
97 97
98 INIT_LIST_HEAD(&objects); 98 INIT_LIST_HEAD(&objects);
99 spin_lock(&file->table_lock); 99 spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
106 DRM_DEBUG("Invalid object handle %d at index %d\n", 106 DRM_DEBUG("Invalid object handle %d at index %d\n",
107 exec[i].handle, i); 107 exec[i].handle, i);
108 ret = -ENOENT; 108 ret = -ENOENT;
109 goto out; 109 goto err;
110 } 110 }
111 111
112 if (!list_empty(&obj->obj_exec_link)) { 112 if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
115 obj, exec[i].handle, i); 115 obj, exec[i].handle, i);
116 ret = -EINVAL; 116 ret = -EINVAL;
117 goto out; 117 goto err;
118 } 118 }
119 119
120 drm_gem_object_reference(&obj->base); 120 drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
123 spin_unlock(&file->table_lock); 123 spin_unlock(&file->table_lock);
124 124
125 i = 0; 125 i = 0;
126 list_for_each_entry(obj, &objects, obj_exec_link) { 126 while (!list_empty(&objects)) {
127 struct i915_vma *vma; 127 struct i915_vma *vma;
128 128
129 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object,
131 obj_exec_link);
132
129 /* 133 /*
130 * NOTE: We can leak any vmas created here when something fails 134 * NOTE: We can leak any vmas created here when something fails
131 * later on. But that's no issue since vma_unbind can deal with 135 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
138 if (IS_ERR(vma)) { 142 if (IS_ERR(vma)) {
139 DRM_DEBUG("Failed to lookup VMA\n"); 143 DRM_DEBUG("Failed to lookup VMA\n");
140 ret = PTR_ERR(vma); 144 ret = PTR_ERR(vma);
141 goto out; 145 goto err;
142 } 146 }
143 147
148 /* Transfer ownership from the objects list to the vmas list. */
144 list_add_tail(&vma->exec_list, &eb->vmas); 149 list_add_tail(&vma->exec_list, &eb->vmas);
150 list_del_init(&obj->obj_exec_link);
145 151
146 vma->exec_entry = &exec[i]; 152 vma->exec_entry = &exec[i];
147 if (eb->and < 0) { 153 if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
155 ++i; 161 ++i;
156 } 162 }
157 163
164 return 0;
165
158 166
159out: 167err:
160 while (!list_empty(&objects)) { 168 while (!list_empty(&objects)) {
161 obj = list_first_entry(&objects, 169 obj = list_first_entry(&objects,
162 struct drm_i915_gem_object, 170 struct drm_i915_gem_object,
163 obj_exec_link); 171 obj_exec_link);
164 list_del_init(&obj->obj_exec_link); 172 list_del_init(&obj->obj_exec_link);
165 if (ret) 173 drm_gem_object_unreference(&obj->base);
166 drm_gem_object_unreference(&obj->base);
167 } 174 }
175 /*
176 * Objects already transferred to the vmas list will be unreferenced by
177 * eb_destroy.
178 */
179
168 return ret; 180 return ret;
169} 181}
170 182
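eb_lookup_vmas() now pops each object off the temporary lookup list as soon as its vma is created, so the error path only has to unreference whatever is still sitting on that list. A small standalone sketch of moving nodes between two singly linked lists (simplified from the driver's list_head usage):

    #include <stdio.h>

    struct obj { int handle; struct obj *next; };

    /* Move every node from the lookup list to the owner list; after an
     * error, only nodes left on 'from' still need their reference dropped. */
    static void transfer_all(struct obj **from, struct obj **to)
    {
            while (*from) {
                    struct obj *o = *from;

                    *from = o->next;   /* delete from lookup list */
                    o->next = *to;     /* hand over to owner list */
                    *to = o;
            }
    }

    int main(void)
    {
            struct obj a = { 1, NULL }, b = { 2, &a };
            struct obj *lookup = &b, *owned = NULL;

            transfer_all(&lookup, &owned);
            for (struct obj *o = owned; o; o = o->next)
                    printf("owned handle %d\n", o->handle);
            printf("lookup empty: %s\n", lookup ? "no" : "yes");
            return 0;
    }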
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a54eaabb3a3e..6c3a6e60aeac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -299,23 +299,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
299 unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; 299 unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
300 struct sg_page_iter sg_iter; 300 struct sg_page_iter sg_iter;
301 301
302 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); 302 pt_vaddr = NULL;
303 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 303 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
304 dma_addr_t page_addr; 304 if (pt_vaddr == NULL)
305 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
305 306
306 page_addr = sg_dma_address(sg_iter.sg) + 307 pt_vaddr[act_pte] =
307 (sg_iter.sg_pgoffset << PAGE_SHIFT); 308 gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
308 pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level, 309 cache_level, true);
309 true);
310 if (++act_pte == GEN8_PTES_PER_PAGE) { 310 if (++act_pte == GEN8_PTES_PER_PAGE) {
311 kunmap_atomic(pt_vaddr); 311 kunmap_atomic(pt_vaddr);
312 pt_vaddr = NULL;
312 act_pt++; 313 act_pt++;
313 pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
314 act_pte = 0; 314 act_pte = 0;
315
316 } 315 }
317 } 316 }
318 kunmap_atomic(pt_vaddr); 317 if (pt_vaddr)
318 kunmap_atomic(pt_vaddr);
319} 319}
320 320
321static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 321static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -583,21 +583,23 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
583 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 583 unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
584 struct sg_page_iter sg_iter; 584 struct sg_page_iter sg_iter;
585 585
586 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 586 pt_vaddr = NULL;
587 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 587 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
588 dma_addr_t page_addr; 588 if (pt_vaddr == NULL)
589 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
589 590
590 page_addr = sg_page_iter_dma_address(&sg_iter); 591 pt_vaddr[act_pte] =
591 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true); 592 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
593 cache_level, true);
592 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 594 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
593 kunmap_atomic(pt_vaddr); 595 kunmap_atomic(pt_vaddr);
596 pt_vaddr = NULL;
594 act_pt++; 597 act_pt++;
595 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
596 act_pte = 0; 598 act_pte = 0;
597
598 } 599 }
599 } 600 }
600 kunmap_atomic(pt_vaddr); 601 if (pt_vaddr)
602 kunmap_atomic(pt_vaddr);
601} 603}
602 604
603static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 605static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -918,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
918 WARN_ON(readq(&gtt_entries[i-1]) 920 WARN_ON(readq(&gtt_entries[i-1])
919 != gen8_pte_encode(addr, level, true)); 921 != gen8_pte_encode(addr, level, true));
920 922
921#if 0 /* TODO: Still needed on GEN8? */
922 /* This next bit makes the above posting read even more important. We 923 /* This next bit makes the above posting read even more important. We
923 * want to flush the TLBs only after we're certain all the PTE updates 924 * want to flush the TLBs only after we're certain all the PTE updates
924 * have finished. 925 * have finished.
925 */ 926 */
926 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 927 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
927 POSTING_READ(GFX_FLSH_CNTL_GEN6); 928 POSTING_READ(GFX_FLSH_CNTL_GEN6);
928#endif
929} 929}
930 930
931/* 931/*
@@ -1440,6 +1440,9 @@ static int i915_gmch_probe(struct drm_device *dev,
1440 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 1440 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
1441 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; 1441 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
1442 1442
1443 if (unlikely(dev_priv->gtt.do_idle_maps))
1444 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
1445
1443 return 0; 1446 return 0;
1444} 1447}
1445 1448
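Both PPGTT insert_entries loops now map a page table lazily: kmap_atomic() only on first use, kunmap_atomic() at each page boundary, and a final conditional unmap for a partially filled page. A userspace toy with the same shape (one reused buffer stands in for the page tables):

    #include <stdio.h>

    #define ENTRIES_PER_PAGE 4

    static int page_buf[ENTRIES_PER_PAGE];
    static int mapped;

    static void map_page(void)   { mapped = 1; puts("kmap");   }
    static void unmap_page(void) { mapped = 0; puts("kunmap"); }

    static void insert_entries(int count)
    {
            int pte = 0;

            for (int i = 0; i < count; i++) {
                    if (!mapped)
                            map_page();     /* map lazily, on first use */
                    page_buf[pte] = i;
                    if (++pte == ENTRIES_PER_PAGE) {
                            unmap_page();   /* page boundary reached */
                            pte = 0;
                    }
            }
            if (mapped)
                    unmap_page();           /* partially filled last page */
    }

    int main(void)
    {
            insert_entries(6);
            return 0;
    }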
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d284d892ed94..fed87ec17211 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -420,6 +420,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
420 420
421 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 421 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
422 list_add_tail(&vma->mm_list, &ggtt->inactive_list); 422 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
423 i915_gem_object_pin_pages(obj);
423 424
424 return obj; 425 return obj;
425 426
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1d44c793bdf4..6d11e253218a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -62,7 +62,7 @@ static const u32 hpd_mask_i915[] = {
62 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 62 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
63}; 63};
64 64
65static const u32 hpd_status_gen4[] = { 65static const u32 hpd_status_g4x[] = {
66 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 66 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
67 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 67 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
68 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 68 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
@@ -1233,9 +1233,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1233 spin_lock(&dev_priv->irq_lock); 1233 spin_lock(&dev_priv->irq_lock);
1234 for (i = 1; i < HPD_NUM_PINS; i++) { 1234 for (i = 1; i < HPD_NUM_PINS; i++) {
1235 1235
1236 WARN(((hpd[i] & hotplug_trigger) && 1236 WARN_ONCE(hpd[i] & hotplug_trigger &&
1237 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), 1237 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
1238 "Received HPD interrupt although disabled\n"); 1238 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1239 hotplug_trigger, i, hpd[i]);
1239 1240
1240 if (!(hpd[i] & hotplug_trigger) || 1241 if (!(hpd[i] & hotplug_trigger) ||
1241 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1242 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
@@ -2714,6 +2715,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2714#undef GEN8_IRQ_INIT_NDX 2715#undef GEN8_IRQ_INIT_NDX
2715 2716
2716 POSTING_READ(GEN8_PCU_IIR); 2717 POSTING_READ(GEN8_PCU_IIR);
2718
2719 ibx_irq_preinstall(dev);
2717} 2720}
2718 2721
2719static void ibx_hpd_irq_setup(struct drm_device *dev) 2722static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3220,7 +3223,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3220 3223
3221 for_each_pipe(pipe) { 3224 for_each_pipe(pipe) {
3222 int plane = pipe; 3225 int plane = pipe;
3223 if (IS_MOBILE(dev)) 3226 if (HAS_FBC(dev))
3224 plane = !plane; 3227 plane = !plane;
3225 3228
3226 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3229 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3421,7 +3424,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3421 3424
3422 for_each_pipe(pipe) { 3425 for_each_pipe(pipe) {
3423 int plane = pipe; 3426 int plane = pipe;
3424 if (IS_MOBILE(dev)) 3427 if (HAS_FBC(dev))
3425 plane = !plane; 3428 plane = !plane;
3426 3429
3427 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3430 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
@@ -3658,7 +3661,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3658 hotplug_status); 3661 hotplug_status);
3659 3662
3660 intel_hpd_irq_handler(dev, hotplug_trigger, 3663 intel_hpd_irq_handler(dev, hotplug_trigger,
3661 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 3664 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
3662 3665
3663 if (IS_G4X(dev) && 3666 if (IS_G4X(dev) &&
3664 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)) 3667 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
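The hotplug warning is demoted from WARN to WARN_ONCE so a stuck interrupt line complains a single time instead of flooding the log. A minimal userspace imitation, using a GCC statement expression just as the kernel macro does (the real WARN_ONCE also taints the kernel and dumps a backtrace):

    #include <stdio.h>

    #define WARN_ONCE(cond, ...) ({                         \
            static int warned;                              \
            if ((cond) && !warned) {                        \
                    warned = 1;                             \
                    fprintf(stderr, __VA_ARGS__);           \
            }                                               \
    })

    int main(void)
    {
            for (int irq = 0; irq < 5; irq++)
                    WARN_ONCE(1, "Received HPD interrupt although disabled\n");
            return 0;
    }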
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f1eece4a63d5..76126e0ae609 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -193,10 +193,13 @@
193#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ 193#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
194#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 194#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
195#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ 195#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
196#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
197#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
198#define MI_ARB_ENABLE (1<<0)
199#define MI_ARB_DISABLE (0<<0)
196#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 200#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
197#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) 201#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
198#define MI_SUSPEND_FLUSH_EN (1<<0) 202#define MI_SUSPEND_FLUSH_EN (1<<0)
199#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
200#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) 203#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
201#define MI_OVERLAY_CONTINUE (0x0<<21) 204#define MI_OVERLAY_CONTINUE (0x0<<21)
202#define MI_OVERLAY_ON (0x1<<21) 205#define MI_OVERLAY_ON (0x1<<21)
@@ -212,10 +215,24 @@
212#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 215#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
213#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 216#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
214#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 217#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
215#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) 218#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
216#define MI_ARB_ENABLE (1<<0) 219#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
217#define MI_ARB_DISABLE (0<<0) 220#define MI_SEMAPHORE_UPDATE (1<<21)
218 221#define MI_SEMAPHORE_COMPARE (1<<20)
222#define MI_SEMAPHORE_REGISTER (1<<18)
223#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
224#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
225#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
226#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
227#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
228#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
229#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
230#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
231#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
232#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
233#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
234#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
235#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
219#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 236#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
220#define MI_MM_SPACE_GTT (1<<8) 237#define MI_MM_SPACE_GTT (1<<8)
221#define MI_MM_SPACE_PHYSICAL (0<<8) 238#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -235,7 +252,7 @@
235 */ 252 */
236#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 253#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
237#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 254#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
238#define MI_SRM_LRM_GLOBAL_GTT (1<<22) 255#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
239#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 256#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
240#define MI_FLUSH_DW_STORE_INDEX (1<<21) 257#define MI_FLUSH_DW_STORE_INDEX (1<<21)
241#define MI_INVALIDATE_TLB (1<<18) 258#define MI_INVALIDATE_TLB (1<<18)
@@ -246,30 +263,13 @@
246#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 263#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
247#define MI_BATCH_NON_SECURE (1) 264#define MI_BATCH_NON_SECURE (1)
248/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ 265/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
249#define MI_BATCH_NON_SECURE_I965 (1<<8) 266#define MI_BATCH_NON_SECURE_I965 (1<<8)
250#define MI_BATCH_PPGTT_HSW (1<<8) 267#define MI_BATCH_PPGTT_HSW (1<<8)
251#define MI_BATCH_NON_SECURE_HSW (1<<13) 268#define MI_BATCH_NON_SECURE_HSW (1<<13)
252#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 269#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
253#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 270#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
254#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) 271#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
255#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 272
256#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
257#define MI_SEMAPHORE_UPDATE (1<<21)
258#define MI_SEMAPHORE_COMPARE (1<<20)
259#define MI_SEMAPHORE_REGISTER (1<<18)
260#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
261#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
262#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
263#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
264#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
265#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
266#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
267#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
268#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
269#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
270#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
271#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
272#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
273 273
274#define MI_PREDICATE_RESULT_2 (0x2214) 274#define MI_PREDICATE_RESULT_2 (0x2214)
275#define LOWER_SLICE_ENABLED (1<<0) 275#define LOWER_SLICE_ENABLED (1<<0)
@@ -3430,42 +3430,6 @@
3430/* the unit of memory self-refresh latency time is 0.5us */ 3430/* the unit of memory self-refresh latency time is 0.5us */
3431#define ILK_SRLT_MASK 0x3f 3431#define ILK_SRLT_MASK 0x3f
3432 3432
3433/* define the fifo size on Ironlake */
3434#define ILK_DISPLAY_FIFO 128
3435#define ILK_DISPLAY_MAXWM 64
3436#define ILK_DISPLAY_DFTWM 8
3437#define ILK_CURSOR_FIFO 32
3438#define ILK_CURSOR_MAXWM 16
3439#define ILK_CURSOR_DFTWM 8
3440
3441#define ILK_DISPLAY_SR_FIFO 512
3442#define ILK_DISPLAY_MAX_SRWM 0x1ff
3443#define ILK_DISPLAY_DFT_SRWM 0x3f
3444#define ILK_CURSOR_SR_FIFO 64
3445#define ILK_CURSOR_MAX_SRWM 0x3f
3446#define ILK_CURSOR_DFT_SRWM 8
3447
3448#define ILK_FIFO_LINE_SIZE 64
3449
3450/* define the WM info on Sandybridge */
3451#define SNB_DISPLAY_FIFO 128
3452#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
3453#define SNB_DISPLAY_DFTWM 8
3454#define SNB_CURSOR_FIFO 32
3455#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
3456#define SNB_CURSOR_DFTWM 8
3457
3458#define SNB_DISPLAY_SR_FIFO 512
3459#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
3460#define SNB_DISPLAY_DFT_SRWM 0x3f
3461#define SNB_CURSOR_SR_FIFO 64
3462#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
3463#define SNB_CURSOR_DFT_SRWM 8
3464
3465#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
3466
3467#define SNB_FIFO_LINE_SIZE 64
3468
3469 3433
3470/* the address where we get all kinds of latency value */ 3434/* the address where we get all kinds of latency value */
3471#define SSKPD 0x5d10 3435#define SSKPD 0x5d10
@@ -4148,6 +4112,8 @@
4148#define DISP_ARB_CTL 0x45000 4112#define DISP_ARB_CTL 0x45000
4149#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 4113#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
4150#define DISP_FBC_WM_DIS (1<<15) 4114#define DISP_FBC_WM_DIS (1<<15)
4115#define DISP_ARB_CTL2 0x45004
4116#define DISP_DATA_PARTITION_5_6 (1<<6)
4151#define GEN7_MSG_CTL 0x45010 4117#define GEN7_MSG_CTL 0x45010
4152#define WAIT_FOR_PCH_RESET_ACK (1<<1) 4118#define WAIT_FOR_PCH_RESET_ACK (1<<1)
4153#define WAIT_FOR_PCH_FLR_ACK (1<<0) 4119#define WAIT_FOR_PCH_FLR_ACK (1<<0)
@@ -4856,6 +4822,8 @@
4856#define FORCEWAKE_ACK 0x130090 4822#define FORCEWAKE_ACK 0x130090
4857#define VLV_GTLC_WAKE_CTRL 0x130090 4823#define VLV_GTLC_WAKE_CTRL 0x130090
4858#define VLV_GTLC_PW_STATUS 0x130094 4824#define VLV_GTLC_PW_STATUS 0x130094
4825#define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
4826#define VLV_GTLC_PW_MEDIA_STATUS_MASK 0x20
4859#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4827#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4860#define FORCEWAKE_KERNEL 0x1 4828#define FORCEWAKE_KERNEL 0x1
4861#define FORCEWAKE_USER 0x2 4829#define FORCEWAKE_USER 0x2
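The two new VLV_GTLC_PW_*_STATUS_MASK bits added here are what the vlv_drpc_info() debugfs code tests to report the render and media power wells. Decoding them in isolation (the register value is faked, since there is no hardware read here):

    #include <stdio.h>

    #define VLV_GTLC_PW_RENDER_STATUS_MASK 0x80
    #define VLV_GTLC_PW_MEDIA_STATUS_MASK  0x20

    int main(void)
    {
            /* Pretend result of I915_READ(VLV_GTLC_PW_STATUS). */
            unsigned int pw_status = 0x80;

            printf("Render Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
            printf("Media Power Well: %s\n",
                   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
            return 0;
    }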
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6b8fef7fb3bb..8150fdc08d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -237,7 +237,7 @@ static void i915_save_display(struct drm_device *dev)
237 } 237 }
238 238
239 /* Only regfile.save FBC state on the platform that supports FBC */ 239 /* Only regfile.save FBC state on the platform that supports FBC */
240 if (I915_HAS_FBC(dev)) { 240 if (HAS_FBC(dev)) {
241 if (HAS_PCH_SPLIT(dev)) { 241 if (HAS_PCH_SPLIT(dev)) {
242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 242 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
243 } else if (IS_GM45(dev)) { 243 } else if (IS_GM45(dev)) {
@@ -300,7 +300,7 @@ static void i915_restore_display(struct drm_device *dev)
300 300
301 /* only restore FBC info on the platform that supports FBC*/ 301 /* only restore FBC info on the platform that supports FBC*/
302 intel_disable_fbc(dev); 302 intel_disable_fbc(dev);
303 if (I915_HAS_FBC(dev)) { 303 if (HAS_FBC(dev)) {
304 if (HAS_PCH_SPLIT(dev)) { 304 if (HAS_PCH_SPLIT(dev)) {
305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 305 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
306 } else if (IS_GM45(dev)) { 306 } else if (IS_GM45(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f88e5079a3f5..f22041973f3a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -281,6 +281,34 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
281 } 281 }
282} 282}
283 283
284static void
285parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
286{
287 const struct bdb_lfp_backlight_data *backlight_data;
288 const struct bdb_lfp_backlight_data_entry *entry;
289
290 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
291 if (!backlight_data)
292 return;
293
294 if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
295 DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
296 backlight_data->entry_size);
297 return;
298 }
299
300 entry = &backlight_data->data[panel_type];
301
302 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
303 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
304 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
305 "active %s, min brightness %u, level %u\n",
306 dev_priv->vbt.backlight.pwm_freq_hz,
307 dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
308 entry->min_brightness,
309 backlight_data->level[panel_type]);
310}
311
284/* Try to find sdvo panel data */ 312/* Try to find sdvo panel data */
285static void 313static void
286parse_sdvo_panel_data(struct drm_i915_private *dev_priv, 314parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
@@ -894,6 +922,7 @@ intel_parse_bios(struct drm_device *dev)
894 parse_general_features(dev_priv, bdb); 922 parse_general_features(dev_priv, bdb);
895 parse_general_definitions(dev_priv, bdb); 923 parse_general_definitions(dev_priv, bdb);
896 parse_lfp_panel_data(dev_priv, bdb); 924 parse_lfp_panel_data(dev_priv, bdb);
925 parse_lfp_backlight(dev_priv, bdb);
897 parse_sdvo_panel_data(dev_priv, bdb); 926 parse_sdvo_panel_data(dev_priv, bdb);
898 parse_sdvo_device_mapping(dev_priv, bdb); 927 parse_sdvo_device_mapping(dev_priv, bdb);
899 parse_device_mapping(dev_priv, bdb); 928 parse_device_mapping(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 81ed58cb7b31..282de5e9f39d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -373,6 +373,22 @@ struct bdb_lvds_lfp_data {
373 struct bdb_lvds_lfp_data_entry data[16]; 373 struct bdb_lvds_lfp_data_entry data[16];
374} __packed; 374} __packed;
375 375
376struct bdb_lfp_backlight_data_entry {
377 u8 type:2;
378 u8 active_low_pwm:1;
379 u8 obsolete1:5;
380 u16 pwm_freq_hz;
381 u8 min_brightness;
382 u8 obsolete2;
383 u8 obsolete3;
384} __packed;
385
386struct bdb_lfp_backlight_data {
387 u8 entry_size;
388 struct bdb_lfp_backlight_data_entry data[16];
389 u8 level[16];
390} __packed;
391
376struct aimdb_header { 392struct aimdb_header {
377 char signature[16]; 393 char signature[16];
378 char oem_device[20]; 394 char oem_device[20];
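The layout matters here: parse_lfp_backlight() rejects the block unless entry_size matches sizeof(struct bdb_lfp_backlight_data_entry), so the struct must mirror the VBT bytes exactly, bitfields and all. A standalone check of that packed layout (the initializer values are made up):

    #include <stdint.h>
    #include <stdio.h>

    struct bdb_lfp_backlight_data_entry {
            uint8_t  type:2;
            uint8_t  active_low_pwm:1;
            uint8_t  obsolete1:5;
            uint16_t pwm_freq_hz;
            uint8_t  min_brightness;
            uint8_t  obsolete2;
            uint8_t  obsolete3;
    } __attribute__((packed));

    int main(void)
    {
            struct bdb_lfp_backlight_data_entry e = {
                    .active_low_pwm = 1,
                    .pwm_freq_hz = 200,
                    .min_brightness = 5,
            };

            /* 6 bytes when packed: 1 (bitfields) + 2 + 1 + 1 + 1. */
            printf("entry size: %zu\n", sizeof(e));
            printf("PWM %u Hz, active %s, min brightness %u\n",
                   e.pwm_freq_hz, e.active_low_pwm ? "low" : "high",
                   e.min_brightness);
            return 0;
    }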
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d7d2683b89df..e06b9e017d6b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1136,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
1136 enum pipe pipe; 1136 enum pipe pipe;
1137 struct intel_crtc *intel_crtc; 1137 struct intel_crtc *intel_crtc;
1138 1138
1139 dev_priv->ddi_plls.spll_refcount = 0;
1140 dev_priv->ddi_plls.wrpll1_refcount = 0;
1141 dev_priv->ddi_plls.wrpll2_refcount = 0;
1142
1139 for_each_pipe(pipe) { 1143 for_each_pipe(pipe) {
1140 intel_crtc = 1144 intel_crtc =
1141 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 1145 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1142 1146
1143 if (!intel_crtc->active) 1147 if (!intel_crtc->active) {
1148 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
1144 continue; 1149 continue;
1150 }
1145 1151
1146 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, 1152 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1147 pipe); 1153 pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 72a83fabb105..e77d4b8856a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,15 +1211,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1211 } 1211 }
1212} 1212}
1213 1213
1214static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1214static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1215{ 1215{
1216 u32 val; 1216 u32 val;
1217 bool enabled; 1217 bool enabled;
1218 1218
1219 if (HAS_PCH_LPT(dev_priv->dev)) { 1219 WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1220 DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
1221 return;
1222 }
1223 1220
1224 val = I915_READ(PCH_DREF_CONTROL); 1221 val = I915_READ(PCH_DREF_CONTROL);
1225 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1222 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
@@ -1367,11 +1364,24 @@ static void intel_init_dpio(struct drm_device *dev)
1367 if (!IS_VALLEYVIEW(dev)) 1364 if (!IS_VALLEYVIEW(dev))
1368 return; 1365 return;
1369 1366
1370 /* Enable the CRI clock source so we can get at the display */ 1367 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1368}
1369
1370static void intel_reset_dpio(struct drm_device *dev)
1371{
1372 struct drm_i915_private *dev_priv = dev->dev_private;
1373
1374 if (!IS_VALLEYVIEW(dev))
1375 return;
1376
1377 /*
1378 * Enable the CRI clock source so we can get at the display and the
1379 * reference clock for VGA hotplug / manual detection.
1380 */
1371 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | 1381 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
1382 DPLL_REFA_CLK_ENABLE_VLV |
1372 DPLL_INTEGRATED_CRI_CLK_VLV); 1383 DPLL_INTEGRATED_CRI_CLK_VLV);
1373 1384
1374 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1375 /* 1385 /*
1376 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - 1386 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1377 * 6. De-assert cmn_reset/side_reset. Same as VLV X0. 1387 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1498,9 +1508,12 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1498 /* Make sure the pipe isn't still relying on us */ 1508 /* Make sure the pipe isn't still relying on us */
1499 assert_pipe_disabled(dev_priv, pipe); 1509 assert_pipe_disabled(dev_priv, pipe);
1500 1510
1501 /* Leave integrated clock source enabled */ 1511 /*
1512 * Leave integrated clock source and reference clock enabled for pipe B.
1513 * The latter is needed for VGA hotplug / manual detection.
1514 */
1502 if (pipe == PIPE_B) 1515 if (pipe == PIPE_B)
1503 val = DPLL_INTEGRATED_CRI_CLK_VLV; 1516 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1504 I915_WRITE(DPLL(pipe), val); 1517 I915_WRITE(DPLL(pipe), val);
1505 POSTING_READ(DPLL(pipe)); 1518 POSTING_READ(DPLL(pipe));
1506} 1519}
@@ -2373,6 +2386,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2373 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); 2386 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2374 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); 2387 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2375 } 2388 }
2389 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2390 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2376 } 2391 }
2377 2392
2378 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2393 ret = dev_priv->display.update_plane(crtc, fb, x, y);
@@ -3422,9 +3437,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
3422 mutex_unlock(&dev_priv->rps.hw_lock); 3437 mutex_unlock(&dev_priv->rps.hw_lock);
3423 /* Quoting Art Runyan: "its not safe to expect any particular 3438 /* Quoting Art Runyan: "its not safe to expect any particular
3424 * value in IPS_CTL bit 31 after enabling IPS through the 3439 * value in IPS_CTL bit 31 after enabling IPS through the
3425 * mailbox." Therefore we need to defer waiting on the state 3440 * mailbox." Moreover, the mailbox may return a bogus state,
3426 * change. 3441 * so we need to just enable it and continue on.
3427 * TODO: need to fix this for state checker
3428 */ 3442 */
3429 } else { 3443 } else {
3430 I915_WRITE(IPS_CTL, IPS_ENABLE); 3444 I915_WRITE(IPS_CTL, IPS_ENABLE);
@@ -3451,9 +3465,10 @@ void hsw_disable_ips(struct intel_crtc *crtc)
3451 mutex_lock(&dev_priv->rps.hw_lock); 3465 mutex_lock(&dev_priv->rps.hw_lock);
3452 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 3466 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3453 mutex_unlock(&dev_priv->rps.hw_lock); 3467 mutex_unlock(&dev_priv->rps.hw_lock);
3454 } else 3468 } else {
3455 I915_WRITE(IPS_CTL, 0); 3469 I915_WRITE(IPS_CTL, 0);
3456 POSTING_READ(IPS_CTL); 3470 POSTING_READ(IPS_CTL);
3471 }
3457 3472
3458 /* We need to wait for a vblank before we can disable the plane. */ 3473 /* We need to wait for a vblank before we can disable the plane. */
3459 intel_wait_for_vblank(dev, crtc->pipe); 3474 intel_wait_for_vblank(dev, crtc->pipe);
@@ -3488,7 +3503,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
3488 /* Workaround : Do not read or write the pipe palette/gamma data while 3503 /* Workaround : Do not read or write the pipe palette/gamma data while
3489 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 3504 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3490 */ 3505 */
3491 if (intel_crtc->config.ips_enabled && 3506 if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3492 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 3507 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3493 GAMMA_MODE_MODE_SPLIT)) { 3508 GAMMA_MODE_MODE_SPLIT)) {
3494 hsw_disable_ips(intel_crtc); 3509 hsw_disable_ips(intel_crtc);
@@ -4975,7 +4990,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4975 4990
4976 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 4991 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
4977 4992
4978 /* Enable DPIO clock input */ 4993 /*
4994 * Enable DPIO clock input. We should never disable the reference
4995 * clock for pipe B, since VGA hotplug / manual detection depends
4996 * on it.
4997 */
4979 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4998 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4980 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4999 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4981 /* We should never disable this, set it here for state tracking */ 5000 /* We should never disable this, set it here for state tracking */
@@ -5420,6 +5439,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5420 struct drm_i915_private *dev_priv = dev->dev_private; 5439 struct drm_i915_private *dev_priv = dev->dev_private;
5421 uint32_t tmp; 5440 uint32_t tmp;
5422 5441
5442 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
5443 return;
5444
5423 tmp = I915_READ(PFIT_CONTROL); 5445 tmp = I915_READ(PFIT_CONTROL);
5424 if (!(tmp & PFIT_ENABLE)) 5446 if (!(tmp & PFIT_ENABLE))
5425 return; 5447 return;
@@ -6995,8 +7017,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6995 if (intel_display_power_enabled(dev, pfit_domain)) 7017 if (intel_display_power_enabled(dev, pfit_domain))
6996 ironlake_get_pfit_config(crtc, pipe_config); 7018 ironlake_get_pfit_config(crtc, pipe_config);
6997 7019
6998 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 7020 if (IS_HASWELL(dev))
6999 (I915_READ(IPS_CTL) & IPS_ENABLE); 7021 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
7022 (I915_READ(IPS_CTL) & IPS_ENABLE);
7000 7023
7001 pipe_config->pixel_multiplier = 1; 7024 pipe_config->pixel_multiplier = 1;
7002 7025
@@ -7951,7 +7974,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7951 else 7974 else
7952 i9xx_clock(refclk, &clock); 7975 i9xx_clock(refclk, &clock);
7953 } else { 7976 } else {
7954 u32 lvds = I915_READ(LVDS); 7977 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
7955 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 7978 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
7956 7979
7957 if (is_lvds) { 7980 if (is_lvds) {
@@ -9326,7 +9349,9 @@ intel_pipe_config_compare(struct drm_device *dev,
9326 PIPE_CONF_CHECK_I(pch_pfit.size); 9349 PIPE_CONF_CHECK_I(pch_pfit.size);
9327 } 9350 }
9328 9351
9329 PIPE_CONF_CHECK_I(ips_enabled); 9352 /* BDW+ don't expose a synchronous way to read the state */
9353 if (IS_HASWELL(dev))
9354 PIPE_CONF_CHECK_I(ips_enabled);
9330 9355
9331 PIPE_CONF_CHECK_I(double_wide); 9356 PIPE_CONF_CHECK_I(double_wide);
9332 9357
@@ -9339,7 +9364,7 @@ intel_pipe_config_compare(struct drm_device *dev,
9339 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9364 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9340 PIPE_CONF_CHECK_I(pipe_bpp); 9365 PIPE_CONF_CHECK_I(pipe_bpp);
9341 9366
9342 if (!IS_HASWELL(dev)) { 9367 if (!HAS_DDI(dev)) {
9343 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9368 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9344 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9369 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9345 } 9370 }
@@ -9913,17 +9938,21 @@ intel_modeset_stage_output_state(struct drm_device *dev,
9913 /* Check for any encoders that need to be disabled. */ 9938 /* Check for any encoders that need to be disabled. */
9914 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9939 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9915 base.head) { 9940 base.head) {
9941 int num_connectors = 0;
9916 list_for_each_entry(connector, 9942 list_for_each_entry(connector,
9917 &dev->mode_config.connector_list, 9943 &dev->mode_config.connector_list,
9918 base.head) { 9944 base.head) {
9919 if (connector->new_encoder == encoder) { 9945 if (connector->new_encoder == encoder) {
9920 WARN_ON(!connector->new_encoder->new_crtc); 9946 WARN_ON(!connector->new_encoder->new_crtc);
9921 9947 num_connectors++;
9922 goto next_encoder;
9923 } 9948 }
9924 } 9949 }
9925 encoder->new_crtc = NULL; 9950
9926next_encoder: 9951 if (num_connectors == 0)
9952 encoder->new_crtc = NULL;
9953 else if (num_connectors > 1)
9954 return -EINVAL;
9955
9927 /* Only now check for crtc changes so we don't miss encoders 9956 /* Only now check for crtc changes so we don't miss encoders
9928 * that will be disabled. */ 9957 * that will be disabled. */
9929 if (&encoder->new_crtc->base != encoder->base.crtc) { 9958 if (&encoder->new_crtc->base != encoder->base.crtc) {
@@ -9994,6 +10023,16 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
9994 10023
9995 ret = intel_pipe_set_base(set->crtc, 10024 ret = intel_pipe_set_base(set->crtc,
9996 set->x, set->y, set->fb); 10025 set->x, set->y, set->fb);
10026 /*
10027 * In the fastboot case this may be our only check of the
10028 * state after boot. It would be better to only do it on
10029 * the first update, but we don't have a nice way of doing that
10030 * (and really, set_config isn't used much for high freq page
10031 * flipping, so increasing its cost here shouldn't be a big
10032 * deal).
10033 */
10034 if (i915_fastboot && ret == 0)
10035 intel_modeset_check_state(set->crtc->dev);
9997 } 10036 }
9998 10037
9999 if (ret) { 10038 if (ret) {
@@ -10054,7 +10093,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
10054 struct intel_shared_dpll *pll) 10093 struct intel_shared_dpll *pll)
10055{ 10094{
10056 /* PCH refclock must be enabled first */ 10095 /* PCH refclock must be enabled first */
10057 assert_pch_refclk_enabled(dev_priv); 10096 ibx_assert_pch_refclk_enabled(dev_priv);
10058 10097
10059 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); 10098 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
10060 10099
@@ -10122,8 +10161,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10122 dev_priv->num_shared_dpll = 0; 10161 dev_priv->num_shared_dpll = 0;
10123 10162
10124 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 10163 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10125 DRM_DEBUG_KMS("%i shared PLLs initialized\n",
10126 dev_priv->num_shared_dpll);
10127} 10164}
10128 10165
10129static void intel_crtc_init(struct drm_device *dev, int pipe) 10166static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -10151,7 +10188,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
10151 */ 10188 */
10152 intel_crtc->pipe = pipe; 10189 intel_crtc->pipe = pipe;
10153 intel_crtc->plane = pipe; 10190 intel_crtc->plane = pipe;
10154 if (IS_MOBILE(dev) && INTEL_INFO(dev)->gen < 4) { 10191 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
10155 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 10192 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
10156 intel_crtc->plane = !pipe; 10193 intel_crtc->plane = !pipe;
10157 } 10194 }
@@ -10240,6 +10277,28 @@ static bool has_edp_a(struct drm_device *dev)
10240 return true; 10277 return true;
10241} 10278}
10242 10279
10280const char *intel_output_name(int output)
10281{
10282 static const char *names[] = {
10283 [INTEL_OUTPUT_UNUSED] = "Unused",
10284 [INTEL_OUTPUT_ANALOG] = "Analog",
10285 [INTEL_OUTPUT_DVO] = "DVO",
10286 [INTEL_OUTPUT_SDVO] = "SDVO",
10287 [INTEL_OUTPUT_LVDS] = "LVDS",
10288 [INTEL_OUTPUT_TVOUT] = "TV",
10289 [INTEL_OUTPUT_HDMI] = "HDMI",
10290 [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
10291 [INTEL_OUTPUT_EDP] = "eDP",
10292 [INTEL_OUTPUT_DSI] = "DSI",
10293 [INTEL_OUTPUT_UNKNOWN] = "Unknown",
10294 };
10295
10296 if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
10297 return "Invalid";
10298
10299 return names[output];
10300}
10301
10243static void intel_setup_outputs(struct drm_device *dev) 10302static void intel_setup_outputs(struct drm_device *dev)
10244{ 10303{
10245 struct drm_i915_private *dev_priv = dev->dev_private; 10304 struct drm_i915_private *dev_priv = dev->dev_private;
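The new intel_output_name() pairs a designated-initializer string table with a combined range-and-NULL check, so out-of-range values and holes in the INTEL_OUTPUT_* enum both fall back to "Invalid". A self-contained sketch of the same lookup pattern, using a made-up enum in place of the real INTEL_OUTPUT_* values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Made-up enum; the real INTEL_OUTPUT_* values live in intel_drv.h. */
enum output_type { OUT_UNUSED, OUT_ANALOG, OUT_HDMI = 6 };

static const char *output_name(int output)
{
        static const char * const names[] = {
                [OUT_UNUSED] = "Unused",
                [OUT_ANALOG] = "Analog",
                [OUT_HDMI]   = "HDMI",
        };

        /* Catches out-of-range values and enum holes (NULL slots) alike. */
        if (output < 0 || output >= (int)ARRAY_SIZE(names) || !names[output])
                return "Invalid";

        return names[output];
}

int main(void)
{
        /* Prints: HDMI Invalid Invalid */
        printf("%s %s %s\n", output_name(OUT_HDMI),
               output_name(3), output_name(42));
        return 0;
}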
@@ -10757,11 +10816,20 @@ static struct intel_quirk intel_quirks[] = {
10757 /* Sony Vaio Y cannot use SSC on LVDS */ 10816 /* Sony Vaio Y cannot use SSC on LVDS */
10758 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10817 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10759 10818
10760 /* 10819 /* Acer Aspire 5734Z must invert backlight brightness */
10761 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops 10820 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10762 * seem to use inverted backlight PWM. 10821
10763 */ 10822 /* Acer/eMachines G725 */
10764 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10823 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10824
10825 /* Acer/eMachines e725 */
10826 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10827
10828 /* Acer/Packard Bell NCL20 */
10829 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10830
10831 /* Acer Aspire 4736Z */
10832 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10765}; 10833};
10766 10834
10767static void intel_init_quirks(struct drm_device *dev) 10835static void intel_init_quirks(struct drm_device *dev)
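The quirk hunk above narrows a PCI_ANY_ID wildcard, which had matched every 0x2a42/0x1025 machine, to the specific subsystem device IDs actually known to need an inverted backlight. Roughly how such a table is matched, sketched with hypothetical types modeled on intel_init_quirks rather than copied from it:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PCI_ANY_ID ((uint16_t)~0)       /* wildcard, as in the old entry */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct quirk {
        uint16_t device, subsystem_vendor, subsystem_device;
        void (*hook)(void);
};

static void invert_brightness(void) { puts("backlight inverted"); }

/* Mirrors the shape of the table in the hunk above. */
static const struct quirk quirks[] = {
        { 0x2a42, 0x1025, 0x0459, invert_brightness }, /* Aspire 5734Z */
        { 0x2a42, 0x1025, 0x0210, invert_brightness }, /* eMachines G725 */
};

static void apply_quirks(uint16_t dev, uint16_t sv, uint16_t sd)
{
        for (size_t i = 0; i < ARRAY_SIZE(quirks); i++) {
                const struct quirk *q = &quirks[i];

                if (q->device == dev &&
                    (q->subsystem_vendor == sv ||
                     q->subsystem_vendor == PCI_ANY_ID) &&
                    (q->subsystem_device == sd ||
                     q->subsystem_device == PCI_ANY_ID))
                        q->hook();
        }
}

int main(void)
{
        apply_quirks(0x2a42, 0x1025, 0x0459);   /* matches the first entry */
        return 0;
}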
@@ -10809,7 +10877,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
10809 10877
10810 intel_init_clock_gating(dev); 10878 intel_init_clock_gating(dev);
10811 10879
10812 intel_init_dpio(dev); 10880 intel_reset_dpio(dev);
10813 10881
10814 mutex_lock(&dev->struct_mutex); 10882 mutex_lock(&dev->struct_mutex);
10815 intel_enable_gt_powersave(dev); 10883 intel_enable_gt_powersave(dev);
@@ -10871,6 +10939,9 @@ void intel_modeset_init(struct drm_device *dev)
10871 } 10939 }
10872 } 10940 }
10873 10941
10942 intel_init_dpio(dev);
10943 intel_reset_dpio(dev);
10944
10874 intel_cpu_pll_init(dev); 10945 intel_cpu_pll_init(dev);
10875 intel_shared_dpll_init(dev); 10946 intel_shared_dpll_init(dev);
10876 10947
@@ -11218,7 +11289,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11218 pll->on = false; 11289 pll->on = false;
11219 } 11290 }
11220 11291
11221 if (IS_HASWELL(dev)) 11292 if (HAS_PCH_SPLIT(dev))
11222 ilk_wm_get_hw_state(dev); 11293 ilk_wm_get_hw_state(dev);
11223 11294
11224 if (force_restore) { 11295 if (force_restore) {
@@ -11240,8 +11311,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11240 } 11311 }
11241 11312
11242 intel_modeset_check_state(dev); 11313 intel_modeset_check_state(dev);
11243
11244 drm_mode_config_reset(dev);
11245} 11314}
11246 11315
11247void intel_modeset_gem_init(struct drm_device *dev) 11316void intel_modeset_gem_init(struct drm_device *dev)
@@ -11250,7 +11319,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11250 11319
11251 intel_setup_overlay(dev); 11320 intel_setup_overlay(dev);
11252 11321
11322 mutex_lock(&dev->mode_config.mutex);
11323 drm_mode_config_reset(dev);
11253 intel_modeset_setup_hw_state(dev, false); 11324 intel_modeset_setup_hw_state(dev, false);
11325 mutex_unlock(&dev->mode_config.mutex);
11254} 11326}
11255 11327
11256void intel_modeset_cleanup(struct drm_device *dev) 11328void intel_modeset_cleanup(struct drm_device *dev)
@@ -11328,14 +11400,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
11328int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 11400int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11329{ 11401{
11330 struct drm_i915_private *dev_priv = dev->dev_private; 11402 struct drm_i915_private *dev_priv = dev->dev_private;
11403 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11331 u16 gmch_ctrl; 11404 u16 gmch_ctrl;
11332 11405
11333 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); 11406 pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11334 if (state) 11407 if (state)
11335 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11408 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11336 else 11409 else
11337 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11410 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11338 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 11411 pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11339 return 0; 11412 return 0;
11340} 11413}
11341 11414
@@ -11445,7 +11518,8 @@ intel_display_capture_error_state(struct drm_device *dev)
11445 enum transcoder cpu_transcoder = transcoders[i]; 11518 enum transcoder cpu_transcoder = transcoders[i];
11446 11519
11447 error->transcoder[i].power_domain_on = 11520 error->transcoder[i].power_domain_on =
11448 intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i)); 11521 intel_display_power_enabled_sw(dev,
11522 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
11449 if (!error->transcoder[i].power_domain_on) 11523 if (!error->transcoder[i].power_domain_on)
11450 continue; 11524 continue;
11451 11525
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea00068cced2..8754db9e3d52 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -65,8 +65,8 @@
65#define wait_for_atomic_us(COND, US) _wait_for((COND), \ 65#define wait_for_atomic_us(COND, US) _wait_for((COND), \
66 DIV_ROUND_UP((US), 1000), 0) 66 DIV_ROUND_UP((US), 1000), 0)
67 67
68#define KHz(x) (1000*x) 68#define KHz(x) (1000 * (x))
69#define MHz(x) KHz(1000*x) 69#define MHz(x) KHz(1000 * (x))
70 70
71/* 71/*
72 * Display related stuff 72 * Display related stuff
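The KHz()/MHz() hunk is plain macro hygiene: without parentheses around the argument, C operator precedence silently miscomputes any compound expression passed in. A two-macro demo of the failure mode being fixed:

#include <stdio.h>

#define KHZ_OLD(x) (1000*x)             /* the old, unparenthesized form */
#define KHZ_NEW(x) (1000 * (x))         /* the fixed form from the hunk */

int main(void)
{
        /* KHZ_OLD(1 + 2) expands to (1000*1 + 2) == 1002, not 3000. */
        printf("old: %d  new: %d\n", KHZ_OLD(1 + 2), KHZ_NEW(1 + 2));
        return 0;
}

The same substitution error compounds through MHz(), which expands via KHz().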
@@ -625,6 +625,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
625 625
626 626
627/* intel_display.c */ 627/* intel_display.c */
628const char *intel_output_name(int output);
628int intel_pch_rawclk(struct drm_device *dev); 629int intel_pch_rawclk(struct drm_device *dev);
629void intel_mark_busy(struct drm_device *dev); 630void intel_mark_busy(struct drm_device *dev);
630void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 631void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -838,6 +839,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
838 uint32_t sprite_width, int pixel_size, 839 uint32_t sprite_width, int pixel_size,
839 bool enabled, bool scaled); 840 bool enabled, bool scaled);
840void intel_init_pm(struct drm_device *dev); 841void intel_init_pm(struct drm_device *dev);
842void intel_pm_setup(struct drm_device *dev);
841bool intel_fbc_enabled(struct drm_device *dev); 843bool intel_fbc_enabled(struct drm_device *dev);
842void intel_update_fbc(struct drm_device *dev); 844void intel_update_fbc(struct drm_device *dev);
843void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 845void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 284c3eb066f6..39eac9937a4a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -328,8 +328,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
328 fb_set_suspend(info, state); 328 fb_set_suspend(info, state);
329} 329}
330 330
331MODULE_LICENSE("GPL and additional rights");
332
333void intel_fbdev_output_poll_changed(struct drm_device *dev) 331void intel_fbdev_output_poll_changed(struct drm_device *dev)
334{ 332{
335 struct drm_i915_private *dev_priv = dev->dev_private; 333 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990fbab3..a759ecdb7a6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1005,7 +1005,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1005 u32 pfit_control; 1005 u32 pfit_control;
1006 1006
1007 /* i830 doesn't have a panel fitter */ 1007 /* i830 doesn't have a panel fitter */
1008 if (IS_I830(dev)) 1008 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
1009 return -1; 1009 return -1;
1010 1010
1011 pfit_control = I915_READ(PFIT_CONTROL); 1011 pfit_control = I915_READ(PFIT_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 04b28f906f9e..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -461,7 +461,7 @@ void intel_update_fbc(struct drm_device *dev)
461 const struct drm_display_mode *adjusted_mode; 461 const struct drm_display_mode *adjusted_mode;
462 unsigned int max_width, max_height; 462 unsigned int max_width, max_height;
463 463
464 if (!I915_HAS_FBC(dev)) { 464 if (!HAS_FBC(dev)) {
465 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); 465 set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
466 return; 466 return;
467 } 467 }
@@ -824,7 +824,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
824 return size; 824 return size;
825} 825}
826 826
827static int i85x_get_fifo_size(struct drm_device *dev, int plane) 827static int i830_get_fifo_size(struct drm_device *dev, int plane)
828{ 828{
829 struct drm_i915_private *dev_priv = dev->dev_private; 829 struct drm_i915_private *dev_priv = dev->dev_private;
830 uint32_t dsparb = I915_READ(DSPARB); 830 uint32_t dsparb = I915_READ(DSPARB);
@@ -857,21 +857,6 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
857 return size; 857 return size;
858} 858}
859 859
860static int i830_get_fifo_size(struct drm_device *dev, int plane)
861{
862 struct drm_i915_private *dev_priv = dev->dev_private;
863 uint32_t dsparb = I915_READ(DSPARB);
864 int size;
865
866 size = dsparb & 0x7f;
867 size >>= 1; /* Convert to cachelines */
868
869 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
870 plane ? "B" : "A", size);
871
872 return size;
873}
874
875/* Pineview has different values for various configs */ 860/* Pineview has different values for various configs */
876static const struct intel_watermark_params pineview_display_wm = { 861static const struct intel_watermark_params pineview_display_wm = {
877 PINEVIEW_DISPLAY_FIFO, 862 PINEVIEW_DISPLAY_FIFO,
@@ -950,14 +935,14 @@ static const struct intel_watermark_params i915_wm_info = {
950 2, 935 2,
951 I915_FIFO_LINE_SIZE 936 I915_FIFO_LINE_SIZE
952}; 937};
953static const struct intel_watermark_params i855_wm_info = { 938static const struct intel_watermark_params i830_wm_info = {
954 I855GM_FIFO_SIZE, 939 I855GM_FIFO_SIZE,
955 I915_MAX_WM, 940 I915_MAX_WM,
956 1, 941 1,
957 2, 942 2,
958 I830_FIFO_LINE_SIZE 943 I830_FIFO_LINE_SIZE
959}; 944};
960static const struct intel_watermark_params i830_wm_info = { 945static const struct intel_watermark_params i845_wm_info = {
961 I830_FIFO_SIZE, 946 I830_FIFO_SIZE,
962 I915_MAX_WM, 947 I915_MAX_WM,
963 1, 948 1,
@@ -965,65 +950,6 @@ static const struct intel_watermark_params i830_wm_info = {
965 I830_FIFO_LINE_SIZE 950 I830_FIFO_LINE_SIZE
966}; 951};
967 952
968static const struct intel_watermark_params ironlake_display_wm_info = {
969 ILK_DISPLAY_FIFO,
970 ILK_DISPLAY_MAXWM,
971 ILK_DISPLAY_DFTWM,
972 2,
973 ILK_FIFO_LINE_SIZE
974};
975static const struct intel_watermark_params ironlake_cursor_wm_info = {
976 ILK_CURSOR_FIFO,
977 ILK_CURSOR_MAXWM,
978 ILK_CURSOR_DFTWM,
979 2,
980 ILK_FIFO_LINE_SIZE
981};
982static const struct intel_watermark_params ironlake_display_srwm_info = {
983 ILK_DISPLAY_SR_FIFO,
984 ILK_DISPLAY_MAX_SRWM,
985 ILK_DISPLAY_DFT_SRWM,
986 2,
987 ILK_FIFO_LINE_SIZE
988};
989static const struct intel_watermark_params ironlake_cursor_srwm_info = {
990 ILK_CURSOR_SR_FIFO,
991 ILK_CURSOR_MAX_SRWM,
992 ILK_CURSOR_DFT_SRWM,
993 2,
994 ILK_FIFO_LINE_SIZE
995};
996
997static const struct intel_watermark_params sandybridge_display_wm_info = {
998 SNB_DISPLAY_FIFO,
999 SNB_DISPLAY_MAXWM,
1000 SNB_DISPLAY_DFTWM,
1001 2,
1002 SNB_FIFO_LINE_SIZE
1003};
1004static const struct intel_watermark_params sandybridge_cursor_wm_info = {
1005 SNB_CURSOR_FIFO,
1006 SNB_CURSOR_MAXWM,
1007 SNB_CURSOR_DFTWM,
1008 2,
1009 SNB_FIFO_LINE_SIZE
1010};
1011static const struct intel_watermark_params sandybridge_display_srwm_info = {
1012 SNB_DISPLAY_SR_FIFO,
1013 SNB_DISPLAY_MAX_SRWM,
1014 SNB_DISPLAY_DFT_SRWM,
1015 2,
1016 SNB_FIFO_LINE_SIZE
1017};
1018static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
1019 SNB_CURSOR_SR_FIFO,
1020 SNB_CURSOR_MAX_SRWM,
1021 SNB_CURSOR_DFT_SRWM,
1022 2,
1023 SNB_FIFO_LINE_SIZE
1024};
1025
1026
1027/** 953/**
1028 * intel_calculate_wm - calculate watermark level 954 * intel_calculate_wm - calculate watermark level
1029 * @clock_in_khz: pixel clock 955 * @clock_in_khz: pixel clock
@@ -1574,7 +1500,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1574 else if (!IS_GEN2(dev)) 1500 else if (!IS_GEN2(dev))
1575 wm_info = &i915_wm_info; 1501 wm_info = &i915_wm_info;
1576 else 1502 else
1577 wm_info = &i855_wm_info; 1503 wm_info = &i830_wm_info;
1578 1504
1579 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1505 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1580 crtc = intel_get_crtc_for_plane(dev, 0); 1506 crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1622,7 +1548,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1622 if (IS_I945G(dev) || IS_I945GM(dev)) 1548 if (IS_I945G(dev) || IS_I945GM(dev))
1623 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); 1549 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1624 else if (IS_I915GM(dev)) 1550 else if (IS_I915GM(dev))
1625 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); 1551 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
1626 1552
1627 /* Calc sr entries for one plane configs */ 1553 /* Calc sr entries for one plane configs */
1628 if (HAS_FW_BLC(dev) && enabled) { 1554 if (HAS_FW_BLC(dev) && enabled) {
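The INSTPM hunks switch from read-modify-write sequences to masked writes. On such registers the upper 16 bits of the written value select which of the lower 16 bits may change, so one write flips a single bit without a prior read. A userspace model of that behaviour; the _MASKED_BIT_* encoding shown, (bit << 16) | bit to set and bit << 16 to clear, is assumed here rather than quoted from i915_reg.h:

#include <stdio.h>
#include <stdint.h>

/* Assumed encoding of the masked-register helpers. */
#define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b) ((b) << 16)

/* Hardware model: only bits whose mask half is set get updated. */
static uint16_t masked_write(uint16_t reg, uint32_t val)
{
        uint16_t mask = val >> 16;
        uint16_t bits = val & 0xffff;

        return (reg & ~mask) | (bits & mask);
}

int main(void)
{
        uint16_t instpm = 0x0021;               /* arbitrary starting state */
        const uint16_t SELF_EN = 1 << 12;       /* illustrative bit */

        instpm = masked_write(instpm, MASKED_BIT_ENABLE(SELF_EN));
        printf("enabled:  0x%04x\n", instpm);   /* 0x1021 */
        instpm = masked_write(instpm, MASKED_BIT_DISABLE(SELF_EN));
        printf("disabled: 0x%04x\n", instpm);   /* 0x0021 */
        return 0;
}

Note that the other bits stay untouched even though nothing was read first, which is why the I915_READ() disappears from the call sites.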
@@ -1674,14 +1600,14 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1674 I915_WRITE(FW_BLC_SELF, 1600 I915_WRITE(FW_BLC_SELF,
1675 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 1601 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1676 else if (IS_I915GM(dev)) 1602 else if (IS_I915GM(dev))
1677 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); 1603 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
1678 DRM_DEBUG_KMS("memory self refresh enabled\n"); 1604 DRM_DEBUG_KMS("memory self refresh enabled\n");
1679 } else 1605 } else
1680 DRM_DEBUG_KMS("memory self refresh disabled\n"); 1606 DRM_DEBUG_KMS("memory self refresh disabled\n");
1681 } 1607 }
1682} 1608}
1683 1609
1684static void i830_update_wm(struct drm_crtc *unused_crtc) 1610static void i845_update_wm(struct drm_crtc *unused_crtc)
1685{ 1611{
1686 struct drm_device *dev = unused_crtc->dev; 1612 struct drm_device *dev = unused_crtc->dev;
1687 struct drm_i915_private *dev_priv = dev->dev_private; 1613 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1696,7 +1622,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1696 1622
1697 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1623 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1698 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1624 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1699 &i830_wm_info, 1625 &i845_wm_info,
1700 dev_priv->display.get_fifo_size(dev, 0), 1626 dev_priv->display.get_fifo_size(dev, 0),
1701 4, latency_ns); 1627 4, latency_ns);
1702 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1628 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1707,423 +1633,6 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1707 I915_WRITE(FW_BLC, fwater_lo); 1633 I915_WRITE(FW_BLC, fwater_lo);
1708} 1634}
1709 1635
1710/*
1711 * Check the wm result.
1712 *
1713 * If any calculated watermark value is larger than the maximum value that
1714 * can be programmed into the associated watermark register, that watermark
1715 * must be disabled.
1716 */
1717static bool ironlake_check_srwm(struct drm_device *dev, int level,
1718 int fbc_wm, int display_wm, int cursor_wm,
1719 const struct intel_watermark_params *display,
1720 const struct intel_watermark_params *cursor)
1721{
1722 struct drm_i915_private *dev_priv = dev->dev_private;
1723
1724 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1725 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1726
1727 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1728 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1729 fbc_wm, SNB_FBC_MAX_SRWM, level);
1730
1731 /* fbc has its own way to disable FBC WM */
1732 I915_WRITE(DISP_ARB_CTL,
1733 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1734 return false;
1735 } else if (INTEL_INFO(dev)->gen >= 6) {
1736 /* enable FBC WM (except on ILK, where it must remain off) */
1737 I915_WRITE(DISP_ARB_CTL,
1738 I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1739 }
1740
1741 if (display_wm > display->max_wm) {
1742 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1743 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1744 return false;
1745 }
1746
1747 if (cursor_wm > cursor->max_wm) {
1748 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1749 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1750 return false;
1751 }
1752
1753 if (!(fbc_wm || display_wm || cursor_wm)) {
1754 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1755 return false;
1756 }
1757
1758 return true;
1759}
1760
1761/*
1762 * Compute watermark values of WM[1-3].
1763 */
1764static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1765 int latency_ns,
1766 const struct intel_watermark_params *display,
1767 const struct intel_watermark_params *cursor,
1768 int *fbc_wm, int *display_wm, int *cursor_wm)
1769{
1770 struct drm_crtc *crtc;
1771 const struct drm_display_mode *adjusted_mode;
1772 unsigned long line_time_us;
1773 int hdisplay, htotal, pixel_size, clock;
1774 int line_count, line_size;
1775 int small, large;
1776 int entries;
1777
1778 if (!latency_ns) {
1779 *fbc_wm = *display_wm = *cursor_wm = 0;
1780 return false;
1781 }
1782
1783 crtc = intel_get_crtc_for_plane(dev, plane);
1784 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1785 clock = adjusted_mode->crtc_clock;
1786 htotal = adjusted_mode->crtc_htotal;
1787 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1788 pixel_size = crtc->fb->bits_per_pixel / 8;
1789
1790 line_time_us = (htotal * 1000) / clock;
1791 line_count = (latency_ns / line_time_us + 1000) / 1000;
1792 line_size = hdisplay * pixel_size;
1793
1794 /* Use the minimum of the small and large buffer method for primary */
1795 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1796 large = line_count * line_size;
1797
1798 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1799 *display_wm = entries + display->guard_size;
1800
1801 /*
1802 * Spec says:
1803 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1804 */
1805 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1806
1807 /* calculate the self-refresh watermark for display cursor */
1808 entries = line_count * pixel_size * 64;
1809 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1810 *cursor_wm = entries + cursor->guard_size;
1811
1812 return ironlake_check_srwm(dev, level,
1813 *fbc_wm, *display_wm, *cursor_wm,
1814 display, cursor);
1815}
1816
1817static void ironlake_update_wm(struct drm_crtc *crtc)
1818{
1819 struct drm_device *dev = crtc->dev;
1820 struct drm_i915_private *dev_priv = dev->dev_private;
1821 int fbc_wm, plane_wm, cursor_wm;
1822 unsigned int enabled;
1823
1824 enabled = 0;
1825 if (g4x_compute_wm0(dev, PIPE_A,
1826 &ironlake_display_wm_info,
1827 dev_priv->wm.pri_latency[0] * 100,
1828 &ironlake_cursor_wm_info,
1829 dev_priv->wm.cur_latency[0] * 100,
1830 &plane_wm, &cursor_wm)) {
1831 I915_WRITE(WM0_PIPEA_ILK,
1832 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1833 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1834 " plane %d, " "cursor: %d\n",
1835 plane_wm, cursor_wm);
1836 enabled |= 1 << PIPE_A;
1837 }
1838
1839 if (g4x_compute_wm0(dev, PIPE_B,
1840 &ironlake_display_wm_info,
1841 dev_priv->wm.pri_latency[0] * 100,
1842 &ironlake_cursor_wm_info,
1843 dev_priv->wm.cur_latency[0] * 100,
1844 &plane_wm, &cursor_wm)) {
1845 I915_WRITE(WM0_PIPEB_ILK,
1846 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1847 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1848 " plane %d, cursor: %d\n",
1849 plane_wm, cursor_wm);
1850 enabled |= 1 << PIPE_B;
1851 }
1852
1853 /*
1854 * Calculate and update the self-refresh watermark only when one
1855 * display plane is used.
1856 */
1857 I915_WRITE(WM3_LP_ILK, 0);
1858 I915_WRITE(WM2_LP_ILK, 0);
1859 I915_WRITE(WM1_LP_ILK, 0);
1860
1861 if (!single_plane_enabled(enabled))
1862 return;
1863 enabled = ffs(enabled) - 1;
1864
1865 /* WM1 */
1866 if (!ironlake_compute_srwm(dev, 1, enabled,
1867 dev_priv->wm.pri_latency[1] * 500,
1868 &ironlake_display_srwm_info,
1869 &ironlake_cursor_srwm_info,
1870 &fbc_wm, &plane_wm, &cursor_wm))
1871 return;
1872
1873 I915_WRITE(WM1_LP_ILK,
1874 WM1_LP_SR_EN |
1875 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1876 (fbc_wm << WM1_LP_FBC_SHIFT) |
1877 (plane_wm << WM1_LP_SR_SHIFT) |
1878 cursor_wm);
1879
1880 /* WM2 */
1881 if (!ironlake_compute_srwm(dev, 2, enabled,
1882 dev_priv->wm.pri_latency[2] * 500,
1883 &ironlake_display_srwm_info,
1884 &ironlake_cursor_srwm_info,
1885 &fbc_wm, &plane_wm, &cursor_wm))
1886 return;
1887
1888 I915_WRITE(WM2_LP_ILK,
1889 WM2_LP_EN |
1890 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1891 (fbc_wm << WM1_LP_FBC_SHIFT) |
1892 (plane_wm << WM1_LP_SR_SHIFT) |
1893 cursor_wm);
1894
1895 /*
1896 * WM3 is unsupported on ILK, probably because we don't have latency
1897 * data for that power state
1898 */
1899}
1900
1901static void sandybridge_update_wm(struct drm_crtc *crtc)
1902{
1903 struct drm_device *dev = crtc->dev;
1904 struct drm_i915_private *dev_priv = dev->dev_private;
1905 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
1906 u32 val;
1907 int fbc_wm, plane_wm, cursor_wm;
1908 unsigned int enabled;
1909
1910 enabled = 0;
1911 if (g4x_compute_wm0(dev, PIPE_A,
1912 &sandybridge_display_wm_info, latency,
1913 &sandybridge_cursor_wm_info, latency,
1914 &plane_wm, &cursor_wm)) {
1915 val = I915_READ(WM0_PIPEA_ILK);
1916 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1917 I915_WRITE(WM0_PIPEA_ILK, val |
1918 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1919 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1920 " plane %d, " "cursor: %d\n",
1921 plane_wm, cursor_wm);
1922 enabled |= 1 << PIPE_A;
1923 }
1924
1925 if (g4x_compute_wm0(dev, PIPE_B,
1926 &sandybridge_display_wm_info, latency,
1927 &sandybridge_cursor_wm_info, latency,
1928 &plane_wm, &cursor_wm)) {
1929 val = I915_READ(WM0_PIPEB_ILK);
1930 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1931 I915_WRITE(WM0_PIPEB_ILK, val |
1932 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1933 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1934 " plane %d, cursor: %d\n",
1935 plane_wm, cursor_wm);
1936 enabled |= 1 << PIPE_B;
1937 }
1938
1939 /*
1940 * Calculate and update the self-refresh watermark only when one
1941 * display plane is used.
1942 *
1943 * SNB supports 3 levels of watermarks.
1944 *
1945 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
1946 * and disabled in descending order.
1947 *
1948 */
1949 I915_WRITE(WM3_LP_ILK, 0);
1950 I915_WRITE(WM2_LP_ILK, 0);
1951 I915_WRITE(WM1_LP_ILK, 0);
1952
1953 if (!single_plane_enabled(enabled) ||
1954 dev_priv->sprite_scaling_enabled)
1955 return;
1956 enabled = ffs(enabled) - 1;
1957
1958 /* WM1 */
1959 if (!ironlake_compute_srwm(dev, 1, enabled,
1960 dev_priv->wm.pri_latency[1] * 500,
1961 &sandybridge_display_srwm_info,
1962 &sandybridge_cursor_srwm_info,
1963 &fbc_wm, &plane_wm, &cursor_wm))
1964 return;
1965
1966 I915_WRITE(WM1_LP_ILK,
1967 WM1_LP_SR_EN |
1968 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
1969 (fbc_wm << WM1_LP_FBC_SHIFT) |
1970 (plane_wm << WM1_LP_SR_SHIFT) |
1971 cursor_wm);
1972
1973 /* WM2 */
1974 if (!ironlake_compute_srwm(dev, 2, enabled,
1975 dev_priv->wm.pri_latency[2] * 500,
1976 &sandybridge_display_srwm_info,
1977 &sandybridge_cursor_srwm_info,
1978 &fbc_wm, &plane_wm, &cursor_wm))
1979 return;
1980
1981 I915_WRITE(WM2_LP_ILK,
1982 WM2_LP_EN |
1983 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
1984 (fbc_wm << WM1_LP_FBC_SHIFT) |
1985 (plane_wm << WM1_LP_SR_SHIFT) |
1986 cursor_wm);
1987
1988 /* WM3 */
1989 if (!ironlake_compute_srwm(dev, 3, enabled,
1990 dev_priv->wm.pri_latency[3] * 500,
1991 &sandybridge_display_srwm_info,
1992 &sandybridge_cursor_srwm_info,
1993 &fbc_wm, &plane_wm, &cursor_wm))
1994 return;
1995
1996 I915_WRITE(WM3_LP_ILK,
1997 WM3_LP_EN |
1998 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
1999 (fbc_wm << WM1_LP_FBC_SHIFT) |
2000 (plane_wm << WM1_LP_SR_SHIFT) |
2001 cursor_wm);
2002}
2003
2004static void ivybridge_update_wm(struct drm_crtc *crtc)
2005{
2006 struct drm_device *dev = crtc->dev;
2007 struct drm_i915_private *dev_priv = dev->dev_private;
2008 int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
2009 u32 val;
2010 int fbc_wm, plane_wm, cursor_wm;
2011 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
2012 unsigned int enabled;
2013
2014 enabled = 0;
2015 if (g4x_compute_wm0(dev, PIPE_A,
2016 &sandybridge_display_wm_info, latency,
2017 &sandybridge_cursor_wm_info, latency,
2018 &plane_wm, &cursor_wm)) {
2019 val = I915_READ(WM0_PIPEA_ILK);
2020 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2021 I915_WRITE(WM0_PIPEA_ILK, val |
2022 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2023 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
2024 " plane %d, " "cursor: %d\n",
2025 plane_wm, cursor_wm);
2026 enabled |= 1 << PIPE_A;
2027 }
2028
2029 if (g4x_compute_wm0(dev, PIPE_B,
2030 &sandybridge_display_wm_info, latency,
2031 &sandybridge_cursor_wm_info, latency,
2032 &plane_wm, &cursor_wm)) {
2033 val = I915_READ(WM0_PIPEB_ILK);
2034 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2035 I915_WRITE(WM0_PIPEB_ILK, val |
2036 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2037 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
2038 " plane %d, cursor: %d\n",
2039 plane_wm, cursor_wm);
2040 enabled |= 1 << PIPE_B;
2041 }
2042
2043 if (g4x_compute_wm0(dev, PIPE_C,
2044 &sandybridge_display_wm_info, latency,
2045 &sandybridge_cursor_wm_info, latency,
2046 &plane_wm, &cursor_wm)) {
2047 val = I915_READ(WM0_PIPEC_IVB);
2048 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2049 I915_WRITE(WM0_PIPEC_IVB, val |
2050 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2051 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
2052 " plane %d, cursor: %d\n",
2053 plane_wm, cursor_wm);
2054 enabled |= 1 << PIPE_C;
2055 }
2056
2057 /*
2058 * Calculate and update the self-refresh watermark only when one
2059 * display plane is used.
2060 *
2061 * SNB supports 3 levels of watermarks.
2062 *
2063 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
2064 * and disabled in descending order.
2065 *
2066 */
2067 I915_WRITE(WM3_LP_ILK, 0);
2068 I915_WRITE(WM2_LP_ILK, 0);
2069 I915_WRITE(WM1_LP_ILK, 0);
2070
2071 if (!single_plane_enabled(enabled) ||
2072 dev_priv->sprite_scaling_enabled)
2073 return;
2074 enabled = ffs(enabled) - 1;
2075
2076 /* WM1 */
2077 if (!ironlake_compute_srwm(dev, 1, enabled,
2078 dev_priv->wm.pri_latency[1] * 500,
2079 &sandybridge_display_srwm_info,
2080 &sandybridge_cursor_srwm_info,
2081 &fbc_wm, &plane_wm, &cursor_wm))
2082 return;
2083
2084 I915_WRITE(WM1_LP_ILK,
2085 WM1_LP_SR_EN |
2086 (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
2087 (fbc_wm << WM1_LP_FBC_SHIFT) |
2088 (plane_wm << WM1_LP_SR_SHIFT) |
2089 cursor_wm);
2090
2091 /* WM2 */
2092 if (!ironlake_compute_srwm(dev, 2, enabled,
2093 dev_priv->wm.pri_latency[2] * 500,
2094 &sandybridge_display_srwm_info,
2095 &sandybridge_cursor_srwm_info,
2096 &fbc_wm, &plane_wm, &cursor_wm))
2097 return;
2098
2099 I915_WRITE(WM2_LP_ILK,
2100 WM2_LP_EN |
2101 (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
2102 (fbc_wm << WM1_LP_FBC_SHIFT) |
2103 (plane_wm << WM1_LP_SR_SHIFT) |
2104 cursor_wm);
2105
2106 /* WM3, note we have to correct the cursor latency */
2107 if (!ironlake_compute_srwm(dev, 3, enabled,
2108 dev_priv->wm.pri_latency[3] * 500,
2109 &sandybridge_display_srwm_info,
2110 &sandybridge_cursor_srwm_info,
2111 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2112 !ironlake_compute_srwm(dev, 3, enabled,
2113 dev_priv->wm.cur_latency[3] * 500,
2114 &sandybridge_display_srwm_info,
2115 &sandybridge_cursor_srwm_info,
2116 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2117 return;
2118
2119 I915_WRITE(WM3_LP_ILK,
2120 WM3_LP_EN |
2121 (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
2122 (fbc_wm << WM1_LP_FBC_SHIFT) |
2123 (plane_wm << WM1_LP_SR_SHIFT) |
2124 cursor_wm);
2125}
2126
2127static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, 1636static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2128 struct drm_crtc *crtc) 1637 struct drm_crtc *crtc)
2129{ 1638{
@@ -2192,7 +1701,7 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2192 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1701 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2193} 1702}
2194 1703
2195struct hsw_pipe_wm_parameters { 1704struct ilk_pipe_wm_parameters {
2196 bool active; 1705 bool active;
2197 uint32_t pipe_htotal; 1706 uint32_t pipe_htotal;
2198 uint32_t pixel_rate; 1707 uint32_t pixel_rate;
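ilk_wm_fbc() at the top of this hunk implements the spec formula quoted in the comment deleted above: FBC WM = (primary WM * 64) / bytes-per-line + 2, using a round-up division. A worked example with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int pri_val = 42;              /* primary watermark result */
        unsigned int horiz_pixels = 1920;
        unsigned int bytes_per_pixel = 4;

        /* (42 * 64) / (1920 * 4) = 2688 / 7680, rounded up to 1; +2 = 3 */
        printf("fbc_wm = %u\n",
               DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2);
        return 0;
}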
@@ -2201,7 +1710,7 @@ struct hsw_pipe_wm_parameters {
2201 struct intel_plane_wm_parameters cur; 1710 struct intel_plane_wm_parameters cur;
2202}; 1711};
2203 1712
2204struct hsw_wm_maximums { 1713struct ilk_wm_maximums {
2205 uint16_t pri; 1714 uint16_t pri;
2206 uint16_t spr; 1715 uint16_t spr;
2207 uint16_t cur; 1716 uint16_t cur;
@@ -2219,7 +1728,7 @@ struct intel_wm_config {
2219 * For both WM_PIPE and WM_LP. 1728 * For both WM_PIPE and WM_LP.
2220 * mem_value must be in 0.1us units. 1729 * mem_value must be in 0.1us units.
2221 */ 1730 */
2222static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params, 1731static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
2223 uint32_t mem_value, 1732 uint32_t mem_value,
2224 bool is_lp) 1733 bool is_lp)
2225{ 1734{
@@ -2248,7 +1757,7 @@ static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
2248 * For both WM_PIPE and WM_LP. 1757 * For both WM_PIPE and WM_LP.
2249 * mem_value must be in 0.1us units. 1758 * mem_value must be in 0.1us units.
2250 */ 1759 */
2251static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params, 1760static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
2252 uint32_t mem_value) 1761 uint32_t mem_value)
2253{ 1762{
2254 uint32_t method1, method2; 1763 uint32_t method1, method2;
@@ -2271,7 +1780,7 @@ static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
2271 * For both WM_PIPE and WM_LP. 1780 * For both WM_PIPE and WM_LP.
2272 * mem_value must be in 0.1us units. 1781 * mem_value must be in 0.1us units.
2273 */ 1782 */
2274static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params, 1783static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
2275 uint32_t mem_value) 1784 uint32_t mem_value)
2276{ 1785{
2277 if (!params->active || !params->cur.enabled) 1786 if (!params->active || !params->cur.enabled)
@@ -2285,7 +1794,7 @@ static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
2285} 1794}
2286 1795
2287/* Only for WM_LP. */ 1796/* Only for WM_LP. */
2288static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params, 1797static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
2289 uint32_t pri_val) 1798 uint32_t pri_val)
2290{ 1799{
2291 if (!params->active || !params->pri.enabled) 1800 if (!params->active || !params->pri.enabled)
@@ -2390,7 +1899,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
2390 int level, 1899 int level,
2391 const struct intel_wm_config *config, 1900 const struct intel_wm_config *config,
2392 enum intel_ddb_partitioning ddb_partitioning, 1901 enum intel_ddb_partitioning ddb_partitioning,
2393 struct hsw_wm_maximums *max) 1902 struct ilk_wm_maximums *max)
2394{ 1903{
2395 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1904 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2396 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1905 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2399,7 +1908,7 @@ static void ilk_compute_wm_maximums(struct drm_device *dev,
2399} 1908}
2400 1909
2401static bool ilk_validate_wm_level(int level, 1910static bool ilk_validate_wm_level(int level,
2402 const struct hsw_wm_maximums *max, 1911 const struct ilk_wm_maximums *max,
2403 struct intel_wm_level *result) 1912 struct intel_wm_level *result)
2404{ 1913{
2405 bool ret; 1914 bool ret;
@@ -2441,7 +1950,7 @@ static bool ilk_validate_wm_level(int level,
2441 1950
2442static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 1951static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
2443 int level, 1952 int level,
2444 const struct hsw_pipe_wm_parameters *p, 1953 const struct ilk_pipe_wm_parameters *p,
2445 struct intel_wm_level *result) 1954 struct intel_wm_level *result)
2446{ 1955{
2447 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 1956 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2489,7 +1998,7 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2489{ 1998{
2490 struct drm_i915_private *dev_priv = dev->dev_private; 1999 struct drm_i915_private *dev_priv = dev->dev_private;
2491 2000
2492 if (IS_HASWELL(dev)) { 2001 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2493 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2002 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2494 2003
2495 wm[0] = (sskpd >> 56) & 0xFF; 2004 wm[0] = (sskpd >> 56) & 0xFF;
@@ -2537,7 +2046,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2537static int ilk_wm_max_level(const struct drm_device *dev) 2046static int ilk_wm_max_level(const struct drm_device *dev)
2538{ 2047{
2539 /* how many WM levels are we expecting */ 2048 /* how many WM levels are we expecting */
2540 if (IS_HASWELL(dev)) 2049 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2541 return 4; 2050 return 4;
2542 else if (INTEL_INFO(dev)->gen >= 6) 2051 else if (INTEL_INFO(dev)->gen >= 6)
2543 return 3; 2052 return 3;
@@ -2589,8 +2098,8 @@ static void intel_setup_wm_latency(struct drm_device *dev)
2589 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2098 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2590} 2099}
2591 2100
2592static void hsw_compute_wm_parameters(struct drm_crtc *crtc, 2101static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2593 struct hsw_pipe_wm_parameters *p, 2102 struct ilk_pipe_wm_parameters *p,
2594 struct intel_wm_config *config) 2103 struct intel_wm_config *config)
2595{ 2104{
2596 struct drm_device *dev = crtc->dev; 2105 struct drm_device *dev = crtc->dev;
@@ -2600,7 +2109,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2600 2109
2601 p->active = intel_crtc_active(crtc); 2110 p->active = intel_crtc_active(crtc);
2602 if (p->active) { 2111 if (p->active) {
2603 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; 2112 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2604 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2113 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2605 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; 2114 p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2606 p->cur.bytes_per_pixel = 4; 2115 p->cur.bytes_per_pixel = 4;
@@ -2627,7 +2136,7 @@ static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
2627 2136
2628/* Compute new watermarks for the pipe */ 2137/* Compute new watermarks for the pipe */
2629static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2138static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2630 const struct hsw_pipe_wm_parameters *params, 2139 const struct ilk_pipe_wm_parameters *params,
2631 struct intel_pipe_wm *pipe_wm) 2140 struct intel_pipe_wm *pipe_wm)
2632{ 2141{
2633 struct drm_device *dev = crtc->dev; 2142 struct drm_device *dev = crtc->dev;
@@ -2639,16 +2148,25 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2639 .sprites_enabled = params->spr.enabled, 2148 .sprites_enabled = params->spr.enabled,
2640 .sprites_scaled = params->spr.scaled, 2149 .sprites_scaled = params->spr.scaled,
2641 }; 2150 };
2642 struct hsw_wm_maximums max; 2151 struct ilk_wm_maximums max;
2643 2152
2644 /* LP0 watermarks always use 1/2 DDB partitioning */ 2153 /* LP0 watermarks always use 1/2 DDB partitioning */
2645 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2154 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2646 2155
2156 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2157 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2158 max_level = 1;
2159
2160 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2161 if (params->spr.scaled)
2162 max_level = 0;
2163
2647 for (level = 0; level <= max_level; level++) 2164 for (level = 0; level <= max_level; level++)
2648 ilk_compute_wm_level(dev_priv, level, params, 2165 ilk_compute_wm_level(dev_priv, level, params,
2649 &pipe_wm->wm[level]); 2166 &pipe_wm->wm[level]);
2650 2167
2651 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2168 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2169 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2652 2170
2653 /* At least LP0 must be valid */ 2171 /* At least LP0 must be valid */
2654 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); 2172 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
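The two clamps added in this hunk encode per-platform limits: ILK/SNB may only use LP2+ watermarks with sprites off, and every pre-HSW platform must fall back to LP0 alone while a sprite is scaled. The clamp in isolation, as a sketch with illustrative gen numbers and flags:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative clamp mirroring the hunk above; gen numbers assumed. */
static int clamp_max_wm_level(int gen, int max_level,
                              bool sprites_enabled, bool sprites_scaled)
{
        if (gen <= 6 && sprites_enabled)
                max_level = 1;  /* ILK/SNB: LP2+ only w/o sprites */
        if (sprites_scaled)
                max_level = 0;  /* pre-HSW: LP1+ only w/o scaling */
        return max_level;
}

int main(void)
{
        printf("%d\n", clamp_max_wm_level(6, 3, true, false)); /* 1 */
        printf("%d\n", clamp_max_wm_level(7, 3, true, true));  /* 0 */
        return 0;
}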
@@ -2683,12 +2201,19 @@ static void ilk_merge_wm_level(struct drm_device *dev,
2683 * Merge all low power watermarks for all active pipes. 2201 * Merge all low power watermarks for all active pipes.
2684 */ 2202 */
2685static void ilk_wm_merge(struct drm_device *dev, 2203static void ilk_wm_merge(struct drm_device *dev,
2686 const struct hsw_wm_maximums *max, 2204 const struct intel_wm_config *config,
2205 const struct ilk_wm_maximums *max,
2687 struct intel_pipe_wm *merged) 2206 struct intel_pipe_wm *merged)
2688{ 2207{
2689 int level, max_level = ilk_wm_max_level(dev); 2208 int level, max_level = ilk_wm_max_level(dev);
2690 2209
2691 merged->fbc_wm_enabled = true; 2210 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2211 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2212 config->num_pipes_active > 1)
2213 return;
2214
2215 /* ILK: FBC WM must be disabled always */
2216 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2692 2217
2693 /* merge each WM1+ level */ 2218 /* merge each WM1+ level */
2694 for (level = 1; level <= max_level; level++) { 2219 for (level = 1; level <= max_level; level++) {
@@ -2708,6 +2233,20 @@ static void ilk_wm_merge(struct drm_device *dev,
2708 wm->fbc_val = 0; 2233 wm->fbc_val = 0;
2709 } 2234 }
2710 } 2235 }
2236
2237 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2238 /*
2239 * FIXME this is racy. FBC might get enabled later.
2240 * What we should check here is whether FBC can be
2241 * enabled sometime later.
2242 */
2243 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2244 for (level = 2; level <= max_level; level++) {
2245 struct intel_wm_level *wm = &merged->wm[level];
2246
2247 wm->enable = false;
2248 }
2249 }
2711} 2250}
2712 2251
2713static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2252static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
@@ -2716,10 +2255,21 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2716 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2255 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2717} 2256}
2718 2257
2719static void hsw_compute_wm_results(struct drm_device *dev, 2258/* The value we need to program into the WM_LPx latency field */
2259static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2260{
2261 struct drm_i915_private *dev_priv = dev->dev_private;
2262
2263 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2264 return 2 * level;
2265 else
2266 return dev_priv->wm.pri_latency[level];
2267}
2268
2269static void ilk_compute_wm_results(struct drm_device *dev,
2720 const struct intel_pipe_wm *merged, 2270 const struct intel_pipe_wm *merged,
2721 enum intel_ddb_partitioning partitioning, 2271 enum intel_ddb_partitioning partitioning,
2722 struct hsw_wm_values *results) 2272 struct ilk_wm_values *results)
2723{ 2273{
2724 struct intel_crtc *intel_crtc; 2274 struct intel_crtc *intel_crtc;
2725 int level, wm_lp; 2275 int level, wm_lp;
@@ -2738,7 +2288,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2738 break; 2288 break;
2739 2289
2740 results->wm_lp[wm_lp - 1] = WM3_LP_EN | 2290 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2741 ((level * 2) << WM1_LP_LATENCY_SHIFT) | 2291 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2742 (r->pri_val << WM1_LP_SR_SHIFT) | 2292 (r->pri_val << WM1_LP_SR_SHIFT) |
2743 r->cur_val; 2293 r->cur_val;
2744 2294
@@ -2749,7 +2299,11 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2749 results->wm_lp[wm_lp - 1] |= 2299 results->wm_lp[wm_lp - 1] |=
2750 r->fbc_val << WM1_LP_FBC_SHIFT; 2300 r->fbc_val << WM1_LP_FBC_SHIFT;
2751 2301
2752 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2302 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2303 WARN_ON(wm_lp != 1);
2304 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2305 } else
2306 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2753 } 2307 }
2754 2308
2755 /* LP0 register values */ 2309 /* LP0 register values */
@@ -2772,7 +2326,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
2772 2326
2773/* Find the result with the highest level enabled. Check for enable_fbc_wm in 2327/* Find the result with the highest level enabled. Check for enable_fbc_wm in
2774 * case both are at the same level. Prefer r1 in case they're the same. */ 2328 * case both are at the same level. Prefer r1 in case they're the same. */
2775static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, 2329static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2776 struct intel_pipe_wm *r1, 2330 struct intel_pipe_wm *r1,
2777 struct intel_pipe_wm *r2) 2331 struct intel_pipe_wm *r2)
2778{ 2332{
@@ -2807,8 +2361,8 @@ static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
2807#define WM_DIRTY_DDB (1 << 25) 2361#define WM_DIRTY_DDB (1 << 25)
2808 2362
2809static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, 2363static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2810 const struct hsw_wm_values *old, 2364 const struct ilk_wm_values *old,
2811 const struct hsw_wm_values *new) 2365 const struct ilk_wm_values *new)
2812{ 2366{
2813 unsigned int dirty = 0; 2367 unsigned int dirty = 0;
2814 enum pipe pipe; 2368 enum pipe pipe;
@@ -2858,27 +2412,53 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2858 return dirty; 2412 return dirty;
2859} 2413}
2860 2414
2415static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2416 unsigned int dirty)
2417{
2418 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2419 bool changed = false;
2420
2421 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2422 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2423 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2424 changed = true;
2425 }
2426 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2427 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2428 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2429 changed = true;
2430 }
2431 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2432 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2433 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2434 changed = true;
2435 }
2436
2437 /*
2438 * Don't touch WM1S_LP_EN here.
2439 * Doing so could cause underruns.
2440 */
2441
2442 return changed;
2443}
2444
2861/* 2445/*
2862 * The spec says we shouldn't write when we don't need to, because every write 2446 * The spec says we shouldn't write when we don't need to, because every write
2863 * causes WMs to be re-evaluated, expending some power. 2447 * causes WMs to be re-evaluated, expending some power.
2864 */ 2448 */
2865static void hsw_write_wm_values(struct drm_i915_private *dev_priv, 2449static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2866 struct hsw_wm_values *results) 2450 struct ilk_wm_values *results)
2867{ 2451{
2868 struct hsw_wm_values *previous = &dev_priv->wm.hw; 2452 struct drm_device *dev = dev_priv->dev;
2453 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2869 unsigned int dirty; 2454 unsigned int dirty;
2870 uint32_t val; 2455 uint32_t val;
2871 2456
2872 dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results); 2457 dirty = ilk_compute_wm_dirty(dev, previous, results);
2873 if (!dirty) 2458 if (!dirty)
2874 return; 2459 return;
2875 2460
2876 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0) 2461 _ilk_disable_lp_wm(dev_priv, dirty);
2877 I915_WRITE(WM3_LP_ILK, 0);
2878 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
2879 I915_WRITE(WM2_LP_ILK, 0);
2880 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
2881 I915_WRITE(WM1_LP_ILK, 0);
2882 2462
2883 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2463 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2884 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2464 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
@@ -2895,12 +2475,21 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2895 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2475 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2896 2476
2897 if (dirty & WM_DIRTY_DDB) { 2477 if (dirty & WM_DIRTY_DDB) {
2898 val = I915_READ(WM_MISC); 2478 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2899 if (results->partitioning == INTEL_DDB_PART_1_2) 2479 val = I915_READ(WM_MISC);
2900 val &= ~WM_MISC_DATA_PARTITION_5_6; 2480 if (results->partitioning == INTEL_DDB_PART_1_2)
2901 else 2481 val &= ~WM_MISC_DATA_PARTITION_5_6;
2902 val |= WM_MISC_DATA_PARTITION_5_6; 2482 else
2903 I915_WRITE(WM_MISC, val); 2483 val |= WM_MISC_DATA_PARTITION_5_6;
2484 I915_WRITE(WM_MISC, val);
2485 } else {
2486 val = I915_READ(DISP_ARB_CTL2);
2487 if (results->partitioning == INTEL_DDB_PART_1_2)
2488 val &= ~DISP_DATA_PARTITION_5_6;
2489 else
2490 val |= DISP_DATA_PARTITION_5_6;
2491 I915_WRITE(DISP_ARB_CTL2, val);
2492 }
2904 } 2493 }
2905 2494
2906 if (dirty & WM_DIRTY_FBC) { 2495 if (dirty & WM_DIRTY_FBC) {
@@ -2912,37 +2501,48 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2912 I915_WRITE(DISP_ARB_CTL, val); 2501 I915_WRITE(DISP_ARB_CTL, val);
2913 } 2502 }
2914 2503
2915 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2504 if (dirty & WM_DIRTY_LP(1) &&
2505 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2916 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2506 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2917 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2918 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2919 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2920 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2921 2507
2922 if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0) 2508 if (INTEL_INFO(dev)->gen >= 7) {
2509 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2510 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2511 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2512 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2513 }
2514
2515 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2923 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2516 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2924 if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) 2517 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2925 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2518 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2926 if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) 2519 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2927 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2520 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2928 2521
2929 dev_priv->wm.hw = *results; 2522 dev_priv->wm.hw = *results;
2930} 2523}
2931 2524
2932static void haswell_update_wm(struct drm_crtc *crtc) 2525static bool ilk_disable_lp_wm(struct drm_device *dev)
2526{
2527 struct drm_i915_private *dev_priv = dev->dev_private;
2528
2529 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2530}
2531
2532static void ilk_update_wm(struct drm_crtc *crtc)
2933{ 2533{
2934 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2534 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2935 struct drm_device *dev = crtc->dev; 2535 struct drm_device *dev = crtc->dev;
2936 struct drm_i915_private *dev_priv = dev->dev_private; 2536 struct drm_i915_private *dev_priv = dev->dev_private;
2937 struct hsw_wm_maximums max; 2537 struct ilk_wm_maximums max;
2938 struct hsw_pipe_wm_parameters params = {}; 2538 struct ilk_pipe_wm_parameters params = {};
2939 struct hsw_wm_values results = {}; 2539 struct ilk_wm_values results = {};
2940 enum intel_ddb_partitioning partitioning; 2540 enum intel_ddb_partitioning partitioning;
2941 struct intel_pipe_wm pipe_wm = {}; 2541 struct intel_pipe_wm pipe_wm = {};
2942 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 2542 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2943 struct intel_wm_config config = {}; 2543 struct intel_wm_config config = {};
2944 2544
2945 hsw_compute_wm_parameters(crtc, &params, &config); 2545 ilk_compute_wm_parameters(crtc, &params, &config);
2946 2546
2947 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 2547 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
2948 2548
@@ -2952,15 +2552,15 @@ static void haswell_update_wm(struct drm_crtc *crtc)
2952 intel_crtc->wm.active = pipe_wm; 2552 intel_crtc->wm.active = pipe_wm;
2953 2553
2954 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 2554 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2955 ilk_wm_merge(dev, &max, &lp_wm_1_2); 2555 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2956 2556
2957 /* 5/6 split only in single pipe config on IVB+ */ 2557 /* 5/6 split only in single pipe config on IVB+ */
2958 if (INTEL_INFO(dev)->gen >= 7 && 2558 if (INTEL_INFO(dev)->gen >= 7 &&
2959 config.num_pipes_active == 1 && config.sprites_enabled) { 2559 config.num_pipes_active == 1 && config.sprites_enabled) {
2960 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 2560 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2961 ilk_wm_merge(dev, &max, &lp_wm_5_6); 2561 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2962 2562
2963 best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 2563 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2964 } else { 2564 } else {
2965 best_lp_wm = &lp_wm_1_2; 2565 best_lp_wm = &lp_wm_1_2;
2966 } 2566 }
@@ -2968,16 +2568,17 @@ static void haswell_update_wm(struct drm_crtc *crtc)
2968 partitioning = (best_lp_wm == &lp_wm_1_2) ? 2568 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2969 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 2569 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2970 2570
2971 hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); 2571 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2972 2572
2973 hsw_write_wm_values(dev_priv, &results); 2573 ilk_write_wm_values(dev_priv, &results);
2974} 2574}
2975 2575
2976static void haswell_update_sprite_wm(struct drm_plane *plane, 2576static void ilk_update_sprite_wm(struct drm_plane *plane,
2977 struct drm_crtc *crtc, 2577 struct drm_crtc *crtc,
2978 uint32_t sprite_width, int pixel_size, 2578 uint32_t sprite_width, int pixel_size,
2979 bool enabled, bool scaled) 2579 bool enabled, bool scaled)
2980{ 2580{
2581 struct drm_device *dev = plane->dev;
2981 struct intel_plane *intel_plane = to_intel_plane(plane); 2582 struct intel_plane *intel_plane = to_intel_plane(plane);
2982 2583
2983 intel_plane->wm.enabled = enabled; 2584 intel_plane->wm.enabled = enabled;
@@ -2985,176 +2586,24 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
2985 intel_plane->wm.horiz_pixels = sprite_width; 2586 intel_plane->wm.horiz_pixels = sprite_width;
2986 intel_plane->wm.bytes_per_pixel = pixel_size; 2587 intel_plane->wm.bytes_per_pixel = pixel_size;
2987 2588
2988 haswell_update_wm(crtc); 2589 /*
2989} 2590 * IVB workaround: must disable low power watermarks for at least
2990 2591 * one frame before enabling scaling. LP watermarks can be re-enabled
2991static bool 2592 * when scaling is disabled.
2992sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, 2593 *
2993 uint32_t sprite_width, int pixel_size, 2594 * WaCxSRDisabledForSpriteScaling:ivb
2994 const struct intel_watermark_params *display, 2595 */
2995 int display_latency_ns, int *sprite_wm) 2596 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2996{ 2597 intel_wait_for_vblank(dev, intel_plane->pipe);
2997 struct drm_crtc *crtc;
2998 int clock;
2999 int entries, tlb_miss;
3000
3001 crtc = intel_get_crtc_for_plane(dev, plane);
3002 if (!intel_crtc_active(crtc)) {
3003 *sprite_wm = display->guard_size;
3004 return false;
3005 }
3006
3007 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3008
3009 /* Use the small buffer method to calculate the sprite watermark */
3010 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3011 tlb_miss = display->fifo_size*display->cacheline_size -
3012 sprite_width * 8;
3013 if (tlb_miss > 0)
3014 entries += tlb_miss;
3015 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3016 *sprite_wm = entries + display->guard_size;
3017 if (*sprite_wm > (int)display->max_wm)
3018 *sprite_wm = display->max_wm;
3019
3020 return true;
3021}
3022
3023static bool
3024sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
3025 uint32_t sprite_width, int pixel_size,
3026 const struct intel_watermark_params *display,
3027 int latency_ns, int *sprite_wm)
3028{
3029 struct drm_crtc *crtc;
3030 unsigned long line_time_us;
3031 int clock;
3032 int line_count, line_size;
3033 int small, large;
3034 int entries;
3035
3036 if (!latency_ns) {
3037 *sprite_wm = 0;
3038 return false;
3039 }
3040
3041 crtc = intel_get_crtc_for_plane(dev, plane);
3042 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3043 if (!clock) {
3044 *sprite_wm = 0;
3045 return false;
3046 }
3047
3048 line_time_us = (sprite_width * 1000) / clock;
3049 if (!line_time_us) {
3050 *sprite_wm = 0;
3051 return false;
3052 }
3053
3054 line_count = (latency_ns / line_time_us + 1000) / 1000;
3055 line_size = sprite_width * pixel_size;
3056
3057 /* Use the minimum of the small and large buffer method for primary */
3058 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3059 large = line_count * line_size;
3060
3061 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3062 *sprite_wm = entries + display->guard_size;
3063 2598
3064 return *sprite_wm > 0x3ff ? false : true; 2599 ilk_update_wm(crtc);
3065}
3066
3067static void sandybridge_update_sprite_wm(struct drm_plane *plane,
3068 struct drm_crtc *crtc,
3069 uint32_t sprite_width, int pixel_size,
3070 bool enabled, bool scaled)
3071{
3072 struct drm_device *dev = plane->dev;
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 int pipe = to_intel_plane(plane)->pipe;
3075 int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
3076 u32 val;
3077 int sprite_wm, reg;
3078 int ret;
3079
3080 if (!enabled)
3081 return;
3082
3083 switch (pipe) {
3084 case 0:
3085 reg = WM0_PIPEA_ILK;
3086 break;
3087 case 1:
3088 reg = WM0_PIPEB_ILK;
3089 break;
3090 case 2:
3091 reg = WM0_PIPEC_IVB;
3092 break;
3093 default:
3094 return; /* bad pipe */
3095 }
3096
3097 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
3098 &sandybridge_display_wm_info,
3099 latency, &sprite_wm);
3100 if (!ret) {
3101 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
3102 pipe_name(pipe));
3103 return;
3104 }
3105
3106 val = I915_READ(reg);
3107 val &= ~WM0_PIPE_SPRITE_MASK;
3108 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
3109 DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
3110
3111
3112 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3113 pixel_size,
3114 &sandybridge_display_srwm_info,
3115 dev_priv->wm.spr_latency[1] * 500,
3116 &sprite_wm);
3117 if (!ret) {
3118 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
3119 pipe_name(pipe));
3120 return;
3121 }
3122 I915_WRITE(WM1S_LP_ILK, sprite_wm);
3123
3124 /* Only IVB has two more LP watermarks for sprite */
3125 if (!IS_IVYBRIDGE(dev))
3126 return;
3127
3128 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3129 pixel_size,
3130 &sandybridge_display_srwm_info,
3131 dev_priv->wm.spr_latency[2] * 500,
3132 &sprite_wm);
3133 if (!ret) {
3134 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
3135 pipe_name(pipe));
3136 return;
3137 }
3138 I915_WRITE(WM2S_LP_IVB, sprite_wm);
3139
3140 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
3141 pixel_size,
3142 &sandybridge_display_srwm_info,
3143 dev_priv->wm.spr_latency[3] * 500,
3144 &sprite_wm);
3145 if (!ret) {
3146 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
3147 pipe_name(pipe));
3148 return;
3149 }
3150 I915_WRITE(WM3S_LP_IVB, sprite_wm);
3151} 2600}
3152 2601
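
The WaCxSRDisabledForSpriteScaling:ivb hunk above calls ilk_disable_lp_wm(), which lies outside this diff. As a hedged sketch (not the driver's actual body), such a helper would clear the LP enable bits via read-modify-write and report whether any level was live, so the caller knows a vblank wait is required before enabling scaling:

static bool ilk_disable_lp_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        const u32 regs[] = { WM3_LP_ILK, WM2_LP_ILK, WM1_LP_ILK };
        bool changed = false;
        int i;

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                u32 val = I915_READ(regs[i]);

                if (val & WM1_LP_SR_EN) {
                        /* drop only the enable bit, keep programmed values */
                        I915_WRITE(regs[i], val & ~WM1_LP_SR_EN);
                        changed = true;
                }
        }

        return changed;
}
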
3153static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 2602static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3154{ 2603{
3155 struct drm_device *dev = crtc->dev; 2604 struct drm_device *dev = crtc->dev;
3156 struct drm_i915_private *dev_priv = dev->dev_private; 2605 struct drm_i915_private *dev_priv = dev->dev_private;
3157 struct hsw_wm_values *hw = &dev_priv->wm.hw; 2606 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2607 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3159 struct intel_pipe_wm *active = &intel_crtc->wm.active; 2608 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3160 enum pipe pipe = intel_crtc->pipe; 2609 enum pipe pipe = intel_crtc->pipe;
@@ -3165,7 +2614,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3165 }; 2614 };
3166 2615
3167 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 2616 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3168 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 2617 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2618 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3169 2619
3170 if (intel_crtc_active(crtc)) { 2620 if (intel_crtc_active(crtc)) {
3171 u32 tmp = hw->wm_pipe[pipe]; 2621 u32 tmp = hw->wm_pipe[pipe];
@@ -3197,7 +2647,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3197void ilk_wm_get_hw_state(struct drm_device *dev) 2647void ilk_wm_get_hw_state(struct drm_device *dev)
3198{ 2648{
3199 struct drm_i915_private *dev_priv = dev->dev_private; 2649 struct drm_i915_private *dev_priv = dev->dev_private;
3200 struct hsw_wm_values *hw = &dev_priv->wm.hw; 2650 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3201 struct drm_crtc *crtc; 2651 struct drm_crtc *crtc;
3202 2652
3203 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 2653 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -3211,8 +2661,12 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
3211 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 2661 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
3212 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 2662 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
3213 2663
3214 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 2664 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3215 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 2665 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2666 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2667 else if (IS_IVYBRIDGE(dev))
2668 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2669 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
3216 2670
3217 hw->enable_fbc_wm = 2671 hw->enable_fbc_wm =
3218 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 2672 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
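
A hedged consolidation of the partitioning readout added above, with enum intel_ddb_partitioning assumed from the surrounding driver: HSW/BDW keep the 1/2 vs 5/6 split bit in WM_MISC, IVB moved it to DISP_ARB_CTL2, and ILK/SNB only have the fixed 1/2 split.

static enum intel_ddb_partitioning
ilk_read_ddb_partitioning(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                return (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
                        INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

        if (IS_IVYBRIDGE(dev))
                return (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
                        INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

        return INTEL_DDB_PART_1_2;      /* ilk/snb: fixed 1/2 split */
}
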
@@ -3583,9 +3037,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3583 3037
3584void gen6_rps_idle(struct drm_i915_private *dev_priv) 3038void gen6_rps_idle(struct drm_i915_private *dev_priv)
3585{ 3039{
3040 struct drm_device *dev = dev_priv->dev;
3041
3586 mutex_lock(&dev_priv->rps.hw_lock); 3042 mutex_lock(&dev_priv->rps.hw_lock);
3587 if (dev_priv->rps.enabled) { 3043 if (dev_priv->rps.enabled) {
3588 if (dev_priv->info->is_valleyview) 3044 if (IS_VALLEYVIEW(dev))
3589 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3045 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3590 else 3046 else
3591 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3047 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
@@ -3596,9 +3052,11 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
3596 3052
3597void gen6_rps_boost(struct drm_i915_private *dev_priv) 3053void gen6_rps_boost(struct drm_i915_private *dev_priv)
3598{ 3054{
3055 struct drm_device *dev = dev_priv->dev;
3056
3599 mutex_lock(&dev_priv->rps.hw_lock); 3057 mutex_lock(&dev_priv->rps.hw_lock);
3600 if (dev_priv->rps.enabled) { 3058 if (dev_priv->rps.enabled) {
3601 if (dev_priv->info->is_valleyview) 3059 if (IS_VALLEYVIEW(dev))
3602 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3060 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3603 else 3061 else
3604 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); 3062 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
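
The idle and boost hunks above are symmetric. A hedged sketch of the shared idea, using the names from the hunks (the helper itself is illustrative): under rps.hw_lock the frequency is parked at the minimum softlimit when idle and pushed to the maximum on a boost, with the platform check now going through IS_VALLEYVIEW() instead of poking dev_priv->info directly.

static void rps_set_extreme(struct drm_i915_private *dev_priv, bool boost)
{
        struct drm_device *dev = dev_priv->dev;
        u8 val = boost ? dev_priv->rps.max_delay : dev_priv->rps.min_delay;

        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (IS_VALLEYVIEW(dev))
                valleyview_set_rps(dev, val);
        else
                gen6_set_rps(dev, val);
}
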
@@ -4972,6 +4430,20 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
4972 } 4430 }
4973} 4431}
4974 4432
4433static void ilk_init_lp_watermarks(struct drm_device *dev)
4434{
4435 struct drm_i915_private *dev_priv = dev->dev_private;
4436
4437 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
4438 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
4439 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
4440
4441 /*
4442 * Don't touch WM1S_LP_EN here.
4443 * Doing so could cause underruns.
4444 */
4445}
4446
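
The point of the read-modify-write above, versus the I915_WRITE(WMn_LP_ILK, 0) call sites it replaces in the clock-gating hunks below: writing 0 clears every field in the register, not just the enable bit, whereas dropping only WM1_LP_SR_EN leaves any programmed watermark values intact. A one-line illustration of the pattern (helper name is illustrative):

static void wm_lp_disable_only(struct drm_i915_private *dev_priv, u32 reg)
{
        I915_WRITE(reg, I915_READ(reg) & ~WM1_LP_SR_EN);
}
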
4975static void ironlake_init_clock_gating(struct drm_device *dev) 4447static void ironlake_init_clock_gating(struct drm_device *dev)
4976{ 4448{
4977 struct drm_i915_private *dev_priv = dev->dev_private; 4449 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5005,9 +4477,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
5005 I915_WRITE(DISP_ARB_CTL, 4477 I915_WRITE(DISP_ARB_CTL,
5006 (I915_READ(DISP_ARB_CTL) | 4478 (I915_READ(DISP_ARB_CTL) |
5007 DISP_FBC_WM_DIS)); 4479 DISP_FBC_WM_DIS));
5008 I915_WRITE(WM3_LP_ILK, 0); 4480
5009 I915_WRITE(WM2_LP_ILK, 0); 4481 ilk_init_lp_watermarks(dev);
5010 I915_WRITE(WM1_LP_ILK, 0);
5011 4482
5012 /* 4483 /*
5013 * Based on the document from hardware guys the following bits 4484 * Based on the document from hardware guys the following bits
@@ -5114,9 +4585,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
5114 I915_WRITE(GEN6_GT_MODE, 4585 I915_WRITE(GEN6_GT_MODE,
5115 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); 4586 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5116 4587
5117 I915_WRITE(WM3_LP_ILK, 0); 4588 ilk_init_lp_watermarks(dev);
5118 I915_WRITE(WM2_LP_ILK, 0);
5119 I915_WRITE(WM1_LP_ILK, 0);
5120 4589
5121 I915_WRITE(CACHE_MODE_0, 4590 I915_WRITE(CACHE_MODE_0,
5122 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 4591 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -5290,9 +4759,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
5290{ 4759{
5291 struct drm_i915_private *dev_priv = dev->dev_private; 4760 struct drm_i915_private *dev_priv = dev->dev_private;
5292 4761
5293 I915_WRITE(WM3_LP_ILK, 0); 4762 ilk_init_lp_watermarks(dev);
5294 I915_WRITE(WM2_LP_ILK, 0);
5295 I915_WRITE(WM1_LP_ILK, 0);
5296 4763
5297 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4764 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5298 * This implements the WaDisableRCZUnitClockGating:hsw workaround. 4765 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
@@ -5341,9 +4808,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
5341 struct drm_i915_private *dev_priv = dev->dev_private; 4808 struct drm_i915_private *dev_priv = dev->dev_private;
5342 uint32_t snpcr; 4809 uint32_t snpcr;
5343 4810
5344 I915_WRITE(WM3_LP_ILK, 0); 4811 ilk_init_lp_watermarks(dev);
5345 I915_WRITE(WM2_LP_ILK, 0);
5346 I915_WRITE(WM1_LP_ILK, 0);
5347 4812
5348 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 4813 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5349 4814
@@ -6062,7 +5527,7 @@ void intel_init_pm(struct drm_device *dev)
6062{ 5527{
6063 struct drm_i915_private *dev_priv = dev->dev_private; 5528 struct drm_i915_private *dev_priv = dev->dev_private;
6064 5529
6065 if (I915_HAS_FBC(dev)) { 5530 if (HAS_FBC(dev)) {
6066 if (INTEL_INFO(dev)->gen >= 7) { 5531 if (INTEL_INFO(dev)->gen >= 7) {
6067 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 5532 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6068 dev_priv->display.enable_fbc = gen7_enable_fbc; 5533 dev_priv->display.enable_fbc = gen7_enable_fbc;
@@ -6095,58 +5560,27 @@ void intel_init_pm(struct drm_device *dev)
6095 if (HAS_PCH_SPLIT(dev)) { 5560 if (HAS_PCH_SPLIT(dev)) {
6096 intel_setup_wm_latency(dev); 5561 intel_setup_wm_latency(dev);
6097 5562
6098 if (IS_GEN5(dev)) { 5563 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6099 if (dev_priv->wm.pri_latency[1] && 5564 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6100 dev_priv->wm.spr_latency[1] && 5565 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6101 dev_priv->wm.cur_latency[1]) 5566 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6102 dev_priv->display.update_wm = ironlake_update_wm; 5567 dev_priv->display.update_wm = ilk_update_wm;
6103 else { 5568 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6104 DRM_DEBUG_KMS("Failed to get proper latency. " 5569 } else {
6105 "Disable CxSR\n"); 5570 DRM_DEBUG_KMS("Failed to read display plane latency. "
6106 dev_priv->display.update_wm = NULL; 5571 "Disable CxSR\n");
6107 } 5572 }
5573
5574 if (IS_GEN5(dev))
6108 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 5575 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6109 } else if (IS_GEN6(dev)) { 5576 else if (IS_GEN6(dev))
6110 if (dev_priv->wm.pri_latency[0] &&
6111 dev_priv->wm.spr_latency[0] &&
6112 dev_priv->wm.cur_latency[0]) {
6113 dev_priv->display.update_wm = sandybridge_update_wm;
6114 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
6115 } else {
6116 DRM_DEBUG_KMS("Failed to read display plane latency. "
6117 "Disable CxSR\n");
6118 dev_priv->display.update_wm = NULL;
6119 }
6120 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 5577 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6121 } else if (IS_IVYBRIDGE(dev)) { 5578 else if (IS_IVYBRIDGE(dev))
6122 if (dev_priv->wm.pri_latency[0] &&
6123 dev_priv->wm.spr_latency[0] &&
6124 dev_priv->wm.cur_latency[0]) {
6125 dev_priv->display.update_wm = ivybridge_update_wm;
6126 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
6127 } else {
6128 DRM_DEBUG_KMS("Failed to read display plane latency. "
6129 "Disable CxSR\n");
6130 dev_priv->display.update_wm = NULL;
6131 }
6132 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 5579 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6133 } else if (IS_HASWELL(dev)) { 5580 else if (IS_HASWELL(dev))
6134 if (dev_priv->wm.pri_latency[0] &&
6135 dev_priv->wm.spr_latency[0] &&
6136 dev_priv->wm.cur_latency[0]) {
6137 dev_priv->display.update_wm = haswell_update_wm;
6138 dev_priv->display.update_sprite_wm =
6139 haswell_update_sprite_wm;
6140 } else {
6141 DRM_DEBUG_KMS("Failed to read display plane latency. "
6142 "Disable CxSR\n");
6143 dev_priv->display.update_wm = NULL;
6144 }
6145 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 5581 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6146 } else if (INTEL_INFO(dev)->gen == 8) { 5582 else if (INTEL_INFO(dev)->gen == 8)
6147 dev_priv->display.init_clock_gating = gen8_init_clock_gating; 5583 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6148 } else
6149 dev_priv->display.update_wm = NULL;
6150 } else if (IS_VALLEYVIEW(dev)) { 5584 } else if (IS_VALLEYVIEW(dev)) {
6151 dev_priv->display.update_wm = valleyview_update_wm; 5585 dev_priv->display.update_wm = valleyview_update_wm;
6152 dev_priv->display.init_clock_gating = 5586 dev_priv->display.init_clock_gating =
@@ -6180,21 +5614,21 @@ void intel_init_pm(struct drm_device *dev)
6180 dev_priv->display.update_wm = i9xx_update_wm; 5614 dev_priv->display.update_wm = i9xx_update_wm;
6181 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 5615 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6182 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 5616 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6183 } else if (IS_I865G(dev)) { 5617 } else if (IS_GEN2(dev)) {
6184 dev_priv->display.update_wm = i830_update_wm; 5618 if (INTEL_INFO(dev)->num_pipes == 1) {
6185 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 5619 dev_priv->display.update_wm = i845_update_wm;
6186 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6187 } else if (IS_I85X(dev)) {
6188 dev_priv->display.update_wm = i9xx_update_wm;
6189 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
6190 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6191 } else {
6192 dev_priv->display.update_wm = i830_update_wm;
6193 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6194 if (IS_845G(dev))
6195 dev_priv->display.get_fifo_size = i845_get_fifo_size; 5620 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6196 else 5621 } else {
5622 dev_priv->display.update_wm = i9xx_update_wm;
6197 dev_priv->display.get_fifo_size = i830_get_fifo_size; 5623 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5624 }
5625
5626 if (IS_I85X(dev) || IS_I865G(dev))
5627 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5628 else
5629 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5630 } else {
5631 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6198 } 5632 }
6199} 5633}
6200 5634
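
The merged latency check above folds four per-platform copies into one predicate. As a hedged restatement, keyed on the fact that ILK validates the WM1 latency slots while SNB+ validate WM0:

static bool ilk_wm_latency_present(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int level = IS_GEN5(dev) ? 1 : 0;       /* ILK: WM1, SNB+: WM0 */

        return dev_priv->wm.pri_latency[level] &&
               dev_priv->wm.spr_latency[level] &&
               dev_priv->wm.cur_latency[level];
}
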
@@ -6289,10 +5723,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6289 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 5723 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6290} 5724}
6291 5725
6292void intel_pm_init(struct drm_device *dev) 5726void intel_pm_setup(struct drm_device *dev)
6293{ 5727{
6294 struct drm_i915_private *dev_priv = dev->dev_private; 5728 struct drm_i915_private *dev_priv = dev->dev_private;
6295 5729
5730 mutex_init(&dev_priv->rps.hw_lock);
5731
5732 mutex_init(&dev_priv->pc8.lock);
5733 dev_priv->pc8.requirements_met = false;
5734 dev_priv->pc8.gpu_idle = false;
5735 dev_priv->pc8.irqs_disabled = false;
5736 dev_priv->pc8.enabled = false;
5737 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
5738 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
6296 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 5739 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6297 intel_gen6_powersave_work); 5740 intel_gen6_powersave_work);
6298} 5741}
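
disable_count, initialized above, is a plain refcount of reasons PC8 must stay off, seeded at 2 as the comment notes (requirements_met + gpu_idle). A hedged sketch of how a holder would drop its reference; the 1000 ms delay is an arbitrary placeholder, not the driver's value:

static void hsw_pc8_put(struct drm_i915_private *dev_priv)
{
        mutex_lock(&dev_priv->pc8.lock);
        WARN_ON(dev_priv->pc8.disable_count == 0);
        if (--dev_priv->pc8.disable_count == 0) {
                /* last blocker gone: let the delayed worker enable PC8 */
                schedule_delayed_work(&dev_priv->pc8.enable_work,
                                      msecs_to_jiffies(1000));
        }
        mutex_unlock(&dev_priv->pc8.lock);
}
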
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e05a0216cd9b..8fcb32a02cb4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -663,14 +663,15 @@ gen6_add_request(struct intel_ring_buffer *ring)
663 struct drm_device *dev = ring->dev; 663 struct drm_device *dev = ring->dev;
664 struct drm_i915_private *dev_priv = dev->dev_private; 664 struct drm_i915_private *dev_priv = dev->dev_private;
665 struct intel_ring_buffer *useless; 665 struct intel_ring_buffer *useless;
666 int i, ret; 666 int i, ret, num_dwords = 4;
667 667
668 ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) * 668 if (i915_semaphore_is_enabled(dev))
669 MBOX_UPDATE_DWORDS) + 669 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
670 4); 670 #undef MBOX_UPDATE_DWORDS
671
672 ret = intel_ring_begin(ring, num_dwords);
671 if (ret) 673 if (ret)
672 return ret; 674 return ret;
673 #undef MBOX_UPDATE_DWORDS
674 675
675 for_each_ring(useless, dev_priv, i) { 676 for_each_ring(useless, dev_priv, i) {
676 u32 mbox_reg = ring->signal_mbox[i]; 677 u32 mbox_reg = ring->signal_mbox[i];
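
A hedged restatement of the sizing logic introduced above, with I915_NUM_RINGS and MBOX_UPDATE_DWORDS as defined around gen6_add_request: the 4-dword base request is always reserved, and the per-ring mailbox updates only when semaphores are in use, so semaphore-less configurations no longer over-reserve ring space.

static int gen6_request_dwords(struct drm_device *dev)
{
        int num_dwords = 4;     /* the base request emitted further down */

        if (i915_semaphore_is_enabled(dev))
                num_dwords += (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS;

        return num_dwords;
}
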
@@ -1606,8 +1607,8 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1606 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); 1607 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1607} 1608}
1608 1609
1609static int __intel_ring_begin(struct intel_ring_buffer *ring, 1610static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1610 int bytes) 1611 int bytes)
1611{ 1612{
1612 int ret; 1613 int ret;
1613 1614
@@ -1623,7 +1624,6 @@ static int __intel_ring_begin(struct intel_ring_buffer *ring,
1623 return ret; 1624 return ret;
1624 } 1625 }
1625 1626
1626 ring->space -= bytes;
1627 return 0; 1627 return 0;
1628} 1628}
1629 1629
@@ -1638,12 +1638,17 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1638 if (ret) 1638 if (ret)
1639 return ret; 1639 return ret;
1640 1640
1641 ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1642 if (ret)
1643 return ret;
1644
1641 /* Preallocate the olr before touching the ring */ 1645 /* Preallocate the olr before touching the ring */
1642 ret = intel_ring_alloc_seqno(ring); 1646 ret = intel_ring_alloc_seqno(ring);
1643 if (ret) 1647 if (ret)
1644 return ret; 1648 return ret;
1645 1649
1646 return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); 1650 ring->space -= num_dwords * sizeof(uint32_t);
1651 return 0;
1647} 1652}
1648 1653
1649void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1654void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
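
From a caller's point of view, the reorder above means ring->space is only debited once both __intel_ring_prepare() and the olr preallocation have succeeded, so an error return leaves no accounting to unwind. A hedged usage sketch:

static int emit_four_noops(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 4);        /* wait/wrap, olr, then debit */
        if (ret)
                return ret;                     /* ring->space untouched */

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}
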
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 90a3f6db8288..fe4de89c374c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -230,7 +230,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
230 u32 sprctl, sprscale = 0; 230 u32 sprctl, sprscale = 0;
231 unsigned long sprsurf_offset, linear_offset; 231 unsigned long sprsurf_offset, linear_offset;
232 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 232 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
233 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
234 233
235 sprctl = I915_READ(SPRCTL(pipe)); 234 sprctl = I915_READ(SPRCTL(pipe));
236 235
@@ -291,21 +290,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
291 crtc_w--; 290 crtc_w--;
292 crtc_h--; 291 crtc_h--;
293 292
294 /* 293 if (crtc_w != src_w || crtc_h != src_h)
295 * IVB workaround: must disable low power watermarks for at least
296 * one frame before enabling scaling. LP watermarks can be re-enabled
297 * when scaling is disabled.
298 */
299 if (crtc_w != src_w || crtc_h != src_h) {
300 dev_priv->sprite_scaling_enabled |= 1 << pipe;
301
302 if (!scaling_was_enabled) {
303 intel_update_watermarks(crtc);
304 intel_wait_for_vblank(dev, pipe);
305 }
306 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
307 } else
308 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
309 295
310 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 296 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
311 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 297 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
@@ -332,10 +318,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
332 I915_MODIFY_DISPBASE(SPRSURF(pipe), 318 I915_MODIFY_DISPBASE(SPRSURF(pipe),
333 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); 319 i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
334 POSTING_READ(SPRSURF(pipe)); 320 POSTING_READ(SPRSURF(pipe));
335
336 /* potentially re-enable LP watermarks */
337 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
338 intel_update_watermarks(crtc);
339} 321}
340 322
341static void 323static void
@@ -345,7 +327,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
345 struct drm_i915_private *dev_priv = dev->dev_private; 327 struct drm_i915_private *dev_priv = dev->dev_private;
346 struct intel_plane *intel_plane = to_intel_plane(plane); 328 struct intel_plane *intel_plane = to_intel_plane(plane);
347 int pipe = intel_plane->pipe; 329 int pipe = intel_plane->pipe;
348 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
349 330
350 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 331 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
351 /* Can't leave the scaler enabled... */ 332 /* Can't leave the scaler enabled... */
@@ -355,13 +336,13 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
355 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 336 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
356 POSTING_READ(SPRSURF(pipe)); 337 POSTING_READ(SPRSURF(pipe));
357 338
358 dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 339 /*
340 * Avoid underruns when disabling the sprite.
341 * FIXME remove once watermark updates are done properly.
342 */
343 intel_wait_for_vblank(dev, pipe);
359 344
360 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 345 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
361
362 /* potentially re-enable LP watermarks */
363 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
364 intel_update_watermarks(crtc);
365} 346}
366 347
367static int 348static int
@@ -488,7 +469,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
488 crtc_h--; 469 crtc_h--;
489 470
490 dvsscale = 0; 471 dvsscale = 0;
491 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) 472 if (crtc_w != src_w || crtc_h != src_h)
492 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 473 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
493 474
494 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 475 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -528,6 +509,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
528 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); 509 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
529 POSTING_READ(DVSSURF(pipe)); 510 POSTING_READ(DVSSURF(pipe));
530 511
512 /*
513 * Avoid underruns when disabling the sprite.
514 * FIXME remove once watermark updates are done properly.
515 */
516 intel_wait_for_vblank(dev, pipe);
517
531 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); 518 intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
532} 519}
533 520
@@ -661,6 +648,15 @@ format_is_yuv(uint32_t format)
661 } 648 }
662} 649}
663 650
651static bool colorkey_enabled(struct intel_plane *intel_plane)
652{
653 struct drm_intel_sprite_colorkey key;
654
655 intel_plane->get_colorkey(&intel_plane->base, &key);
656
657 return key.flags != I915_SET_COLORKEY_NONE;
658}
659
664static int 660static int
665intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 661intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
666 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 662 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -846,7 +842,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
846 * If the sprite is completely covering the primary plane, 842 * If the sprite is completely covering the primary plane,
847 * we can disable the primary and save power. 843 * we can disable the primary and save power.
848 */ 844 */
849 disable_primary = drm_rect_equals(&dst, &clip); 845 disable_primary = drm_rect_equals(&dst, &clip) && !colorkey_enabled(intel_plane);
850 WARN_ON(disable_primary && !visible && intel_crtc->active); 846 WARN_ON(disable_primary && !visible && intel_crtc->active);
851 847
852 mutex_lock(&dev->struct_mutex); 848 mutex_lock(&dev->struct_mutex);
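
A hedged restatement of the combined test above: full coverage alone is no longer enough to turn the primary plane off, because an active color key lets primary pixels show through the sprite.

static bool can_disable_primary(const struct drm_rect *dst,
                                const struct drm_rect *clip,
                                struct intel_plane *intel_plane)
{
        return drm_rect_equals(dst, clip) && !colorkey_enabled(intel_plane);
}
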
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
78 /* fbram uses this before device subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
73 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
74 const char *what, struct nouveau_i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
75 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
76 struct i2c_board_info *)); 76 struct i2c_board_info *, void *), void *);
77 struct list_head ports; 77 struct list_head ports;
78}; 78};
79 79
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
50static inline struct nouveau_instmem * 50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj) 51nouveau_instmem(void *obj)
52{ 52{
53 /* nv04/nv40 impls need to create objects in their constructor,
54 * which is before the subdev pointer is valid
55 */
56 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
57 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
58 return obj;
59
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; 60 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54} 61}
55 62
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
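
A hedged sketch of the control flow fixed above: while init_exec() reports execution suppressed (e.g. inside a failed condition), a JUMP opcode must be stepped over rather than taken, otherwise the parser would re-enter script code it was told to skip.

static void jump_or_skip(struct nvbios_init *init, u16 target)
{
        if (init_exec(init))
                init->offset = target;  /* actually take the jump */
        else
                init->offset += 3;      /* 1-byte opcode + 16-bit operand */
}
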
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *, void *), void *data)
201{ 201{
202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); 202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
203 int i; 203 int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
221 } 221 }
222 222
223 if (nv_probe_i2c(port, info[i].dev.addr) && 223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) { 224 (!match || match(port, &info[i].dev, data))) {
225 nv_info(i2c, "detected %s: %s\n", what, 225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type); 226 info[i].dev.type);
227 return i; 227 return i;
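
With the extra void * threaded through identify(), match callbacks no longer have to derive their context from the port (compare probe_monitoring_device in the therm hunk below, which now takes its priv straight from data). A hedged example under the new signature; struct my_ctx and the address check are purely illustrative:

struct my_ctx {                 /* hypothetical caller state */
        bool enabled;
        unsigned short want_addr;
};

static bool
match_with_ctx(struct nouveau_i2c_port *port, struct i2c_board_info *info,
               void *data)
{
        struct my_ctx *ctx = data;

        return ctx->enabled && info->addr == ctx->want_addr;
}
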
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
29 29
30static bool 30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c, 31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info, void *data)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = data;
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
36 struct i2c_client *client; 36 struct i2c_client *client;
37 37
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
96 }; 96 };
97 97
98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
99 board, probe_monitoring_device); 99 board, probe_monitoring_device, therm);
100 if (priv->ic) 100 if (priv->ic)
101 return; 101 return;
102 } 102 }
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
108 }; 108 };
109 109
110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
111 board, probe_monitoring_device); 111 board, probe_monitoring_device, therm);
112 if (priv->ic) 112 if (priv->ic)
113 return; 113 return;
114 } 114 }
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
117 device. Let's try our static list. 117 device. Let's try our static list.
118 */ 118 */
119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
120 nv_board_infos, probe_monitoring_device); 120 nv_board_infos, probe_monitoring_device, therm);
121} 121}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
643 get_tmds_slave(encoder)) 643 get_tmds_slave(encoder))
644 return; 644 return;
645 645
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); 646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 647 if (type < 0)
648 return; 648 return;
649 649
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
60 60
61 return i2c->identify(i2c, i2c_index, "TV encoder", 61 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL); 62 nv04_tv_encoder_info, NULL, NULL);
63} 63}
64 64
65 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
260 if (!dhandle) 261 if (!dhandle)
261 return false; 262 return false;
262 263
263 if (!acpi_has_method(dhandle, "_DSM")) 264 if (!acpi_has_method(dhandle, "_DSM")) {
265 nouveau_dsm_priv.other_handle = dhandle;
264 return false; 266 return false;
265 267 }
266 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) 268 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
267 retval |= NOUVEAU_DSM_HAS_MUX; 269 retval |= NOUVEAU_DSM_HAS_MUX;
268 270
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
338 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 340 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
339 acpi_method_name); 341 acpi_method_name);
340 nouveau_dsm_priv.dsm_detected = true; 342 nouveau_dsm_priv.dsm_detected = true;
343 /*
344 * On some systems hotplug events are generated for the device
345 * being switched off when _DSM is executed. They cause ACPI
346 * hotplug to trigger and attempt to remove the device from
347 * the system, which causes it to break down. Prevent that from
348 * happening by setting the no_hotplug flag for the involved
349 * ACPI device objects.
350 */
351 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
352 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
341 ret = true; 353 ret = true;
342 } 354 }
343 355
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence); 611 nouveau_fence_unref(&fence);
612 if (ret) 612 if (ret)
613 return ret; 613 goto fail_free;
614 614
615 if (new_bo != old_bo) { 615 if (new_bo != old_bo) {
616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
858 if (nouveau_runtime_pm == 0) 858 if (nouveau_runtime_pm == 0)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 /* are we optimus enabled? */
862 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
863 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
864 return -EINVAL;
865 }
866
861 nv_debug_level(SILENT); 867 nv_debug_level(SILENT);
862 drm_kms_helper_poll_disable(drm_dev); 868 drm_kms_helper_poll_disable(drm_dev);
863 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 869 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
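
A hedged restatement of the gate added above: with the default auto setting (-1), runtime suspend is only permitted on machines exposing Optimus or the v1 _DSM mux, since powering off anything else may not be recoverable; 0 disables and 1 forces, matching the hunk.

static bool runtime_suspend_allowed(int runtime_pm_param)
{
        if (runtime_pm_param == 0)
                return false;           /* explicitly disabled */
        if (runtime_pm_param == 1)
                return true;            /* explicitly forced on */
        /* auto: only trust machines with a usable switching method */
        return nouveau_is_optimus() || nouveau_is_v1_dsm();
}
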
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_BONAIRE) 1146 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1149 tmp = rdev->config.si.tile_config;
1150 else if (rdev->family >= CHIP_CAYMAN)
1151 tmp = rdev->config.cayman.tile_config;
1152 else
1153 tmp = rdev->config.evergreen.tile_config;
1154 1147
1155 switch ((tmp & 0xf0) >> 4) { 1148 /* Set NUM_BANKS. */
1156 case 0: /* 4 banks */ 1149 if (rdev->family >= CHIP_BONAIRE) {
1157 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1150 unsigned tileb, index, num_banks, tile_split_bytes;
1158 break; 1151
1159 case 1: /* 8 banks */ 1152 /* Calculate the macrotile mode index. */
1160 default: 1153 tile_split_bytes = 64 << tile_split;
1161 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1154 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1162 break; 1155 tileb = min(tile_split_bytes, tileb);
1163 case 2: /* 16 banks */ 1156
1164 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1157 for (index = 0; tileb > 64; index++) {
1165 break; 1158 tileb >>= 1;
1159 }
1160
1161 if (index >= 16) {
1162 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1163 target_fb->bits_per_pixel, tile_split);
1164 return -EINVAL;
1165 }
1166
1167 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1168 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1169 } else {
1170 /* SI and older. */
1171 if (rdev->family >= CHIP_TAHITI)
1172 tmp = rdev->config.si.tile_config;
1173 else if (rdev->family >= CHIP_CAYMAN)
1174 tmp = rdev->config.cayman.tile_config;
1175 else
1176 tmp = rdev->config.evergreen.tile_config;
1177
1178 switch ((tmp & 0xf0) >> 4) {
1179 case 0: /* 4 banks */
1180 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1181 break;
1182 case 1: /* 8 banks */
1183 default:
1184 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1185 break;
1186 case 2: /* 16 banks */
1187 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1188 break;
1189 }
1166 } 1190 }
1167 1191
1168 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1192 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1169
1170 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1171 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1193 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1194 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1195 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
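
A worked example of the macrotile index math above, extracted as a hedged helper: with a 32 bpp scanout and tile_split == 4, tile_split_bytes = 64 << 4 = 1024 and one 8x8 tile is 8 * 8 * 4 = 256 bytes; min(1024, 256) = 256, and 256 -> 128 -> 64 halves twice, so index == 2 selects the macrotile mode entry.

static unsigned cik_macrotile_index(unsigned bpp, unsigned tile_split)
{
        unsigned tile_split_bytes = 64 << tile_split;
        unsigned tileb = 8 * 8 * bpp / 8;       /* bytes per 8x8 tile */
        unsigned index;

        tileb = min(tile_split_bytes, tileb);
        for (index = 0; tileb > 64; index++)
                tileb >>= 1;

        return index;   /* callers must reject index >= 16 */
}
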
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1202 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1181 1203
1182 if (rdev->family >= CHIP_BONAIRE) { 1204 if (rdev->family >= CHIP_BONAIRE) {
1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1205 /* Read the pipe config from the 2D TILED SCANOUT mode.
1184 u32 num_rb = rdev->config.cik.max_backends_per_se; 1206 * It should be the same for the other modes too, but not all
1185 if (num_pipe_configs > 8) 1207 * modes set the pipe config field. */
1186 num_pipe_configs = 8; 1208 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1187 if (num_pipe_configs == 8) 1209
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); 1210 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) || 1211 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN)) 1212 (rdev->family == CHIP_PITCAIRN))
1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1213 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1199 else if (rdev->family == CHIP_VERDE) 1214 else if ((rdev->family == CHIP_VERDE) ||
1215 (rdev->family == CHIP_OLAND) ||
1216 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1217 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1201 1218
1202 switch (radeon_crtc->crtc_id) { 1219 switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9e50dd5d0e42..e7f6334138a1 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
3057 * Returns the disabled RB bitmask. 3057 * Returns the disabled RB bitmask.
3058 */ 3058 */
3059static u32 cik_get_rb_disabled(struct radeon_device *rdev, 3059static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3060 u32 max_rb_num, u32 se_num, 3060 u32 max_rb_num_per_se,
3061 u32 sh_per_se) 3061 u32 sh_per_se)
3062{ 3062{
3063 u32 data, mask; 3063 u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3071 3071
3072 data >>= BACKEND_DISABLE_SHIFT; 3072 data >>= BACKEND_DISABLE_SHIFT;
3073 3073
3074 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); 3074 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3075 3075
3076 return data & mask; 3076 return data & mask;
3077} 3077}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3088 */ 3088 */
3089static void cik_setup_rb(struct radeon_device *rdev, 3089static void cik_setup_rb(struct radeon_device *rdev,
3090 u32 se_num, u32 sh_per_se, 3090 u32 se_num, u32 sh_per_se,
3091 u32 max_rb_num) 3091 u32 max_rb_num_per_se)
3092{ 3092{
3093 int i, j; 3093 int i, j;
3094 u32 data, mask; 3094 u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3098 for (i = 0; i < se_num; i++) { 3098 for (i = 0; i < se_num; i++) {
3099 for (j = 0; j < sh_per_se; j++) { 3099 for (j = 0; j < sh_per_se; j++) {
3100 cik_select_se_sh(rdev, i, j); 3100 cik_select_se_sh(rdev, i, j);
3101 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3101 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3102 if (rdev->family == CHIP_HAWAII) 3102 if (rdev->family == CHIP_HAWAII)
3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3104 else 3104 else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3109 3109
3110 mask = 1; 3110 mask = 1;
3111 for (i = 0; i < max_rb_num; i++) { 3111 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3112 if (!(disabled_rbs & mask)) 3112 if (!(disabled_rbs & mask))
3113 enabled_rbs |= mask; 3113 enabled_rbs |= mask;
3114 mask <<= 1; 3114 mask <<= 1;
3115 } 3115 }
3116 3116
3117 rdev->config.cik.backend_enable_mask = enabled_rbs;
3118
3117 for (i = 0; i < se_num; i++) { 3119 for (i = 0; i < se_num; i++) {
3118 cik_select_se_sh(rdev, i, 0xffffffff); 3120 cik_select_se_sh(rdev, i, 0xffffffff);
3119 data = 0; 3121 data = 0;
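
A hedged restatement of the renamed accounting above: the count is per shader engine, so the scan covers max_rb_num_per_se * se_num bits, and every backend not flagged disabled lands in the exported backend_enable_mask.

static u32 rb_enable_mask(u32 disabled_rbs, u32 max_rb_num_per_se,
                          u32 se_num)
{
        u32 enabled_rbs = 0;
        u32 mask = 1;
        u32 i;

        for (i = 0; i < max_rb_num_per_se * se_num; i++) {
                if (!(disabled_rbs & mask))
                        enabled_rbs |= mask;
                mask <<= 1;
        }

        return enabled_rbs;
}
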
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f0f9e1089409..af520d4d362b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -527,7 +527,7 @@ int cik_copy_dma(struct radeon_device *rdev,
527 radeon_ring_write(ring, 0); /* src/dst endian swap */ 527 radeon_ring_write(ring, 0); /* src/dst endian swap */
528 radeon_ring_write(ring, src_offset & 0xffffffff); 528 radeon_ring_write(ring, src_offset & 0xffffffff);
529 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); 529 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
530 radeon_ring_write(ring, dst_offset & 0xfffffffc); 530 radeon_ring_write(ring, dst_offset & 0xffffffff);
531 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); 531 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
532 src_offset += cur_size_in_bytes; 532 src_offset += cur_size_in_bytes;
533 dst_offset += cur_size_in_bytes; 533 dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493cbc44..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
174 } 174 }
175 175
176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
177 if (sad_count < 0) { 177 if (sad_count <= 0) {
178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
179 return; 179 return;
180 } 180 }
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
235 } 235 }
236 236
237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
238 if (sad_count < 0) { 238 if (sad_count <= 0) {
239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
240 return; 240 return;
241 } 241 }
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
308 rdev->audio.enabled = true; 308 rdev->audio.enabled = true;
309 309
310 if (ASIC_IS_DCE8(rdev)) 310 if (ASIC_IS_DCE8(rdev))
311 rdev->audio.num_pins = 7; 311 rdev->audio.num_pins = 6;
312 else if (ASIC_IS_DCE61(rdev))
313 rdev->audio.num_pins = 4;
312 else 314 else
313 rdev->audio.num_pins = 6; 315 rdev->audio.num_pins = 6;
314 316
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
118 } 118 }
119 119
120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
121 if (sad_count < 0) { 121 if (sad_count <= 0) {
122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
123 return; 123 return;
124 } 124 }
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
173 } 173 }
174 174
175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
176 if (sad_count < 0) { 176 if (sad_count <= 0) {
177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
178 return; 178 return;
179 } 179 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9f11a55962b5..af45b23675ee 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -896,6 +896,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		   (rdev->pdev->device == 0x999C)) {
 		rdev->config.cayman.max_simds_per_se = 6;
 		rdev->config.cayman.max_backends_per_se = 2;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
 	} else if ((rdev->pdev->device == 0x9903) ||
 		   (rdev->pdev->device == 0x9904) ||
 		   (rdev->pdev->device == 0x990A) ||
@@ -906,6 +910,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		   (rdev->pdev->device == 0x999D)) {
 		rdev->config.cayman.max_simds_per_se = 4;
 		rdev->config.cayman.max_backends_per_se = 2;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
 	} else if ((rdev->pdev->device == 0x9919) ||
 		   (rdev->pdev->device == 0x9990) ||
 		   (rdev->pdev->device == 0x9991) ||
@@ -916,9 +924,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		   (rdev->pdev->device == 0x99A0)) {
 		rdev->config.cayman.max_simds_per_se = 3;
 		rdev->config.cayman.max_backends_per_se = 1;
+		rdev->config.cayman.max_hw_contexts = 4;
+		rdev->config.cayman.sx_max_export_size = 128;
+		rdev->config.cayman.sx_max_export_pos_size = 32;
+		rdev->config.cayman.sx_max_export_smx_size = 96;
 	} else {
 		rdev->config.cayman.max_simds_per_se = 2;
 		rdev->config.cayman.max_backends_per_se = 1;
+		rdev->config.cayman.max_hw_contexts = 4;
+		rdev->config.cayman.sx_max_export_size = 128;
+		rdev->config.cayman.sx_max_export_pos_size = 32;
+		rdev->config.cayman.sx_max_export_smx_size = 96;
 	}
 	rdev->config.cayman.max_texture_channel_caches = 2;
 	rdev->config.cayman.max_gprs = 256;
@@ -926,10 +942,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 	rdev->config.cayman.max_gs_threads = 32;
 	rdev->config.cayman.max_stack_entries = 512;
 	rdev->config.cayman.sx_num_of_sets = 8;
-	rdev->config.cayman.sx_max_export_size = 256;
-	rdev->config.cayman.sx_max_export_pos_size = 64;
-	rdev->config.cayman.sx_max_export_smx_size = 192;
-	rdev->config.cayman.max_hw_contexts = 8;
 	rdev->config.cayman.sq_num_cf_insts = 2;
 
 	rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 746c0c6c269b..c5519ca4bbc4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1949,7 +1949,7 @@ struct si_asic {
 	unsigned sc_earlyz_tile_fifo_size;
 
 	unsigned num_tile_pipes;
-	unsigned num_backends_per_se;
+	unsigned backend_enable_mask;
 	unsigned backend_disable_mask_per_asic;
 	unsigned backend_map;
 	unsigned num_texture_channel_caches;
@@ -1979,7 +1979,7 @@ struct cik_asic {
 	unsigned sc_earlyz_tile_fifo_size;
 
 	unsigned num_tile_pipes;
-	unsigned num_backends_per_se;
+	unsigned backend_enable_mask;
 	unsigned backend_disable_mask_per_asic;
 	unsigned backend_map;
 	unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f55879dd11c6..f74db43346fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2030,7 +2030,7 @@ static struct radeon_asic ci_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2132,7 +2132,7 @@ static struct radeon_asic kv_asic = {
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &cik_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
 	bool atpx_detected;
 	/* handle for device - and atpx */
 	acpi_handle dhandle;
+	acpi_handle other_handle;
 	struct radeon_atpx atpx;
 } radeon_atpx_priv;
 
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 		return false;
 
 	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
-	if (ACPI_FAILURE(status))
+	if (ACPI_FAILURE(status)) {
+		radeon_atpx_priv.other_handle = dhandle;
 		return false;
-
+	}
 	radeon_atpx_priv.dhandle = dhandle;
 	radeon_atpx_priv.atpx.handle = atpx_handle;
 	return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
 		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		radeon_atpx_priv.atpx_detected = true;
+		/*
+		 * On some systems hotplug events are generated for the device
+		 * being switched off when ATPX is executed.  They cause ACPI
+		 * hotplug to trigger and attempt to remove the device from
+		 * the system, which causes it to break down.  Prevent that from
+		 * happening by setting the no_hotplug flag for the involved
+		 * ACPI device objects.
+		 */
+		acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+		acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
 		return true;
 	}
 	return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e91d548063ef..67fadcf4590f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
  * 2.33.0 - Add SI tiling mode array query
  * 2.34.0 - Add CIK tiling mode array query
  * 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	35
+#define KMS_DRIVER_MINOR	36
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -512,15 +513,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	radeon_driver_unload_kms(dev);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -590,7 +582,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
 	.driver.pm = &radeon_pm_ops,
-	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c44574e248d1..5bf50cec017e 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
 		*value = 1;
 		break;
+	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+		if (rdev->family >= CHIP_BONAIRE) {
+			*value = rdev->config.cik.backend_enable_mask;
+		} else if (rdev->family >= CHIP_TAHITI) {
+			*value = rdev->config.si.backend_enable_mask;
+		} else {
+			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+		}
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
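
The new RADEON_INFO_SI_BACKEND_ENABLED_MASK request is read through the existing radeon info ioctl, which copies the result back through a user pointer stored in the 64-bit value field. A minimal userspace sketch, assuming a radeon_drm.h that already carries the new request id (the helper name query_backend_mask is illustrative, not from libdrm):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>	/* assumed to define the new request id */

static int query_backend_mask(int fd, uint32_t *mask)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_SI_BACKEND_ENABLED_MASK;
	info.value = (uintptr_t)mask;	/* kernel writes the mask back here */

	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}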
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 
-	if ((start >> 28) != (end >> 28)) {
+	if ((start >> 28) != ((end - 1) >> 28)) {
 		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
 			  start, end);
 		return -EINVAL;
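
The relocation range here treats end as exclusive, so the last byte actually addressed is end - 1; comparing end >> 28 therefore rejected buffers that merely end exactly on a 256MB segment boundary. A standalone sketch of the corrected predicate, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

static int crosses_256mb(uint64_t start, uint64_t end)	/* end is exclusive */
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	/* last byte is 0x0FFFFFFF, still in segment 0: not a crossing,
	 * but the old "(end >> 28)" test rejected it */
	printf("%d\n", crosses_256mb(0x0FF00000ULL, 0x10000000ULL));	/* 0 */
	/* one byte further really does cross */
	printf("%d\n", crosses_256mb(0x0FF00000ULL, 0x10000001ULL));	/* 1 */
	return 0;
}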
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index e461b45f29a9..35950738bd5e 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB. Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
 
 	/* Use K8 direct mapping for fast fb access. */
 	rdev->fastfb_working = false;
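
Worked numbers for the adjustment above, assuming the affected configuration where the board reports 384MB including a bogus 128MB sideport window; skipping that window moves the framebuffer base up and leaves 256MB of UMA:

#include <stdio.h>

int main(void)
{
	unsigned long long base = 0;			/* hypothetical FB start */
	unsigned long long vram = 384ULL << 20;		/* size as reported */

	if (vram == 384ULL << 20) {			/* sideport suspect */
		base += 128ULL << 20;			/* skip the sideport window */
		vram -= 128ULL << 20;			/* 256MB of UMA remains */
	}
	printf("base=+%lluMB size=%lluMB\n", base >> 20, vram >> 20);
	return 0;
}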
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 4aaeb118a3ff..b95267846ff2 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2335,6 +2335,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
 						       ASIC_INTERNAL_MEMORY_SS, 0);
 
+	/* disable ss, causes hangs on some cayman boards */
+	if (rdev->family == CHIP_CAYMAN) {
+		pi->sclk_ss = false;
+		pi->mclk_ss = false;
+	}
+
 	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
 	else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 626163ef483d..22d3517ed6ad 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2813,7 +2813,7 @@ static void si_setup_spi(struct radeon_device *rdev,
 }
 
 static u32 si_get_rb_disabled(struct radeon_device *rdev,
-			      u32 max_rb_num, u32 se_num,
+			      u32 max_rb_num_per_se,
 			      u32 sh_per_se)
 {
 	u32 data, mask;
@@ -2827,14 +2827,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
 
 	data >>= BACKEND_DISABLE_SHIFT;
 
-	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
 
 	return data & mask;
 }
 
 static void si_setup_rb(struct radeon_device *rdev,
 			u32 se_num, u32 sh_per_se,
-			u32 max_rb_num)
+			u32 max_rb_num_per_se)
 {
 	int i, j;
 	u32 data, mask;
@@ -2844,19 +2844,21 @@ static void si_setup_rb(struct radeon_device *rdev,
 	for (i = 0; i < se_num; i++) {
 		for (j = 0; j < sh_per_se; j++) {
 			si_select_se_sh(rdev, i, j);
-			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
 			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
 		}
 	}
 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
 	mask = 1;
-	for (i = 0; i < max_rb_num; i++) {
+	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
 		if (!(disabled_rbs & mask))
 			enabled_rbs |= mask;
 		mask <<= 1;
 	}
 
+	rdev->config.si.backend_enable_mask = enabled_rbs;
+
 	for (i = 0; i < se_num; i++) {
 		si_select_se_sh(rdev, i, 0xffffffff);
 		data = 0;
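
The rename makes explicit that the third argument was already a per-SE count: dividing it by se_num again shrank the per-SH disable mask, and looping only max_rb_num times scanned one SE's worth of render backends. A standalone illustration with hypothetical Tahiti-like numbers (2 SEs, 2 SHs per SE, 4 RBs per SE):

#include <stdio.h>

static unsigned create_bitmask(unsigned bits)
{
	return (1u << bits) - 1;
}

int main(void)
{
	unsigned se_num = 2, sh_per_se = 2, max_rb_num_per_se = 4;

	/* old: 4 / 2 / 2 = 1 bit per SH, too narrow for 2 RBs per SH */
	printf("old mask 0x%x\n",
	       create_bitmask(max_rb_num_per_se / se_num / sh_per_se));
	/* new: 4 / 2 = 2 bits per SH */
	printf("new mask 0x%x\n",
	       create_bitmask(max_rb_num_per_se / sh_per_se));
	/* the enable loop now covers all SEs: 4 * 2 = 8 RB bits */
	printf("rb bits scanned %u\n", max_rb_num_per_se * se_num);
	return 0;
}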
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 145f54f17b85..1df856f78568 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index cfcdf5b5440a..801231c9ae48 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -178,9 +178,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
-	page_last = vma_pages(vma) +
-	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+	    vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+	page_last = vma_pages(vma) + vma->vm_pgoff -
+	    drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
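
The page index inside the buffer object is the fault offset within the vma plus the mapping's own offset past the object's start in the mmap space; the old expression had the two offsets the wrong way around, so any mapping that did not begin at the object's first page underflowed. A standalone sketch with hypothetical page numbers:

#include <stdio.h>

int main(void)
{
	/* the BO starts at offset 0x100 in the mmap space, and userspace
	 * mapped it starting at its third page (vm_pgoff 0x102) */
	unsigned long node_start = 0x100, vm_pgoff = 0x102, fault_page = 0;

	unsigned long fixed = fault_page + vm_pgoff - node_start;	/* 2 */
	unsigned long old = fault_page + node_start - vm_pgoff;		/* wraps */

	printf("fixed=%lu old=%lu\n", fixed, old);
	return 0;
}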
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206482a6..797ed29a36ea 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[] __initdata = {
+static struct cpuidle_state nehalem_cstates[] = {
 	{
 		.name = "C1-NHM",
 		.desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
 		.enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[] __initdata = {
+static struct cpuidle_state snb_cstates[] = {
 	{
 		.name = "C1-SNB",
 		.desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
 		.enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[] __initdata = {
+static struct cpuidle_state ivb_cstates[] = {
 	{
 		.name = "C1-IVB",
 		.desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
 		.enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[] __initdata = {
+static struct cpuidle_state hsw_cstates[] = {
 	{
 		.name = "C1-HSW",
 		.desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
 		.enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[] __initdata = {
+static struct cpuidle_state atom_cstates[] = {
 	{
 		.name = "C1E-ATM",
 		.desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
 	{
 		.enter = NULL }
 };
-static struct cpuidle_state avn_cstates[] __initdata = {
+static struct cpuidle_state avn_cstates[] = {
 	{
 		.name = "C1-AVN",
 		.desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
 		.exit_latency = 15,
 		.target_residency = 45,
 		.enter = &intel_idle },
+	{
+		.enter = NULL }
 };
 
 /**
@@ -377,6 +379,9 @@ static int intel_idle(struct cpuidle_device *dev,
 
 	if (!current_set_polling_and_test()) {
 
+		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = 1,
 		.scan_index = 1,
-		.scan_type = IIO_ST('u', 12, 16, 0),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 12,
+			.storagebits = 16,
+			.shift = 0,
+			.endianness = IIO_BE,
+		},
 	},
 	.channel[1] = {
 		.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = 0,
 		.scan_index = 0,
-		.scan_type = IIO_ST('u', 12, 16, 0),
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 12,
+			.storagebits = 16,
+			.shift = 0,
+			.endianness = IIO_BE,
+		},
 	},
 	.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
 	.int_vref_mv = 2500,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 		.address = ADIS16448_BARO_OUT,
 		.scan_index = ADIS16400_SCAN_BARO,
-		.scan_type = IIO_ST('s', 16, 16, 0),
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
 	IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
 		return -EINVAL;
 	}
 
-	return IIO_VAL_INT_PLUS_MICRO;
+	return IIO_VAL_INT;
 }
 
 static int cm36651_write_int_time(struct cm36651_data *cm36651,
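
A read callback's return code tells the IIO core how to render the (val, val2) pair it filled in; since the integration times here are plain integers, IIO_VAL_INT is the correct encoding. A standalone illustration of the two formats, with hypothetical values:

#include <stdio.h>

int main(void)
{
	int val = 2, val2 = 0;	/* hypothetical integration time */

	printf("IIO_VAL_INT:            %d\n", val);
	printf("IIO_VAL_INT_PLUS_MICRO: %d.%06d\n", val, val2);
	return 0;
}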
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
 static void rem_ref(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
+	int cb_destroy;
+
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
-	if (iwcm_deref_id(cm_id_priv) &&
-	    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+	/*
+	 * Test bit before deref in case the cm_id gets freed on another
+	 * thread.
+	 */
+	cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+	if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
 		BUG_ON(!list_empty(&cm_id_priv->work_list));
 		free_cm_id(cm_id_priv);
 	}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
-		(udata)->inbuf  = (void __user *) (ibuf);		\
+		(udata)->inbuf  = (const void __user *) (ibuf);		\
 		(udata)->outbuf = (void __user *) (obuf);		\
 		(udata)->inlen  = (ilen);				\
 		(udata)->outlen = (olen);				\
 	} while (0)
 
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
+	do {									\
+		(udata)->inbuf  = (ilen) ? (const void __user *) (ibuf) : NULL;	\
+		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
+		(udata)->inlen  = (ilen);					\
+		(udata)->outlen = (olen);					\
+	} while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
  *
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	if (kern_spec->reserved)
+		return -EINVAL;
+
 	ib_spec->type = kern_spec->type;
 
 	switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	void *ib_spec;
 	int i;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	if (ucore->outlen < sizeof(resp))
 		return -ENOSPC;
 
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 		return -EINVAL;
 
+	if (cmd.flow_attr.reserved[0] ||
+	    cmd.flow_attr.reserved[1])
+		return -EINVAL;
+
 	if (cmd.flow_attr.num_of_specs) {
 		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
 					 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
 		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
 			i, cmd.flow_attr.size);
+		err = -EINVAL;
 		goto err_free;
 	}
 	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
 	struct ib_uobject *uobj;
 	int ret;
 
+	if (ucore->inlen < sizeof(cmd))
+		return -EINVAL;
+
 	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
 	if (ret)
 		return ret;
 
+	if (cmd.comp_mask)
+		return -EINVAL;
+
 	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
 			      file->ucontext);
 	if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
 		return -EINVAL;
 
+	if (ex_hdr.cmd_hdr_reserved)
+		return -EINVAL;
+
 	if (ex_hdr.response) {
 		if (!hdr.out_words && !ex_hdr.provider_out_words)
 			return -EINVAL;
+
+		if (!access_ok(VERIFY_WRITE,
+			       (void __user *) (unsigned long) ex_hdr.response,
+			       (hdr.out_words + ex_hdr.provider_out_words) * 8))
+			return -EFAULT;
 	} else {
 		if (hdr.out_words || ex_hdr.provider_out_words)
 			return -EINVAL;
 	}
 
-	INIT_UDATA(&ucore,
-		   (hdr.in_words) ? buf : 0,
-		   (unsigned long)ex_hdr.response,
-		   hdr.in_words * 8,
-		   hdr.out_words * 8);
-
-	INIT_UDATA(&uhw,
-		   (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
-		   (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
-		   ex_hdr.provider_in_words * 8,
-		   ex_hdr.provider_out_words * 8);
+	INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+			       hdr.in_words * 8, hdr.out_words * 8);
+
+	INIT_UDATA_BUF_OR_NULL(&uhw,
+			       buf + ucore.inlen,
+			       (unsigned long) ex_hdr.response + ucore.outlen,
+			       ex_hdr.provider_in_words * 8,
+			       ex_hdr.provider_out_words * 8);
 
 	err = uverbs_ex_cmd_table[command](file,
 					   &ucore,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC)  /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
-				  struct l2t_entry *l2t)
-{
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (dev->rdev.lldi.filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
 		req->local_ip = la->sin_addr.s_addr;
 		req->peer_ip = ra->sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-					ep->dst, ep->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 		req->opt2 = cpu_to_be32(opt2);
 	} else {
 		req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
 			req6->peer_ip_lo = *((__be64 *)
 					     (ra6->sin6_addr.s6_addr + 8));
 			req6->opt0 = cpu_to_be64(opt0);
-			req6->params = cpu_to_be32(
-					select_ntuple(ep->com.dev, ep->dst,
-						      ep->l2t));
+			req6->params = cpu_to_be32(cxgb4_select_ntuple(
+					ep->com.dev->rdev.lldi.ports[0],
+					ep->l2t));
 			req6->opt2 = cpu_to_be32(opt2);
 		}
 	} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
 			t5_req->peer_ip = ra->sin_addr.s_addr;
 			t5_req->opt0 = cpu_to_be64(opt0);
 			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-						select_ntuple(ep->com.dev,
-						ep->dst, ep->l2t)));
+						cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t)));
 			t5_req->opt2 = cpu_to_be32(opt2);
 		} else {
 			t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
 					     (ra6->sin6_addr.s6_addr + 8));
 			t5_req6->opt0 = cpu_to_be64(opt0);
 			t5_req6->params = (__force __be64)cpu_to_be32(
-					select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+					cxgb4_select_ntuple(
+						ep->com.dev->rdev.lldi.ports[0],
+						ep->l2t));
 			t5_req6->opt2 = cpu_to_be32(opt2);
 		}
 	}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	memset(req, 0, sizeof(*req));
 	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
 	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+				     ep->com.dev->rdev.lldi.ports[0],
 				     ep->l2t));
 	sin = (struct sockaddr_in *)&ep->com.local_addr;
 	req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	if (dev->rdev.lldi.enable_fw_ofld_conn)
+	if (dev->rdev.lldi.enable_fw_ofld_conn &&
+	    ep->com.local_addr.ss_family == AF_INET)
 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
 					     cm_id->local_addr.ss_family, ep);
 	else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
 	 */
-	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
-					- dev->rdev.lldi.tids->sftid_base
-					+ dev->rdev.lldi.tids->nstids;
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
 
 	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
 	if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	window = (__force u16) htons((__force u16)tcph->window);
 
 	/* Calcuate filter portion for LE region. */
-	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+						    dev->rdev.lldi.ports[0],
+						    e));
 
 	/*
 	 * Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	return ret;
 }
 
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 {
 	u32 remain = len;
 	u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/if_arp.h>	/* For ARPHRD_xxx */
 #include <linux/module.h>
 #include <net/rtnetlink.h>
 #include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
 		return -EINVAL;
 
 	pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-	if (!pdev)
+	if (!pdev || pdev->type != ARPHRD_INFINIBAND)
 		return -ENODEV;
 
 	ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->conn_rx_descs = NULL;
 }
 
+static void isert_cq_tx_work(struct work_struct *);
 static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
 static void isert_cq_rx_callback(struct ib_cq *, void *);
 
 static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
 
+		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
 		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_rx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_RX_CQ_LEN, i);
-		if (IS_ERR(device->dev_rx_cq[i]))
+		if (IS_ERR(device->dev_rx_cq[i])) {
+			ret = PTR_ERR(device->dev_rx_cq[i]);
+			device->dev_rx_cq[i] = NULL;
 			goto out_cq;
+		}
 
+		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
 		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
 						isert_cq_tx_callback,
 						isert_cq_event_callback,
 						(void *)&cq_desc[i],
 						ISER_MAX_TX_CQ_LEN, i);
-		if (IS_ERR(device->dev_tx_cq[i]))
+		if (IS_ERR(device->dev_tx_cq[i])) {
+			ret = PTR_ERR(device->dev_tx_cq[i]);
+			device->dev_tx_cq[i] = NULL;
 			goto out_cq;
+		}
 
-		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 
-		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+		if (ret)
 			goto out_cq;
 	}
 
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
 	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
 }
 
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
 {
 	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
 
-	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
 	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
 }
 
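
The underlying rule, sketched with illustrative foo_* names rather than the driver's: a work item is initialized exactly once before it can ever be queued, and the completion callback then only queues it. Re-running INIT_WORK() on a work item that may still be pending corrupts its list linkage.

#include <linux/workqueue.h>

struct foo_cq_desc {
	struct work_struct rx_work;
};

static void foo_rx_work(struct work_struct *w)
{
	/* drain the completion queue here */
}

static void foo_setup(struct foo_cq_desc *d)
{
	INIT_WORK(&d->rx_work, foo_rx_work);	/* exactly once, at setup */
}

static void foo_cq_callback(struct foo_cq_desc *d)
{
	/* no INIT_WORK here: the item may already be queued */
	queue_work(system_wq, &d->rx_work);
}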
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd905b1..d2965e4b3224 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 		break;
 
 	case EV_ABS:
+		input_alloc_absinfo(dev);
+		if (!dev->absinfo)
+			return;
+
 		__set_bit(code, dev->absbit);
 		break;
 
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6ff3ba..aa127ba392a4 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
 	}
 }
 
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+	struct zforce_ts *ts = dev_id;
+	struct i2c_client *client = ts->client;
+
+	if (ts->suspended && device_may_wakeup(&client->dev))
+		pm_wakeup_event(&client->dev, 500);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
 {
 	struct zforce_ts *ts = dev_id;
 	struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
 	u8 *payload;
 
 	/*
-	 * When suspended, emit a wakeup signal if necessary and return.
+	 * When still suspended, return.
 	 * Due to the level-interrupt we will get re-triggered later.
 	 */
 	if (ts->suspended) {
-		if (device_may_wakeup(&client->dev))
-			pm_wakeup_event(&client->dev, 500);
 		msleep(20);
 		return IRQ_HANDLED;
 	}
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
 	 * Therefore we can trigger the interrupt anytime it is low and do
 	 * not need to limit it to the interrupt edge.
 	 */
-	ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-					zforce_interrupt,
+	ret = devm_request_threaded_irq(&client->dev, client->irq,
+					zforce_irq, zforce_irq_thread,
 					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 					input_dev->name, ts);
 	if (ret) {
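
A minimal sketch of the same hard-IRQ/thread split, assuming a hypothetical "foo" driver (all foo_* names are illustrative): the primary handler runs in hard-IRQ context, so it only signals the wakeup source and defers any sleeping bus traffic to the thread by returning IRQ_WAKE_THREAD.

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_ts {
	struct device *dev;
	bool suspended;
};

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_ts *ts = dev_id;

	/* atomic context: no sleeping calls here */
	if (ts->suspended && device_may_wakeup(ts->dev))
		pm_wakeup_event(ts->dev, 500);

	return IRQ_WAKE_THREAD;	/* run foo_thread() in process context */
}

static irqreturn_t foo_thread(int irq, void *dev_id)
{
	/* sleeping bus transactions (e.g. I2C reads) belong here */
	return IRQ_HANDLED;
}

/* registration mirrors the probe() change above:
 * devm_request_threaded_irq(dev, irq, foo_hardirq, foo_thread,
 *			     IRQF_TRIGGER_LOW | IRQF_ONESHOT, name, ts);
 */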
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
 static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 					 int irq, int do_mask)
 {
-	int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
-	int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+	int bitfield_width = 4;
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
 				      shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
 
 static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
 {
+	/* The SENSE register is assumed to be 32-bit. */
 	int bitfield_width = p->config.sense_bitfield_width;
-	int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+	int shift = 32 - (irq + 1) * bitfield_width;
 
 	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
 
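
The old formula hard-coded eight fields of width four (7 - irq positions), which only lands on the right bits when the field width really is 4; the new one derives the shift from the top of a 32-bit register for any width. A standalone comparison with a hypothetical 2-bit field width:

#include <stdio.h>

int main(void)
{
	int width = 2;	/* hypothetical SENSE field width */

	for (int irq = 0; irq < 4; irq++) {
		int new_shift = 32 - (irq + 1) * width;
		int old_shift = (7 - irq) * width;	/* only right for width == 4 */
		printf("irq=%d new=%d old=%d\n", irq, new_shift, old_shift);
	}
	return 0;
}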
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
 	int i;
 	struct pci_dev *tmp_hfcpci = NULL;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, hfcpci_revision);
 	printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
 
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
 	struct IsdnCardState *cs = card->cs;
 	char tmp[64];
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
 	strcpy(tmp, telespci_revision);
 	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
 	if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 05188351711d..a97263e902ff 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
 	if (i % 2)
 		goto err;
 
-	mutex_lock(&chip->lock);
-
 	for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
 		ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
-		if (ret) {
-			mutex_unlock(&chip->lock);
+		if (ret)
 			return -EINVAL;
-		}
 	}
 
-	mutex_unlock(&chip->lock);
-
 	return size;
 
 err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
 {
 	struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
 	struct lp55xx_chip *chip = led->chip;
+	int ret;
 
 	mutex_lock(&chip->lock);
 
 	chip->engine_idx = nr;
 	lp5521_load_engine(chip);
+	ret = lp5521_update_program_memory(chip, buf, len);
 
 	mutex_unlock(&chip->lock);
 
-	return lp5521_update_program_memory(chip, buf, len);
+	return ret;
 }
 store_load(1)
 store_load(2)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 6b553d9f4266..fd9ab5f61441 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
 	if (i % 2)
 		goto err;
 
-	mutex_lock(&chip->lock);
-
 	for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
 		ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
-		if (ret) {
-			mutex_unlock(&chip->lock);
+		if (ret)
 			return -EINVAL;
-		}
 	}
 
-	mutex_unlock(&chip->lock);
-
 	return size;
 
 err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
 {
 	struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
 	struct lp55xx_chip *chip = led->chip;
+	int ret;
 
 	mutex_lock(&chip->lock);
 
 	chip->engine_idx = nr;
 	lp5523_load_engine_and_select_page(chip);
+	ret = lp5523_update_program_memory(chip, buf, len);
 
 	mutex_unlock(&chip->lock);
 
-	return lp5523_update_program_memory(chip, buf, len);
+	return ret;
 }
 store_load(1)
 store_load(2)
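
The shape of the fix in both lp5521 and lp5523, sketched with hypothetical foo names: the store path takes the mutex once, so engine selection and the program upload can no longer interleave with another writer, and the upload helper itself stops locking because it is now always called with the lock held.

#include <linux/kernel.h>
#include <linux/mutex.h>

struct foo_chip {
	struct mutex lock;
	int engine_idx;
};

/* caller must hold chip->lock */
static int foo_update_program_memory(struct foo_chip *chip,
				     const char *buf, size_t len)
{
	return 0;	/* register writes elided */
}

static ssize_t foo_store_engine_load(struct foo_chip *chip, int nr,
				     const char *buf, size_t len)
{
	int ret;

	mutex_lock(&chip->lock);
	chip->engine_idx = nr;		/* select the engine ... */
	ret = foo_update_program_memory(chip, buf, len);
	mutex_unlock(&chip->lock);	/* ... and program it atomically */

	return ret;
}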
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
 	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
 	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
 	unsigned char		writeback_percent;
 	unsigned		writeback_delay;
 
-	int			writeback_rate_change;
-	int64_t			writeback_rate_derivative;
 	uint64_t		writeback_rate_target;
+	int64_t			writeback_rate_proportional;
+	int64_t			writeback_rate_derivative;
+	int64_t			writeback_rate_change;
 
 	unsigned		writeback_rate_update_seconds;
 	unsigned		writeback_rate_d_term;
 	unsigned		writeback_rate_p_term_inverse;
-	unsigned		writeback_rate_d_smooth;
 };
 
 enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
 	 * call prio_write() to keep gens from wrapping.
 	 */
 	uint8_t			need_save_prio;
-	unsigned		gc_move_threshold;
 
 	/*
 	 * If nonzero, we know we aren't going to find any buckets to invalidate
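
The resulting 16-bit gc_mark layout: bits 0-1 hold GC_MARK, bits 2-14 GC_SECTORS_USED (now 13 bits, so up to 8191 sectors), and bit 15 the new GC_MOVE flag; shrinking GC_SECTORS_USED by one bit is what frees the slot. A standalone sketch (FIELD here is a local helper, not the kernel's FIELD_GET macro):

#include <stdint.h>
#include <stdio.h>

#define FIELD(v, off, bits)	(((v) >> (off)) & ((1u << (bits)) - 1))

int main(void)
{
	/* GC_MARK_DIRTY, 4096 sectors used, GC_MOVE set */
	uint16_t gc_mark = (1u << 15) | (4096u << 2) | 1u;

	printf("mark=%u sectors=%u move=%u\n",
	       FIELD(gc_mark, 0, 2), FIELD(gc_mark, 2, 13),
	       FIELD(gc_mark, 15, 1));
	return 0;
}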
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
 			    GC_MARK_METADATA);
 
+	/* don't reclaim buckets to which writeback keys point */
+	rcu_read_lock();
+	for (i = 0; i < c->nr_uuids; i++) {
+		struct bcache_device *d = c->devices[i];
+		struct cached_dev *dc;
+		struct keybuf_key *w, *n;
+		unsigned j;
+
+		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+			continue;
+		dc = container_of(d, struct cached_dev, disk);
+
+		spin_lock(&dc->writeback_keys.lock);
+		rbtree_postorder_for_each_entry_safe(w, n,
+					&dc->writeback_keys.keys, node)
+			for (j = 0; j < KEY_PTRS(&w->key); j++)
+				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+					    GC_MARK_DIRTY);
+		spin_unlock(&dc->writeback_keys.lock);
+	}
+	rcu_read_unlock();
+
 	for_each_cache(ca, c, i) {
 		uint64_t *i;
 
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 	if (KEY_START(k) > KEY_START(insert) + sectors_found)
 		goto check_failed;
 
-	if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+	if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+	    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
 		goto check_failed;
 
 	/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
 	struct bkey	*replace_key;
 };
 
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
 {
 	struct btree_insert_op *op = container_of(b_op,
 					struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
25 unsigned i; 25 unsigned i;
26 26
27 for (i = 0; i < KEY_PTRS(k); i++) { 27 for (i = 0; i < KEY_PTRS(k); i++) {
28 struct cache *ca = PTR_CACHE(c, k, i);
29 struct bucket *g = PTR_BUCKET(c, k, i); 28 struct bucket *g = PTR_BUCKET(c, k, i);
30 29
31 if (GC_SECTORS_USED(g) < ca->gc_move_threshold) 30 if (GC_MOVE(g))
32 return true; 31 return true;
33 } 32 }
34 33
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
65 64
66static void read_moving_endio(struct bio *bio, int error) 65static void read_moving_endio(struct bio *bio, int error)
67{ 66{
67 struct bbio *b = container_of(bio, struct bbio, bio);
68 struct moving_io *io = container_of(bio->bi_private, 68 struct moving_io *io = container_of(bio->bi_private,
69 struct moving_io, cl); 69 struct moving_io, cl);
70 70
71 if (error) 71 if (error)
72 io->op.error = error; 72 io->op.error = error;
73 else if (!KEY_DIRTY(&b->key) &&
74 ptr_stale(io->op.c, &b->key, 0)) {
75 io->op.error = -EINTR;
76 }
73 77
74 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); 78 bch_bbio_endio(io->op.c, bio, error, "reading data to move");
75} 79}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
141 if (!w) 145 if (!w)
142 break; 146 break;
143 147
148 if (ptr_stale(c, &w->key, 0)) {
149 bch_keybuf_del(&c->moving_gc_keys, w);
150 continue;
151 }
152
144 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) 153 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
145 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 154 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
146 GFP_KERNEL); 155 GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
184 193
185static unsigned bucket_heap_top(struct cache *ca) 194static unsigned bucket_heap_top(struct cache *ca)
186{ 195{
187 return GC_SECTORS_USED(heap_peek(&ca->heap)); 196 struct bucket *b;
197 return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
188} 198}
189 199
190void bch_moving_gc(struct cache_set *c) 200void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
 		sectors_to_move -= GC_SECTORS_USED(b);
 	}
 
-	ca->gc_move_threshold = bucket_heap_top(ca);
-
-	pr_debug("threshold %u", ca->gc_move_threshold);
+	while (heap_pop(&ca->heap, b, bucket_cmp))
+		SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
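The movinggc change above replaces the published per-cache gc_move_threshold with an explicit per-bucket GC_MOVE mark: the buckets selected for copying are popped off the heap and flagged, so moving_pred() tests an exact decision instead of re-deriving it from a threshold that may have moved underneath it. A minimal userspace model of that policy, using simplified stand-in types rather than the bcache API:

#include <stdbool.h>
#include <stdio.h>

struct bucket {
	unsigned sectors_used;
	bool gc_move;			/* models SET_GC_MOVE(b, 1) */
};

/* mark the exact candidates instead of publishing a usage threshold */
static void mark_candidates(struct bucket *b, int n, unsigned budget)
{
	for (int i = 0; i < n && budget; i++)
		if (b[i].sectors_used <= budget) {
			b[i].gc_move = true;	/* per-bucket decision */
			budget -= b[i].sectors_used;
		}
}

/* models moving_pred(): copy a key iff its bucket was explicitly marked */
static bool should_move(const struct bucket *b)
{
	return b->gc_move;
}

int main(void)
{
	struct bucket b[3] = { { 10, false }, { 300, false }, { 20, false } };

	mark_candidates(b, 3, 64);
	for (int i = 0; i < 3; i++)
		printf("bucket %d: move=%d\n", i, should_move(&b[i]));
	return 0;
}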
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
 {
 	return ca->sb.block_size	== c->sb.block_size &&
-	       ca->sb.bucket_size	== c->sb.block_size &&
+	       ca->sb.bucket_size	== c->sb.bucket_size &&
 	       ca->sb.nr_in_set		== c->sb.nr_in_set;
 }
 
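The one-word fix above corrects a copy-and-paste comparison: a cache's bucket size was being checked against the set's block size, so a cache could only attach when the two values happened to coincide. A standalone check with hypothetical sizes showing what the corrected predicate accepts:

#include <stdbool.h>
#include <stdio.h>

struct sb { unsigned block_size, bucket_size, nr_in_set; };

static bool can_attach(struct sb *ca, struct sb *c)
{
	return ca->block_size == c->block_size &&
	       ca->bucket_size == c->bucket_size &&	/* was c->block_size */
	       ca->nr_in_set == c->nr_in_set;
}

int main(void)
{
	struct sb ca = { 8, 1024, 2 }, c = { 8, 1024, 2 };

	/* the old predicate needed bucket_size == block_size (1024 == 8) */
	printf("attach ok: %d\n", can_attach(&ca, &c));
	return 0;
}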
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
 rw_attribute(writeback_rate_update_seconds);
 rw_attribute(writeback_rate_d_term);
 rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
 read_attribute(writeback_rate_debug);
 
 read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
 	var_printf(writeback_running,	"%i");
 	var_print(writeback_delay);
 	var_print(writeback_percent);
-	sysfs_print(writeback_rate,	dc->writeback_rate.rate);
+	sysfs_hprint(writeback_rate,	dc->writeback_rate.rate << 9);
 
 	var_print(writeback_rate_update_seconds);
 	var_print(writeback_rate_d_term);
 	var_print(writeback_rate_p_term_inverse);
-	var_print(writeback_rate_d_smooth);
 
 	if (attr == &sysfs_writeback_rate_debug) {
+		char rate[20];
 		char dirty[20];
-		char derivative[20];
 		char target[20];
-		bch_hprint(dirty,
-			   bcache_dev_sectors_dirty(&dc->disk) << 9);
-		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		char proportional[20];
+		char derivative[20];
+		char change[20];
+		s64 next_io;
+
+		bch_hprint(rate,	dc->writeback_rate.rate << 9);
+		bch_hprint(dirty,	bcache_dev_sectors_dirty(&dc->disk) << 9);
 		bch_hprint(target,	dc->writeback_rate_target << 9);
+		bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		bch_hprint(change,	dc->writeback_rate_change << 9);
+
+		next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+				    NSEC_PER_MSEC);
 
 		return sprintf(buf,
-			       "rate:\t\t%u\n"
-			       "change:\t\t%i\n"
+			       "rate:\t\t%s/sec\n"
 			       "dirty:\t\t%s\n"
+			       "target:\t\t%s\n"
+			       "proportional:\t%s\n"
 			       "derivative:\t%s\n"
-			       "target:\t\t%s\n",
-			       dc->writeback_rate.rate,
-			       dc->writeback_rate_change,
-			       dirty, derivative, target);
+			       "change:\t\t%s/sec\n"
+			       "next io:\t%llims\n",
+			       rate, dirty, target, proportional,
+			       derivative, change, next_io);
 	}
 
 	sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
 	struct kobj_uevent_env *env;
 
 #define d_strtoul(var)		sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
 #define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
 
 	sysfs_strtoul(data_csum,	dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
 	d_strtoul(writeback_metadata);
 	d_strtoul(writeback_running);
 	d_strtoul(writeback_delay);
-	sysfs_strtoul_clamp(writeback_rate,
-			    dc->writeback_rate.rate, 1, 1000000);
+
 	sysfs_strtoul_clamp(writeback_percent,	dc->writeback_percent, 0, 40);
 
-	d_strtoul(writeback_rate_update_seconds);
+	sysfs_strtoul_clamp(writeback_rate,
+			    dc->writeback_rate.rate, 1, INT_MAX);
+
+	d_strtoul_nonzero(writeback_rate_update_seconds);
 	d_strtoul(writeback_rate_d_term);
-	d_strtoul(writeback_rate_p_term_inverse);
-	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
-			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
-	d_strtoul(writeback_rate_d_smooth);
+	d_strtoul_nonzero(writeback_rate_p_term_inverse);
 
 	d_strtoi_h(sequential_cutoff);
 	d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_writeback_rate_update_seconds,
 	&sysfs_writeback_rate_d_term,
 	&sysfs_writeback_rate_p_term_inverse,
-	&sysfs_writeback_rate_d_smooth,
 	&sysfs_writeback_rate_debug,
 	&sysfs_dirty_data,
 	&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
 	uint64_t now = local_clock();
 
-	d->next += div_u64(done, d->rate);
+	d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+	if (time_before64(now + NSEC_PER_SEC, d->next))
+		d->next = now + NSEC_PER_SEC;
+
+	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+		d->next = now - NSEC_PER_SEC * 2;
 
 	return time_after64(d->next, now)
 	       ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
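With this change d->rate is interpreted in units per second (each call banks done * NSEC_PER_SEC / rate nanoseconds), and d->next is clamped to at most one second ahead of and two seconds behind the current clock, so an idle stretch cannot accumulate unbounded credit and a stall cannot accumulate unbounded debt. A userspace sketch of the same arithmetic, with local_clock() replaced by a plain parameter and the kernel's wrap-safe time_before64/time_after64 helpers simplified to ordinary comparisons:

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit { uint64_t next, rate; };

/* returns how long to wait, in ns; mirrors the clamped token scheme */
static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	if (d->next > now + NSEC_PER_SEC)	/* at most 1s of debt */
		d->next = now + NSEC_PER_SEC;
	if (now > 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
		d->next = now - 2 * NSEC_PER_SEC; /* at most 2s of credit */

	return d->next > now ? d->next - now : 0;
}

int main(void)
{
	struct ratelimit d = { 0, 1024 };	/* 1024 units per second */

	/* a huge burst banks at most one second of debt */
	printf("burst delay: %" PRIu64 " ns\n", next_delay(&d, 1 << 20, 0));
	/* a long idle gap banks at most two seconds of credit */
	printf("after idle:  %" PRIu64 " ns\n",
	       next_delay(&d, 1024, 10 * NSEC_PER_SEC));
	return 0;
}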
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
 	_r;							\
 })
 
-#define heap_peek(h)	((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
 
 #define heap_full(h)	((h)->used == (h)->size)
 
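heap_peek() previously tested the heap's allocated capacity (size) instead of its element count (used), so it could hand back data[0] of an empty heap; the bucket_heap_top() change in movinggc.c above adds the matching NULL fallback. A minimal model of the corrected macro (returning 0 instead of NULL for a scalar heap):

#include <stdio.h>

struct heap { unsigned used, size; int *data; };

#define heap_peek(h) ((h)->used ? (h)->data[0] : 0)	/* was (h)->size */

int main(void)
{
	int storage[8] = { 42 };
	struct heap h = { .used = 0, .size = 8, .data = storage };

	/* capacity is 8 but nothing is in the heap: peek yields nothing */
	printf("%d\n", heap_peek(&h));
	h.used = 1;
	printf("%d\n", heap_peek(&h));
	return 0;
}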
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
 
 	/* PD controller */
 
-	int change = 0;
-	int64_t error;
 	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
 	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+	int64_t proportional = dirty - target;
+	int64_t change;
 
 	dc->disk.sectors_dirty_last = dirty;
 
-	derivative *= dc->writeback_rate_d_term;
-	derivative = clamp(derivative, -dirty, dirty);
+	/* Scale to sectors per second */
 
-	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
-			      dc->writeback_rate_d_smooth, 0);
+	proportional *= dc->writeback_rate_update_seconds;
+	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
 
-	/* Avoid divide by zero */
-	if (!target)
-		goto out;
+	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
 
-	error = div64_s64((dirty + derivative - target) << 8, target);
+	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+			      (dc->writeback_rate_d_term /
+			       dc->writeback_rate_update_seconds) ?: 1, 0);
+
+	derivative *= dc->writeback_rate_d_term;
+	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
 
-	change = div_s64((dc->writeback_rate.rate * error) >> 8,
-			 dc->writeback_rate_p_term_inverse);
+	change = proportional + derivative;
 
 	/* Don't increase writeback rate if the device isn't keeping up */
 	if (change > 0 &&
 	    time_after64(local_clock(),
-			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+			 dc->writeback_rate.next + NSEC_PER_MSEC))
 		change = 0;
 
 	dc->writeback_rate.rate =
-		clamp_t(int64_t, dc->writeback_rate.rate + change,
+		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
 			1, NSEC_PER_MSEC);
-out:
+
+	dc->writeback_rate_proportional = proportional;
 	dc->writeback_rate_derivative = derivative;
 	dc->writeback_rate_change = change;
 	dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
-	uint64_t ret;
-
 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 	    !dc->writeback_percent)
 		return 0;
 
-	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
-	return min_t(uint64_t, ret, HZ);
+	return bch_next_delay(&dc->writeback_rate, sectors);
 }
 
 struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
 		if (KEY_START(&w->key) != dc->last_read ||
 		    jiffies_to_msecs(delay) > 50)
 			while (!kthread_should_stop() && delay)
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 
 		dc->last_read	= KEY_OFFSET(&w->key);
 
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
 			while (delay &&
 			       !kthread_should_stop() &&
 			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
-				delay = schedule_timeout_interruptible(delay);
+				delay = schedule_timeout_uninterruptible(delay);
 		}
 	}
 
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
+
+	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
 	dc->writeback_delay		= 30;
 	dc->writeback_rate.rate		= 1024;
 
-	dc->writeback_rate_update_seconds = 30;
-	dc->writeback_rate_d_term	= 16;
-	dc->writeback_rate_p_term_inverse = 64;
-	dc->writeback_rate_d_smooth	= 8;
+	dc->writeback_rate_update_seconds = 5;
+	dc->writeback_rate_d_term	= 30;
+	dc->writeback_rate_p_term_inverse = 6000;
 
 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
 					      "bcache_writeback");
 	if (IS_ERR(dc->writeback_thread))
 		return PTR_ERR(dc->writeback_thread);
 
-	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
 	schedule_delayed_work(&dc->writeback_rate_update,
 			      dc->writeback_rate_update_seconds * HZ);
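The rewritten controller computes an explicit proportional term, (dirty - target) * update_seconds / p_term_inverse, plus a smoothed derivative of the dirty count, and applies change = proportional + derivative on every update. With the new defaults (5-second updates, p_term_inverse 6000, d_term 30), an error of 6000 sectors nudges the rate by about 5 sectors/s per update from the proportional term alone. A standalone sketch of one update step; the moving average below is a crude stand-in for the kernel's ewma_add(), not the exact filter:

#include <stdint.h>
#include <stdio.h>

/* one update step of the writeback PD controller, modeled in userspace */
struct pd {
	int64_t rate;			/* sectors per second */
	int64_t sectors_dirty_last;
	int64_t derivative_ewma;
	int64_t update_seconds;		/* 5 with the new defaults */
	int64_t p_term_inverse;		/* 6000 */
	int64_t d_term;			/* 30 */
};

static void update(struct pd *pd, int64_t dirty, int64_t target)
{
	int64_t derivative = dirty - pd->sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t weight = pd->d_term / pd->update_seconds;

	pd->sectors_dirty_last = dirty;

	proportional = proportional * pd->update_seconds / pd->p_term_inverse;
	derivative /= pd->update_seconds;	/* scale to sectors/sec */

	if (!weight)
		weight = 1;
	/* crude moving average standing in for the kernel's ewma_add() */
	pd->derivative_ewma += (derivative - pd->derivative_ewma) / weight;
	derivative = pd->derivative_ewma * pd->d_term / pd->p_term_inverse;

	pd->rate += proportional + derivative;
	if (pd->rate < 1)			/* same floor as the driver */
		pd->rate = 1;
}

int main(void)
{
	struct pd pd = { 1024, 0, 0, 5, 6000, 30 };

	update(&pd, 6000, 0);	/* 6000 sectors over target */
	printf("new rate: %lld sectors/s\n", (long long)pd.rate);
	return 0;
}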
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20afbdcac..705698fd2c7e 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
 
 	pcr->remove_pci = true;
 
-	cancel_delayed_work(&pcr->carddet_work);
-	cancel_delayed_work(&pcr->idle_work);
+	/* Disable interrupts at the pcr level */
+	spin_lock_irq(&pcr->lock);
+	rtsx_pci_writel(pcr, RTSX_BIER, 0);
+	pcr->bier = 0;
+	spin_unlock_irq(&pcr->lock);
+
+	cancel_delayed_work_sync(&pcr->carddet_work);
+	cancel_delayed_work_sync(&pcr->idle_work);
 
 	mfd_remove_devices(&pcidev->dev);
 
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d131fef2..0f55589a56b8 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	info->map.cached =
-		ioremap_cached(info->map.phys, info->map.size);
+		ioremap_cache(info->map.phys, info->map.size);
 	if (!info->map.cached)
 		printk(KERN_WARNING "Failed to ioremap cached %s\n",
 		       info->map.name);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..4ced59436558 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
+	__get_state_machine_lock(port);
+
 	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
 		(__get_link_speed(port) << 1);
 	pr_debug("Port %d changed speed\n", port->actor_port_number);
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
+	__get_state_machine_lock(port);
+
 	port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
 	port->actor_oper_port_key = port->actor_admin_port_key |=
 		__get_duplex(port);
 	pr_debug("Port %d changed duplex\n", port->actor_port_number);
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 	port = &(SLAVE_AD_INFO(slave).port);
 
-	// if slave is null, the whole port is not initialized
+	/* if slave is null, the whole port is not initialized */
 	if (!port->slave) {
 		pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
 			   slave->bond->dev->name, slave->dev->name);
 		return;
 	}
 
-	// on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
-	// on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+	__get_state_machine_lock(port);
+	/* on link down we are zeroing duplex and speed since
+	 * some of the adaptors(ce1000.lan) report full duplex/speed
+	 * instead of N/A(duplex) / 0(speed).
+	 *
+	 * on link up we are forcing recheck on the duplex and speed since
+	 * some of he adaptors(ce1000.lan) report.
+	 */
 	if (link == BOND_LINK_UP) {
 		port->is_enabled = true;
 		port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 		port->actor_oper_port_key = (port->actor_admin_port_key &=
 					     ~AD_SPEED_KEY_BITS);
 	}
-	//BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
-	// there is no need to reselect a new aggregator, just signal the
-	// state machines to reinitialize
+	pr_debug("Port %d changed link status to %s",
+		 port->actor_port_number,
+		 (link == BOND_LINK_UP) ? "UP" : "DOWN");
+	/* there is no need to reselect a new aggregator, just signal the
+	 * state machines to reinitialize
+	 */
 	port->sm_vars |= AD_PORT_BEGIN;
+
+	__release_state_machine_lock(port);
 }
 
 /*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..4b8c58b0ec24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
 
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..8aeec0b4601a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
 			usb_unanchor_urb(urb);
 			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
 					  urb->transfer_dma);
+			usb_free_urb(urb);
 			break;
 		}
 
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 	 * allowed (MAX_TX_URBS).
 	 */
 	if (!context) {
-		usb_unanchor_urb(urb);
 		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+		usb_free_urb(urb);
 
 		netdev_warn(netdev, "couldn't find free context\n");
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
 	/* set LED in default state (end of init phase) */
 	pcan_usb_pro_set_led(dev, 0, 1);
 
+	kfree(bi);
+	kfree(fi);
+
 	return 0;
 
  err_out:
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..248baf6273fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Make sure pointer to data buffer is set */
 	wmb();
 
+	skb_tx_timestamp(skb);
+
 	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
 	/* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
-	skb_tx_timestamp(skb);
-
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
 	 * Mask some pcie error bits
 	 */
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
-	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
-	data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
-	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	if (pos) {
+		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+		data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+		pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	}
 	/* clear error status */
 	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
 			PCI_EXP_DEVSTA_NFED |
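pci_find_ext_capability() returns 0 when the device does not expose the requested extended capability, and the old code used that 0 directly as a config-space offset. The fix only performs the AER severity fixup when the capability actually exists. The shape of the guard, with a toy config space and illustrative offsets and bits standing in for the real PCI constants:

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg[64];			/* toy config space */

/* returns the capability offset, or 0 when absent, like the PCI helper */
static int find_ext_cap(int id) { (void)id; return 0; }

int main(void)
{
	int pos = find_ext_cap(1);		/* 1 = AER, illustrative */

	if (pos) {				/* the fix: guard on pos */
		uint32_t data = cfg[pos + 3];	/* offsets/bits illustrative */
		data &= ~0x30u;
		cfg[pos + 3] = data;
	} else {
		puts("no AER capability; config space left untouched");
	}
	return 0;
}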
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE		      0
 #define BNX2X_FP_STATE_NAPI		     (1 << 0)    /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL		     (1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD	     (1 << 2)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	     (1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED		     (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD	     (1 << 3)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD	     (1 << 4)    /* poll yielded this FP */
+#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = true;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	if (fp->state & BNX2X_FP_LOCKED) {
 		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 		/* we don't care if someone yielded */
 		fp->state = BNX2X_FP_STATE_NAPI;
 	}
-	spin_unlock(&fp->lock);
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = false;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	WARN_ON(fp->state &
 		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
-	spin_unlock(&fp->lock);
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
 	spin_unlock_bh(&fp->lock);
 	return rc;
 }
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 	return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	int rc = true;
+
+	spin_lock_bh(&fp->lock);
+	if (fp->state & BNX2X_FP_OWNED)
+		rc = false;
+	fp->state |= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
+
+	return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
 	 * Therefore, if they would have been defined in the same union,
 	 * data can get corrupted.
 	 */
-	struct afex_vif_list_ramrod_data func_afex_rdata;
+	union {
+		struct afex_vif_list_ramrod_data viflist_data;
+		struct function_update_data func_update;
+	} func_afex_rdata;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
 #endif /* bnx2x.h */
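The new BNX2X_FP_STATE_DISABLED bit makes ring shutdown sticky: the unlock paths mask the state with the bit instead of resetting to IDLE, so a disable request issued while NAPI or a socket poller owns the ring survives the owner's unlock, and bnx2x_fp_ll_disable() reports success only once no owner remains. A single-threaded model of the bit discipline (the spinlock is elided and names are simplified):

#include <stdbool.h>
#include <stdio.h>

enum {
	FP_IDLE	    = 0,
	FP_NAPI	    = 1 << 0,
	FP_POLL	    = 1 << 1,
	FP_DISABLED = 1 << 2,
};
#define FP_OWNED (FP_NAPI | FP_POLL)

static unsigned state;

static bool lock_napi(void)
{
	if (state & (FP_OWNED | FP_DISABLED))
		return false;		/* owned or shutting down */
	state |= FP_NAPI;
	return true;
}

static void unlock_napi(void)
{
	state &= FP_DISABLED;		/* back to idle, keep DISABLED */
}

static bool disable(void)
{
	state |= FP_DISABLED;		/* sticky: survives unlock */
	return !(state & FP_OWNED);
}

int main(void)
{
	lock_napi();
	printf("disable while owned: %d\n", disable());	/* 0: must retry */
	unlock_napi();
	printf("disable after unlock: %d\n", disable());	/* 1 */
	printf("relock after disable: %d\n", lock_napi());	/* 0 */
	return 0;
}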
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	struct sk_buff *skb = tx_buf->skb;
 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 	int nbd;
+	u16 split_bd_len = 0;
 
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 	   txdata->txq_index, idx, tx_buf, skb);
 
-	/* unmap first bd */
 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
-	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 		--nbd;
 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
-	/* ...and the TSO split header bd since they have no mapping */
+	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 		--nbd;
 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
 
+	/* unmap first bd */
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+			 DMA_TO_DEVICE);
+
 	/* now free frags */
 	while (nbd > 0) {
 
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_rx_queue_cnic(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_eth_queue(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 		bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
 	} else {
+		/* Enable Auto-Detect to support 1G over CL37 as well */
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+		/* Force cl48 sync_status LOW to avoid getting stuck in CL73
+		 * parallel-detect loop when CL73 and CL37 are enabled.
+		 */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+		bnx2x_set_aer_mmd(params, phy);
+
 		bnx2x_disable_kr2(params, vars, phy);
 	}
 
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 			*edc_mode = EDC_MODE_ACTIVE_DAC;
 		else
 			check_limiting_mode = 1;
-	} else if (copper_module_type &
-		SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+	} else {
+		*edc_mode = EDC_MODE_PASSIVE_DAC;
+		/* Even in case PASSIVE_DAC indication is not set,
+		 * treat it as a passive DAC cable, since some cables
+		 * don't have this indication.
+		 */
+		if (copper_module_type &
+		    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
 			DP(NETIF_MSG_LINK,
 			   "Passive Copper cable detected\n");
-			*edc_mode =
-			      EDC_MODE_PASSIVE_DAC;
-	} else {
-		DP(NETIF_MSG_LINK,
-		   "Unknown copper-cable-type 0x%x !!!\n",
-		   copper_module_type);
-		return -EINVAL;
+		} else {
+			DP(NETIF_MSG_LINK,
+			   "Unknown copper-cable-type\n");
+		}
 	}
 	break;
 }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 				 (1<<11));
 
 	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	      (phy->speed_cap_mask &
-	       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
-	      (phy->req_line_speed == SPEED_1000)) {
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
 		an_1000_val |= (1<<8);
 		autoneg_val |= (1<<9 | 1<<12);
 		if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 			 0x09,
 			 &an_1000_val);
 
-	/* Set 100 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	      (phy->speed_cap_mask &
-	       (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-		PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
-		an_10_100_val |= (1<<7);
-		/* Enable autoneg and restart autoneg for legacy speeds */
-		autoneg_val |= (1<<9 | 1<<12);
-
-		if (phy->req_duplex == DUPLEX_FULL)
-			an_10_100_val |= (1<<8);
-		DP(NETIF_MSG_LINK, "Advertising 100M\n");
-	}
-
-	/* Set 10 speed advertisement */
-	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-	      (phy->speed_cap_mask &
-	       (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-		PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
-		an_10_100_val |= (1<<5);
-		autoneg_val |= (1<<9 | 1<<12);
-		if (phy->req_duplex == DUPLEX_FULL)
+	/* Advertise 10/100 link speed */
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+			an_10_100_val |= (1<<5);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
 			an_10_100_val |= (1<<6);
-		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+			an_10_100_val |= (1<<7);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+		}
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+			an_10_100_val |= (1<<8);
+			autoneg_val |= (1<<9 | 1<<12);
+			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+		}
 	}
 
 	/* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
 
+	/* Do not touch the link in case physical link down */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return 1;
+
 	/* a. Update shmem->link_status accordingly
 	 * b. Update link_vars->link_up
 	 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 	 */
 	not_kr2_device = (((base_page & 0x8000) == 0) ||
 			  (((base_page & 0x8000) &&
-			    ((next_page & 0xe0) == 0x2))));
+			    ((next_page & 0xe0) == 0x20))));
 
 	/* In case KR2 is already disabled, check if we need to re-enable it */
 	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
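The KR2 work-around fix above is a masking bug: next_page & 0xe0 keeps only bits 7:5, so the result is always a multiple of 0x20 and could never equal 0x2, meaning the old test classified every link partner as a non-KR2 device. The claim is exhaustively checkable in a few lines:

#include <stdio.h>

int main(void)
{
	for (unsigned next_page = 0; next_page < 0x100; next_page++)
		if ((next_page & 0xe0) == 0x2)
			printf("0x%02x matches 0x2\n", next_page);
	/* prints nothing above: bits 7:5 only form multiples of 0x20 */
	printf("0x2f & 0xe0 = 0x%02x\n", 0x2f & 0xe0);	/* 0x20, the fix */
	return 0;
}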
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..8b3107b2fcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 		}
 	}
 
-	/* adjust igu_sb_cnt to MF for E1x */
-	if (CHIP_IS_E1x(bp) && IS_MF(bp))
-		bp->igu_sb_cnt /= E1HVN_MAX;
+	/* adjust igu_sb_cnt to MF for E1H */
+	if (CHIP_IS_E1H(bp) && IS_MF(bp))
+		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
 
 	/* port info */
 	bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_WC_REG_RX1_PCI_CTRL		0x80ca
 #define MDIO_WC_REG_RX2_PCI_CTRL		0x80da
 #define MDIO_WC_REG_RX3_PCI_CTRL		0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI	0x80fa
 #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G	0x8104
 #define MDIO_WC_REG_XGXS_STATUS3		0x8129
 #define MDIO_WC_REG_PAR_DET_10G_STATUS		0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	struct bnx2x_vlan_mac_ramrod_params p;
 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+	unsigned long flags;
 	int read_lock;
 	int rc = 0;
 
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 	spin_lock_bh(&exeq->lock);
 
 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
-		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-		    *vlan_mac_flags) {
+		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
 			if (rc) {
 				BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 		return read_lock;
 
 	list_for_each_entry(pos, &o->head, link) {
-		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+		flags = pos->vlan_mac_flags;
+		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
 			rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
 	struct bnx2x_raw_obj *r = &o->raw;
 
 	/* Do nothing if only driver cleanup was requested */
-	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+		   p->ramrod_flags);
 		return 0;
+	}
 
 	r->set_pending(r);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
 	BNX2X_DONT_CONSUME_CAM_CREDIT,
 	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
+				 1 << BNX2X_ETH_MAC | \
+				 1 << BNX2X_ISCSI_ETH_MAC | \
+				 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
 struct bnx2x_vlan_mac_ramrod_params {
 	/* Object to run the command from */
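vlan_mac_flags carries both the filter's type bits and control bits such as BNX2X_DONT_CONSUME_CAM_CREDIT, so comparing raw values made the delete-all paths in bnx2x_sp.c skip entries whose control bits happened to differ. BNX2X_VLAN_MAC_CMP_FLAGS() restricts the comparison to the type bits. A model of the comparison, with hypothetical bit assignments in place of the real enum values:

#include <stdbool.h>
#include <stdio.h>

enum { ETH_MAC = 1 << 0, UC_LIST_MAC = 1 << 1, DONT_CONSUME_CREDIT = 1 << 7 };

#define CMP_MASK	(ETH_MAC | UC_LIST_MAC)
#define CMP_FLAGS(f)	((f) & CMP_MASK)

static bool same_filter(unsigned long a, unsigned long b)
{
	return CMP_FLAGS(a) == CMP_FLAGS(b);	/* ignore control bits */
}

int main(void)
{
	unsigned long stored = ETH_MAC | DONT_CONSUME_CREDIT;
	unsigned long wanted = ETH_MAC;

	printf("raw compare: %d\n", stored == wanted);	      /* 0: missed */
	printf("masked compare: %d\n", same_filter(stored, wanted)); /* 1 */
	return 0;
}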
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2e46c28fc601..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	/* next state */
 	vfop->state = BNX2X_VFOP_RXMODE_DONE;
 
+	/* record the accept flags in vfdb so hypervisor can modify them
+	 * if necessary
+	 */
+	bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+		ramrod->rx_accept_flags;
 	vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
 	bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 op_err:
@@ -1224,39 +1229,43 @@ op_pending:
 	return;
 }
 
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+				  struct bnx2x_rx_mode_ramrod_params *ramrod,
+				  struct bnx2x_virtf *vf,
+				  unsigned long accept_flags)
+{
+	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+	memset(ramrod, 0, sizeof(*ramrod));
+	ramrod->cid = vfq->cid;
+	ramrod->cl_id = vfq_cl_id(vf, vfq);
+	ramrod->rx_mode_obj = &bp->rx_mode_obj;
+	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+	ramrod->rx_accept_flags = accept_flags;
+	ramrod->tx_accept_flags = accept_flags;
+	ramrod->pstate = &vf->filter_state;
+	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+	set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
 			  struct bnx2x_virtf *vf,
 			  struct bnx2x_vfop_cmd *cmd,
 			  int qid, unsigned long accept_flags)
 {
-	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
 	if (vfop) {
 		struct bnx2x_rx_mode_ramrod_params *ramrod =
 			&vf->op_params.rx_mode;
 
-		memset(ramrod, 0, sizeof(*ramrod));
-
-		/* Prepare ramrod parameters */
-		ramrod->cid = vfq->cid;
-		ramrod->cl_id = vfq_cl_id(vf, vfq);
-		ramrod->rx_mode_obj = &bp->rx_mode_obj;
-		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
-		ramrod->rx_accept_flags = accept_flags;
-		ramrod->tx_accept_flags = accept_flags;
-		ramrod->pstate = &vf->filter_state;
-		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
-		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
-		set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
-		ramrod->rdata =
-			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
-		ramrod->rdata_mapping =
-			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
 
 		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
 				 bnx2x_vfop_rxmode, cmd->done);
@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
 		bnx2x_iov_static_resc(bp, vf);
 	}
 
-	/* prepare msix vectors in VF configuration space */
+	/* prepare msix vectors in VF configuration space - the value in the
+	 * PCI configuration space should be the index of the last entry,
+	 * namely one less than the actual size of the table
+	 */
 	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
 		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
 		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
-		       num_vf_queues);
+		       num_vf_queues - 1);
 		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
-		   vf_idx, num_vf_queues);
+		   vf_idx, num_vf_queues - 1);
 	}
 	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
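The PCI MSI-X Table Size field is encoded as N - 1 (a field value of 0 means one table entry), so writing the raw queue count advertised one vector too many; the loop above now writes num_vf_queues - 1 and logs the encoded value. The encoding round-trip in miniature:

#include <stdio.h>

int main(void)
{
	unsigned num_vf_queues = 8;
	unsigned encoded = num_vf_queues - 1;	/* PCI MSI-X: table size - 1 */

	printf("entries %u -> field 0x%x -> decoded %u\n",
	       num_vf_queues, encoded, encoded + 1);
	return 0;
}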
@@ -3436,10 +3448,18 @@ out:
3436 3448
3437int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3449int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3438{ 3450{
3451 struct bnx2x_queue_state_params q_params = {NULL};
3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3453 struct bnx2x_queue_update_params *update_params;
3454 struct pf_vf_bulletin_content *bulletin = NULL;
3455 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3439 struct bnx2x *bp = netdev_priv(dev); 3456 struct bnx2x *bp = netdev_priv(dev);
3440 int rc, q_logical_state; 3457 struct bnx2x_vlan_mac_obj *vlan_obj;
3458 unsigned long vlan_mac_flags = 0;
3459 unsigned long ramrod_flags = 0;
3441 struct bnx2x_virtf *vf = NULL; 3460 struct bnx2x_virtf *vf = NULL;
3442 struct pf_vf_bulletin_content *bulletin = NULL; 3461 unsigned long accept_flags;
3462 int rc;
3443 3463
3444 /* sanity and init */ 3464 /* sanity and init */
3445 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3465 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 	/* update PF's copy of the VF's bulletin. No point in posting the vlan
 	 * to the VF since it doesn't have anything to do with it. But it useful
 	 * to store it here in case the VF is not up yet and we can only
-	 * configure the vlan later when it does.
+	 * configure the vlan later when it does. Treat vlan id 0 as remove the
+	 * Host tag.
 	 */
-	bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	if (vlan > 0)
+		bulletin->valid_bitmap |= 1 << VLAN_VALID;
+	else
+		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
 	/* is vf initialized and queue set up? */
-	q_logical_state =
-		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
-	if (vf->state == VF_ENABLED &&
-	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-		/* configure the vlan in device on this vf's queue */
-		unsigned long ramrod_flags = 0;
-		unsigned long vlan_mac_flags = 0;
-		struct bnx2x_vlan_mac_obj *vlan_obj =
-			&bnx2x_leading_vfq(vf, vlan_obj);
-		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-		struct bnx2x_queue_state_params q_params = {NULL};
-		struct bnx2x_queue_update_params *update_params;
+	if (vf->state != VF_ENABLED ||
+	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+		return rc;
 
-		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-		if (rc)
-			return rc;
-		memset(&ramrod_param, 0, sizeof(ramrod_param));
+	/* configure the vlan in device on this vf's queue */
+	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+	if (rc)
+		return rc;
 
-		/* must lock vfpf channel to protect against vf flows */
-		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+	/* must lock vfpf channel to protect against vf flows */
+	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
-		/* remove existing vlans */
-		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
-					  &ramrod_flags);
-		if (rc) {
-			BNX2X_ERR("failed to delete vlans\n");
-			rc = -EINVAL;
-			goto out;
-		}
+	/* remove existing vlans */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+				  &ramrod_flags);
+	if (rc) {
+		BNX2X_ERR("failed to delete vlans\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (vlan)
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = vlan_obj;
+	ramrod_param.ramrod_flags = ramrod_flags;
+	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		&ramrod_param.user_req.vlan_mac_flags);
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		rc = -EINVAL;
+		goto out;
+	}
 
-		/* send queue update ramrod to configure default vlan and silent
-		 * vlan removal
+	/* send queue update ramrod to configure default vlan and silent
+	 * vlan removal
+	 */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+	q_params.cmd = BNX2X_Q_CMD_UPDATE;
+	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+	update_params = &q_params.params.update;
+	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		  &update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &update_params->update_flags);
+	if (vlan == 0) {
+		/* if vlan is 0 then we want to leave the VF traffic
+		 * untagged, and leave the incoming traffic untouched
+		 * (i.e. do not remove any vlan tags).
 		 */
-		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-		q_params.cmd = BNX2X_Q_CMD_UPDATE;
-		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-		update_params = &q_params.params.update;
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+			    &update_params->update_flags);
+		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+			    &update_params->update_flags);
+	} else {
+		/* configure default vlan to vf queue and set silent
+		 * vlan removal (the vf remains unaware of this vlan).
+		 */
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
 			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
 			  &update_params->update_flags);
+		update_params->def_vlan = vlan;
+		update_params->silent_removal_value =
+			vlan & VLAN_VID_MASK;
+		update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
 
-		if (vlan == 0) {
-			/* if vlan is 0 then we want to leave the VF traffic
-			 * untagged, and leave the incoming traffic untouched
-			 * (i.e. do not remove any vlan tags).
-			 */
-			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-				    &update_params->update_flags);
-			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-				    &update_params->update_flags);
-		} else {
-			/* configure the new vlan to device */
-			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-			ramrod_param.vlan_mac_obj = vlan_obj;
-			ramrod_param.ramrod_flags = ramrod_flags;
-			ramrod_param.user_req.u.vlan.vlan = vlan;
-			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-			if (rc) {
-				BNX2X_ERR("failed to configure vlan\n");
-				rc = -EINVAL;
-				goto out;
-			}
-
-			/* configure default vlan to vf queue and set silent
-			 * vlan removal (the vf remains unaware of this vlan).
-			 */
-			update_params = &q_params.params.update;
-			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-				  &update_params->update_flags);
-			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-				  &update_params->update_flags);
-			update_params->def_vlan = vlan;
-		}
+	/* Update the Queue state */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Failed to configure default VLAN\n");
+		goto out;
+	}
 
-		/* Update the Queue state */
-		rc = bnx2x_queue_state_change(bp, &q_params);
-		if (rc) {
3546 BNX2X_ERR("Failed to configure default VLAN\n");
3547 goto out;
3548 }
3549 3583
3550 /* clear the flag indicating that this VF needs its vlan 3584 /* clear the flag indicating that this VF needs its vlan
3551 * (will only be set if the HV configured the Vlan before vf was 3585 * (will only be set if the HV configured the Vlan before vf was
3552 * up and we were called because the VF came up later 3586 * up and we were called because the VF came up later
3553 */ 3587 */
3554out: 3588out:
3555 vf->cfg_flags &= ~VF_CFG_VLAN; 3589 vf->cfg_flags &= ~VF_CFG_VLAN;
3556 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3590 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3557 } 3591
3558 return rc; 3592 return rc;
3559} 3593}
3560 3594
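The hunk above reworks bnx2x_set_vf_vlan so that vlan id 0 means "remove the host tag": the bulletin VLAN_VALID bit, the ACCEPT_ANY_VLAN rx-mode flag, and the queue's default-vlan/silent-removal flags all key off whether vlan is zero. A minimal standalone C model of that decision logic follows; the vf_cfg struct and the harness are invented for illustration and only mirror the driver's flag names.

/* Standalone model of the vlan-0 ("remove host tag") decision logic in
 * bnx2x_set_vf_vlan above.  The flag names mirror the driver; the vf_cfg
 * struct and the tiny harness are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_MASK 0x0fff

struct vf_cfg {
	bool vlan_valid;        /* bulletin VLAN_VALID bit */
	bool accept_any_vlan;   /* rx-mode BNX2X_ACCEPT_ANY_VLAN */
	bool def_vlan_en;       /* queue default-vlan insertion */
	bool silent_vlan_rem;   /* silent vlan removal on rx */
	uint16_t def_vlan;
};

static void set_vf_vlan(struct vf_cfg *cfg, uint16_t vlan)
{
	/* vlan id 0 clears the host tag instead of programming one */
	cfg->vlan_valid = vlan > 0;
	cfg->accept_any_vlan = (vlan == 0);
	cfg->def_vlan_en = (vlan != 0);
	cfg->silent_vlan_rem = (vlan != 0);
	cfg->def_vlan = vlan & VLAN_VID_MASK;
}

int main(void)
{
	struct vf_cfg cfg;

	set_vf_vlan(&cfg, 100);
	printf("vlan 100: def_vlan_en=%d accept_any=%d\n",
	       cfg.def_vlan_en, cfg.accept_any_vlan);
	set_vf_vlan(&cfg, 0);
	printf("vlan 0:   def_vlan_en=%d accept_any=%d\n",
	       cfg.def_vlan_en, cfg.accept_any_vlan);
	return 0;
}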
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
74 /* VLANs object */ 74 /* VLANs object */
75 struct bnx2x_vlan_mac_obj vlan_obj; 75 struct bnx2x_vlan_mac_obj vlan_obj;
76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
77 unsigned long accept_flags; /* last accept flags configured */
77 78
78 /* Queue Slow-path State object */ 79 /* Queue Slow-path State object */
79 struct bnx2x_queue_sp_obj sp_obj; 80 struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
208 return -EINVAL; 208 return -EINVAL;
209 } 209 }
210 210
211 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); 211 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
212 212
213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; 213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
214 214
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1598 1598
1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1600 unsigned long accept = 0; 1600 unsigned long accept = 0;
1601 struct pf_vf_bulletin_content *bulletin =
1602 BP_VF_BULLETIN(bp, vf->index);
1601 1603
1602 /* convert VF-PF if mask to bnx2x accept flags */ 1604 /* convert VF-PF if mask to bnx2x accept flags */
1603 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) 1605 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1617 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1619 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1618 1620
1619 /* A packet arriving at the vf's mac should be accepted 1621 /* A packet arriving at the vf's mac should be accepted
1620 * with any vlan 1622 * with any vlan, unless a vlan has already been
1623 * configured.
1621 */ 1624 */
1622 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); 1625 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1626 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1623 1627
1624 /* set rx-mode */ 1628 /* set rx-mode */
1625 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, 1629 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1710 goto response; 1714 goto response;
1711 } 1715 }
1712 } 1716 }
1717 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1718 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1719 int i;
1720
1721 /* search for vlan filters */
1722 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1723 if (filters->filters[i].flags &
1724 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1725 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1726 vf->abs_vfid);
1727 vf->op_rc = -EPERM;
1728 goto response;
1729 }
1730 }
1731 }
1713 1732
1714 /* verify vf_qid */ 1733 /* verify vf_qid */
1715 if (filters->vf_qid > vf_rxq_count(vf)) 1734 if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1805 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; 1824 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1806 1825
1807 /* flags handled individually for backward/forward compatibility */ 1826 /* flags handled individually for backward/forward compatibility */
1827 vf_op_params->rss_flags = 0;
1828 vf_op_params->ramrod_flags = 0;
1829
1808 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1830 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1809 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1831 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1810 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1832 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f3dd93b4aeaa..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622{ 7622{
7623 u32 base = (u32) mapping & 0xffffffff; 7623 u32 base = (u32) mapping & 0xffffffff;
7624 7624
7625 return (base > 0xffffdcc0) && (base + len + 8 < base); 7625 return base + len + 8 < base;
7626} 7626}
7627 7627
7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
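The tg3 change above drops the magic base > 0xffffdcc0 precondition: in 32-bit arithmetic, a buffer crosses a 4GB boundary exactly when base + len + 8 wraps below base, so the wraparound test alone is sufficient. A tiny standalone sketch of the predicate (the sample values are invented):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Model of the fixed tg3_4g_overflow_test: a DMA buffer must not cross a
 * 4GB boundary, which in 32-bit arithmetic shows up as wraparound. */
static bool crosses_4g(uint32_t base, uint32_t len)
{
	return base + len + 8 < base; /* unsigned wrap == boundary crossing */
}

int main(void)
{
	printf("%d\n", crosses_4g(0xfffffff0u, 0x100)); /* 1: wraps past 4GB */
	printf("%d\n", crosses_4g(0x10000000u, 0x100)); /* 0: fits */
	return 0;
}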
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6c9308850453..56e0415f8cdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -228,6 +228,25 @@ struct tp_params {
228 228
229 uint32_t dack_re; /* DACK timer resolution */ 229 uint32_t dack_re; /* DACK timer resolution */
230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
231
232 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
233 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
234
235 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
236 * subset of the set of fields which may be present in the Compressed
237 * Filter Tuple portion of filters and TCP TCB connections. The
238 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
239 * Since a variable number of fields may or may not be present, their
240 * shifted field positions within the Compressed Filter Tuple may
241 * vary, or not even be present if the field isn't selected in
242 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
243 * places we store their offsets here, or a -1 if the field isn't
244 * present.
245 */
246 int vlan_shift;
247 int vnic_shift;
248 int port_shift;
249 int protocol_shift;
231}; 250};
232 251
233struct vpd_params { 252struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
926 const u8 *fw_data, unsigned int fw_size, 945 const u8 *fw_data, unsigned int fw_size,
927 struct fw_hdr *card_fw, enum dev_state state, int *reset); 946 struct fw_hdr *card_fw, enum dev_state state, int *reset);
928int t4_prep_adapter(struct adapter *adapter); 947int t4_prep_adapter(struct adapter *adapter);
948int t4_init_tp_params(struct adapter *adap);
949int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
929int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 950int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
930void t4_fatal_err(struct adapter *adapter); 951void t4_fatal_err(struct adapter *adapter);
931int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 952int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e035a7d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2986 if (stid >= 0) { 2986 if (stid >= 0) {
2987 t->stid_tab[stid].data = data; 2987 t->stid_tab[stid].data = data;
2988 stid += t->stid_base; 2988 stid += t->stid_base;
2989 t->stids_in_use++; 2989 /* IPv6 requires a max of 520 bits or 16 cells in the TCAM.
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2991 * needs 2 TIDs.
2992 */
2993 if (family == PF_INET)
2994 t->stids_in_use++;
2995 else
2996 t->stids_in_use += 4;
2990 } 2997 }
2991 spin_unlock_bh(&t->stid_lock); 2998 spin_unlock_bh(&t->stid_lock);
2992 return stid; 2999 return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3012 } 3019 }
3013 if (stid >= 0) { 3020 if (stid >= 0) {
3014 t->stid_tab[stid].data = data; 3021 t->stid_tab[stid].data = data;
3015 stid += t->stid_base; 3022 stid -= t->nstids;
3023 stid += t->sftid_base;
3016 t->stids_in_use++; 3024 t->stids_in_use++;
3017 } 3025 }
3018 spin_unlock_bh(&t->stid_lock); 3026 spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
3024 */ 3032 */
3025void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3026{ 3034{
3027 stid -= t->stid_base; 3035 /* Is it a server filter TID? */
3036 if (t->nsftids && (stid >= t->sftid_base)) {
3037 stid -= t->sftid_base;
3038 stid += t->nstids;
3039 } else {
3040 stid -= t->stid_base;
3041 }
3042
3028 spin_lock_bh(&t->stid_lock); 3043 spin_lock_bh(&t->stid_lock);
3029 if (family == PF_INET) 3044 if (family == PF_INET)
3030 __clear_bit(stid, t->stid_bmap); 3045 __clear_bit(stid, t->stid_bmap);
3031 else 3046 else
3032 bitmap_release_region(t->stid_bmap, stid, 2); 3047 bitmap_release_region(t->stid_bmap, stid, 2);
3033 t->stid_tab[stid].data = NULL; 3048 t->stid_tab[stid].data = NULL;
3034 t->stids_in_use--; 3049 if (family == PF_INET)
3050 t->stids_in_use--;
3051 else
3052 t->stids_in_use -= 4;
3035 spin_unlock_bh(&t->stid_lock); 3053 spin_unlock_bh(&t->stid_lock);
3036} 3054}
3037EXPORT_SYMBOL(cxgb4_free_stid); 3055EXPORT_SYMBOL(cxgb4_free_stid);
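Per the comment added in cxgb4_alloc_stid above, an IPv6 server entry occupies up to 520 bits (16 TCAM cells), i.e. four TIDs' worth, while an IPv4 entry takes one, so alloc and free must adjust stids_in_use symmetrically. A minimal model of that accounting, with the PF_INET/PF_INET6 values hard-coded for illustration:

#include <stdio.h>

#define PF_INET  2
#define PF_INET6 10

static int stids_in_use;

static void stid_get(int family)
{
	/* IPv6 consumes 4 TID slots of TCAM, IPv4 a single slot */
	stids_in_use += (family == PF_INET) ? 1 : 4;
}

static void stid_put(int family)
{
	/* free must mirror alloc or the counter drifts */
	stids_in_use -= (family == PF_INET) ? 1 : 4;
}

int main(void)
{
	stid_get(PF_INET6);
	stid_get(PF_INET);
	printf("in use: %d\n", stids_in_use); /* 5 */
	stid_put(PF_INET6);
	printf("in use: %d\n", stids_in_use); /* 1 */
	return 0;
}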
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
3134 size_t size; 3152 size_t size;
3135 unsigned int stid_bmap_size; 3153 unsigned int stid_bmap_size;
3136 unsigned int natids = t->natids; 3154 unsigned int natids = t->natids;
3155 struct adapter *adap = container_of(t, struct adapter, tids);
3137 3156
3138 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3157 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3139 size = t->ntids * sizeof(*t->tid_tab) + 3158 size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
3167 t->afree = t->atid_tab; 3186 t->afree = t->atid_tab;
3168 } 3187 }
3169 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3188 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t->stid_base &&
3191 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 __set_bit(0, t->stid_bmap);
3193
3170 return 0; 3194 return 0;
3171} 3195}
3172 3196
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3731 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3755 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3732 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3756 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3733 (adap->fn * 4)); 3757 (adap->fn * 4));
3734 lli.filt_mode = adap->filter_mode; 3758 lli.filt_mode = adap->params.tp.vlan_pri_map;
3735 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3736 for (i = 0; i < NCHAN; i++) 3760 for (i = 0; i < NCHAN; i++)
3737 lli.tx_modq[i] = i; 3761 lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4179 adap = netdev2adap(dev); 4203 adap = netdev2adap(dev);
4180 4204
4181 /* Adjust stid to correct filter index */ 4205 /* Adjust stid to correct filter index */
4182 stid -= adap->tids.nstids; 4206 stid -= adap->tids.sftid_base;
4183 stid += adap->tids.nftids; 4207 stid += adap->tids.nftids;
4184 4208
4185 /* Check to make sure the filter requested is writable ... 4209 /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4205 f->fs.val.lip[i] = val[i]; 4229 f->fs.val.lip[i] = val[i];
4206 f->fs.mask.lip[i] = ~0; 4230 f->fs.mask.lip[i] = ~0;
4207 } 4231 }
4208 if (adap->filter_mode & F_PORT) { 4232 if (adap->params.tp.vlan_pri_map & F_PORT) {
4209 f->fs.val.iport = port; 4233 f->fs.val.iport = port;
4210 f->fs.mask.iport = mask; 4234 f->fs.mask.iport = mask;
4211 } 4235 }
4212 } 4236 }
4213 4237
4238 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 f->fs.val.proto = IPPROTO_TCP;
4240 f->fs.mask.proto = ~0;
4241 }
4242
4214 f->fs.dirsteer = 1; 4243 f->fs.dirsteer = 1;
4215 f->fs.iq = queue; 4244 f->fs.iq = queue;
4216 /* Mark filter as locked */ 4245 /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4237 adap = netdev2adap(dev); 4266 adap = netdev2adap(dev);
4238 4267
4239 /* Adjust stid to correct filter index */ 4268 /* Adjust stid to correct filter index */
4240 stid -= adap->tids.nstids; 4269 stid -= adap->tids.sftid_base;
4241 stid += adap->tids.nftids; 4270 stid += adap->tids.nftids;
4242 4271
4243 f = &adap->tids.ftid_tab[stid]; 4272 f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
5092 enum dev_state state; 5121 enum dev_state state;
5093 u32 params[7], val[7]; 5122 u32 params[7], val[7];
5094 struct fw_caps_config_cmd caps_cmd; 5123 struct fw_caps_config_cmd caps_cmd;
5095 int reset = 1, j; 5124 int reset = 1;
5096 5125
5097 /* 5126 /*
5098 * Contact FW, advertising Master capability (and potentially forcing 5127 * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
5434 /* 5463 /*
5435 * These are finalized by FW initialization, load their values now. 5464 * These are finalized by FW initialization, load their values now.
5436 */ 5465 */
5437 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5438 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5439 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5440 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5466 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5441 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5467 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5442 adap->params.b_wnd); 5468 adap->params.b_wnd);
5443 5469
5444 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5470 t4_init_tp_params(adap);
5445 for (j = 0; j < NCHAN; j++)
5446 adap->params.tp.tx_modq[j] = j;
5447
5448 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5449 &adap->filter_mode, 1,
5450 TP_VLAN_PRI_MAP);
5451
5452 adap->flags |= FW_OK; 5471 adap->flags |= FW_OK;
5453 return 0; 5472 return 0;
5454 5473
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
131 131
132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
133{ 133{
134 stid -= t->stid_base; 134 /* Is it a server filter TID? */
135 if (t->nsftids && (stid >= t->sftid_base)) {
136 stid -= t->sftid_base;
137 stid += t->nstids;
138 } else {
139 stid -= t->stid_base;
140 }
141
135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 142 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
136} 143}
137 144
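The lookup_stid and cxgb4_free_stid hunks introduce the same index remapping: server-filter TIDs are numbered from sftid_base but live in stid_tab directly after the nstids regular entries. A standalone sketch of that mapping, with made-up base/count values:

#include <stdio.h>

/* Model of the stid/sftid index remapping above: regular server TIDs map
 * from stid_base; server-filter TIDs map from sftid_base into the slots
 * after nstids.  The parameter values below are invented. */
struct tid_info {
	unsigned int stid_base, nstids;
	unsigned int sftid_base, nsftids;
};

static unsigned int stid_index(const struct tid_info *t, unsigned int stid)
{
	if (t->nsftids && stid >= t->sftid_base)
		return stid - t->sftid_base + t->nstids;
	return stid - t->stid_base;
}

int main(void)
{
	struct tid_info t = { .stid_base = 64, .nstids = 128,
			      .sftid_base = 512, .nsftids = 16 };

	printf("%u\n", stid_index(&t, 64));  /* 0: first regular stid */
	printf("%u\n", stid_index(&t, 512)); /* 128: first server filter */
	return 0;
}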
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..cb05be905def 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
45#include "l2t.h" 45#include "l2t.h"
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h"
48 49
49#define VLAN_NONE 0xfff 50#define VLAN_NONE 0xfff
50 51
@@ -411,6 +412,40 @@ done:
411} 412}
412EXPORT_SYMBOL(cxgb4_l2t_get); 413EXPORT_SYMBOL(cxgb4_l2t_get);
413 414
415u64 cxgb4_select_ntuple(struct net_device *dev,
416 const struct l2t_entry *l2t)
417{
418 struct adapter *adap = netdev2adap(dev);
419 struct tp_params *tp = &adap->params.tp;
420 u64 ntuple = 0;
421
422 /* Initialize each of the fields which we care about which are present
423 * in the Compressed Filter Tuple.
424 */
425 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
426 ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
427
428 if (tp->port_shift >= 0)
429 ntuple |= (u64)l2t->lport << tp->port_shift;
430
431 if (tp->protocol_shift >= 0)
432 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
433
434 if (tp->vnic_shift >= 0) {
435 u32 viid = cxgb4_port_viid(dev);
436 u32 vf = FW_VIID_VIN_GET(viid);
437 u32 pf = FW_VIID_PFN_GET(viid);
438 u32 vld = FW_VIID_VIVLD_GET(viid);
439
440 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
441 V_FT_VNID_ID_PF(pf) |
442 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
443 }
444
445 return ntuple;
446}
447EXPORT_SYMBOL(cxgb4_select_ntuple);
448
414/* 449/*
415 * Called when address resolution fails for an L2T entry to handle packets 450 * Called when address resolution fails for an L2T entry to handle packets
416 * on the arpq head. If a packet specifies a failure handler it is invoked, 451 * on the arpq head. If a packet specifies a failure handler it is invoked,
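cxgb4_select_ntuple above composes the Compressed Filter Tuple by shifting each present field to its cached offset and OR-ing it in; a shift of -1 (as computed by t4_init_tp_params) marks a field absent from TP_VLAN_PRI_MAP. A simplified standalone model, restricted to the VLAN, port, and protocol fields and using invented shift values rather than ones read from real hardware:

#include <stdint.h>
#include <stdio.h>

#define IPPROTO_TCP 6

/* Cached per-adapter field offsets; -1 means "not in the filter mode". */
struct tp_shifts { int vlan, port, protocol; };

static uint64_t build_ntuple(const struct tp_shifts *tp,
			     uint16_t vlan, uint8_t lport)
{
	uint64_t ntuple = 0;

	if (tp->vlan >= 0 && vlan)
		ntuple |= (uint64_t)((1u << 16) | vlan) << tp->vlan; /* VLD|vid */
	if (tp->port >= 0)
		ntuple |= (uint64_t)lport << tp->port;
	if (tp->protocol >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol;
	return ntuple;
}

int main(void)
{
	struct tp_shifts tp = { .vlan = 20, .port = 17, .protocol = 9 };

	printf("ntuple = 0x%llx\n",
	       (unsigned long long)build_ntuple(&tp, 100, 2));
	return 0;
}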
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, 98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev, 99 const struct net_device *physdev,
100 unsigned int priority); 100 unsigned int priority);
101 101u64 cxgb4_select_ntuple(struct net_device *dev,
102 const struct l2t_entry *l2t);
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc380c36e1a8..cc3511a5cd0c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2581 #undef READ_FL_BUF 2581 #undef READ_FL_BUF
2582 2582
2583 if (fl_small_pg != PAGE_SIZE || 2583 if (fl_small_pg != PAGE_SIZE ||
2584 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || 2584 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2585 (fl_large_pg & (fl_large_pg-1)) != 0))) { 2585 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2587 fl_small_pg, fl_large_pg); 2587 fl_small_pg, fl_large_pg);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 74a6fce5a15a..e1413eacdbd2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
3808 return 0; 3808 return 0;
3809} 3809}
3810 3810
3811/**
3812 * t4_init_tp_params - initialize adap->params.tp
3813 * @adap: the adapter
3814 *
3815 * Initialize various fields of the adapter's TP Parameters structure.
3816 */
3817int t4_init_tp_params(struct adapter *adap)
3818{
3819 int chan;
3820 u32 v;
3821
3822 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3823 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3824 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3825
3826 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3827 for (chan = 0; chan < NCHAN; chan++)
3828 adap->params.tp.tx_modq[chan] = chan;
3829
3830 /* Cache the adapter's Compressed Filter Mode and global Ingress
3831 * Configuration.
3832 */
3833 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3834 &adap->params.tp.vlan_pri_map, 1,
3835 TP_VLAN_PRI_MAP);
3836 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3837 &adap->params.tp.ingress_config, 1,
3838 TP_INGRESS_CONFIG);
3839
3840 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3841 * shift positions of several elements of the Compressed Filter Tuple
3842 * for this adapter which we need frequently ...
3843 */
3844 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3845 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3846 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3847 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3848 F_PROTOCOL);
3849
3850 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3851 * represents the presence of an Outer VLAN instead of a VNIC ID.
3852 */
3853 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3854 adap->params.tp.vnic_shift = -1;
3855
3856 return 0;
3857}
3858
3859/**
3860 * t4_filter_field_shift - calculate filter field shift
3861 * @adap: the adapter
3862 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3863 *
3864 * Return the shift position of a filter field within the Compressed
3865 * Filter Tuple. The filter field is specified via its selection bit
3866 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3867 */
3868int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3869{
3870 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3871 unsigned int sel;
3872 int field_shift;
3873
3874 if ((filter_mode & filter_sel) == 0)
3875 return -1;
3876
3877 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3878 switch (filter_mode & sel) {
3879 case F_FCOE:
3880 field_shift += W_FT_FCOE;
3881 break;
3882 case F_PORT:
3883 field_shift += W_FT_PORT;
3884 break;
3885 case F_VNIC_ID:
3886 field_shift += W_FT_VNIC_ID;
3887 break;
3888 case F_VLAN:
3889 field_shift += W_FT_VLAN;
3890 break;
3891 case F_TOS:
3892 field_shift += W_FT_TOS;
3893 break;
3894 case F_PROTOCOL:
3895 field_shift += W_FT_PROTOCOL;
3896 break;
3897 case F_ETHERTYPE:
3898 field_shift += W_FT_ETHERTYPE;
3899 break;
3900 case F_MACMATCH:
3901 field_shift += W_FT_MACMATCH;
3902 break;
3903 case F_MPSHITTYPE:
3904 field_shift += W_FT_MPSHITTYPE;
3905 break;
3906 case F_FRAGMENTATION:
3907 field_shift += W_FT_FRAGMENTATION;
3908 break;
3909 }
3910 }
3911 return field_shift;
3912}
3913
3811int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3914int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3812{ 3915{
3813 u8 addr[6]; 3916 u8 addr[6];
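t4_filter_field_shift above walks the selection bits below the requested field and accumulates the widths of the enabled ones, since only selected fields occupy space in the tuple. A standalone model using the W_FT_* widths from t4_regs.h (the sample filter mode is invented):

#include <stdio.h>

/* Field widths in selection-bit order, mirroring the W_FT_* defines:
 * FCOE, PORT, VNIC_ID, VLAN, TOS, PROTOCOL, ETHERTYPE, MACMATCH,
 * MPSHITTYPE, FRAGMENTATION. */
static const int width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

static int filter_field_shift(unsigned int filter_mode, int field_bit)
{
	int shift = 0, bit;

	if (!(filter_mode & (1u << field_bit)))
		return -1; /* field not selected, so it has no position */

	/* sum the widths of every lower-order field that is enabled */
	for (bit = 0; bit < field_bit; bit++)
		if (filter_mode & (1u << bit))
			shift += width[bit];
	return shift;
}

int main(void)
{
	/* a mode with FCOE(0), PORT(1), VLAN(3), PROTOCOL(5) selected */
	unsigned int mode = 0x2b;

	printf("VLAN shift: %d\n", filter_field_shift(mode, 3));     /* 4 */
	printf("PROTOCOL shift: %d\n", filter_field_shift(mode, 5)); /* 21 */
	printf("TOS shift: %d\n", filter_field_shift(mode, 4));      /* -1 */
	return 0;
}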
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d69d2c..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
1171 1171
1172#define A_TP_TX_SCHED_PCMD 0x25 1172#define A_TP_TX_SCHED_PCMD 0x25
1173 1173
1174#define S_VNIC 11
1175#define V_VNIC(x) ((x) << S_VNIC)
1176#define F_VNIC V_VNIC(1U)
1177
1178#define S_FRAGMENTATION 9
1179#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
1180#define F_FRAGMENTATION V_FRAGMENTATION(1U)
1181
1182#define S_MPSHITTYPE 8
1183#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
1184#define F_MPSHITTYPE V_MPSHITTYPE(1U)
1185
1186#define S_MACMATCH 7
1187#define V_MACMATCH(x) ((x) << S_MACMATCH)
1188#define F_MACMATCH V_MACMATCH(1U)
1189
1190#define S_ETHERTYPE 6
1191#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
1192#define F_ETHERTYPE V_ETHERTYPE(1U)
1193
1194#define S_PROTOCOL 5
1195#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
1196#define F_PROTOCOL V_PROTOCOL(1U)
1197
1198#define S_TOS 4
1199#define V_TOS(x) ((x) << S_TOS)
1200#define F_TOS V_TOS(1U)
1201
1202#define S_VLAN 3
1203#define V_VLAN(x) ((x) << S_VLAN)
1204#define F_VLAN V_VLAN(1U)
1205
1206#define S_VNIC_ID 2
1207#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
1208#define F_VNIC_ID V_VNIC_ID(1U)
1209
1174#define S_PORT 1 1210#define S_PORT 1
1175#define V_PORT(x) ((x) << S_PORT) 1211#define V_PORT(x) ((x) << S_PORT)
1176#define F_PORT V_PORT(1U) 1212#define F_PORT V_PORT(1U)
1177 1213
1214#define S_FCOE 0
1215#define V_FCOE(x) ((x) << S_FCOE)
1216#define F_FCOE V_FCOE(1U)
1217
1178#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 1218#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1179#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 1219#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1180 1220
@@ -1213,4 +1253,37 @@
1213#define V_CHIPID(x) ((x) << S_CHIPID) 1253#define V_CHIPID(x) ((x) << S_CHIPID)
1214#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 1254#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1215 1255
1256/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
1257 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
1258 * selects for a particular field being present. These fields, when present
1259 * in the Compressed Filter Tuple, have the following widths in bits.
1260 */
1261#define W_FT_FCOE 1
1262#define W_FT_PORT 3
1263#define W_FT_VNIC_ID 17
1264#define W_FT_VLAN 17
1265#define W_FT_TOS 8
1266#define W_FT_PROTOCOL 8
1267#define W_FT_ETHERTYPE 16
1268#define W_FT_MACMATCH 9
1269#define W_FT_MPSHITTYPE 3
1270#define W_FT_FRAGMENTATION 1
1271
1272/* Some of the Compressed Filter Tuple fields have internal structure. These
1273 * bit shifts/masks describe those structures. All shifts are relative to the
1274 * base position of the fields within the Compressed Filter Tuple
1275 */
1276#define S_FT_VLAN_VLD 16
1277#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
1278#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
1279
1280#define S_FT_VNID_ID_VF 0
1281#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
1282
1283#define S_FT_VNID_ID_PF 7
1284#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
1285
1286#define S_FT_VNID_ID_VLD 16
1287#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
1288
1216#endif /* __T4_REGS_H */ 1289#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df619b53..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
104#define BE3_MAX_RSS_QS 16 104#define BE3_MAX_RSS_QS 16
105#define BE3_MAX_TX_QS 16 105#define BE3_MAX_TX_QS 16
106#define BE3_MAX_EVT_QS 16 106#define BE3_MAX_EVT_QS 16
107#define BE3_SRIOV_MAX_EVT_QS 8
107 108
108#define MAX_RX_QS 32 109#define MAX_RX_QS 32
109#define MAX_EVT_QS 32 110#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
480 struct list_head entry; 481 struct list_head entry;
481 482
482 u32 flash_status; 483 u32 flash_status;
483 struct completion flash_compl; 484 struct completion et_cmd_compl;
484 485
485 struct be_resources res; /* resources available for the func */ 486 struct be_resources res; /* resources available for the func */
486 u16 num_vfs; /* Number of VFs provisioned by PF */ 487 u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1ef14c..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
141 subsystem = resp_hdr->subsystem; 141 subsystem = resp_hdr->subsystem;
142 } 142 }
143 143
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl);
147 return 0;
148 }
149
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) { 152 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status; 153 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl); 154 complete(&adapter->et_cmd_compl);
149 } 155 }
150 156
151 if (compl_status == MCC_STATUS_SUCCESS) { 157 if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2017 0x3ea83c02, 0x4a110304}; 2023 0x3ea83c02, 0x4a110304};
2018 int status; 2024 int status;
2019 2025
2026 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 return 0;
2028
2020 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2029 if (mutex_lock_interruptible(&adapter->mbox_lock))
2021 return -1; 2030 return -1;
2022 2031
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2160 be_mcc_notify(adapter); 2169 be_mcc_notify(adapter);
2161 spin_unlock_bh(&adapter->mcc_lock); 2170 spin_unlock_bh(&adapter->mcc_lock);
2162 2171
2163 if (!wait_for_completion_timeout(&adapter->flash_compl, 2172 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2164 msecs_to_jiffies(60000))) 2173 msecs_to_jiffies(60000)))
2165 status = -1; 2174 status = -1;
2166 else 2175 else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2255 be_mcc_notify(adapter); 2264 be_mcc_notify(adapter);
2256 spin_unlock_bh(&adapter->mcc_lock); 2265 spin_unlock_bh(&adapter->mcc_lock);
2257 2266
2258 if (!wait_for_completion_timeout(&adapter->flash_compl, 2267 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2259 msecs_to_jiffies(40000))) 2268 msecs_to_jiffies(40000)))
2260 status = -1; 2269 status = -1;
2261 else 2270 else
2262 status = adapter->flash_status; 2271 status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2367{ 2376{
2368 struct be_mcc_wrb *wrb; 2377 struct be_mcc_wrb *wrb;
2369 struct be_cmd_req_loopback_test *req; 2378 struct be_cmd_req_loopback_test *req;
2379 struct be_cmd_resp_loopback_test *resp;
2370 int status; 2380 int status;
2371 2381
2372 spin_lock_bh(&adapter->mcc_lock); 2382 spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2381 2391
2382 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2392 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2383 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2393 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2384 req->hdr.timeout = cpu_to_le32(4);
2385 2394
2395 req->hdr.timeout = cpu_to_le32(15);
2386 req->pattern = cpu_to_le64(pattern); 2396 req->pattern = cpu_to_le64(pattern);
2387 req->src_port = cpu_to_le32(port_num); 2397 req->src_port = cpu_to_le32(port_num);
2388 req->dest_port = cpu_to_le32(port_num); 2398 req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2390 req->num_pkts = cpu_to_le32(num_pkts); 2400 req->num_pkts = cpu_to_le32(num_pkts);
2391 req->loopback_type = cpu_to_le32(loopback_type); 2401 req->loopback_type = cpu_to_le32(loopback_type);
2392 2402
2393 status = be_mcc_notify_wait(adapter); 2403 be_mcc_notify(adapter);
2394 if (!status) { 2404
2395 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2405 spin_unlock_bh(&adapter->mcc_lock);
2396 status = le32_to_cpu(resp->status);
2397 }
2398 2406
2407 wait_for_completion(&adapter->et_cmd_compl);
2408 resp = embedded_payload(wrb);
2409 status = le32_to_cpu(resp->status);
2410
2411 return status;
2399err: 2412err:
2400 spin_unlock_bh(&adapter->mcc_lock); 2413 spin_unlock_bh(&adapter->mcc_lock);
2401 return status; 2414 return status;
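The be_cmd_loopback_test change converts a synchronous be_mcc_notify_wait() into notify-then-wait-for-completion, so the (now 15-second) firmware test no longer blocks under mcc_lock while be_mcc_compl_process() signals et_cmd_compl. A rough userspace model of that fire-then-wait pattern, with pthreads standing in for the kernel's completion primitives and a thread standing in for firmware:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static void *firmware(void *arg)
{
	usleep(100 * 1000); /* long-running loopback test in firmware */
	pthread_mutex_lock(&lock);
	done = 1; /* models complete(&adapter->et_cmd_compl) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t fw;

	pthread_create(&fw, NULL, firmware, NULL);
	/* command already notified; wait without holding the issuing lock */
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock); /* wait_for_completion() */
	pthread_mutex_unlock(&lock);
	printf("loopback status available\n");
	pthread_join(fw, NULL);
	return 0;
}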
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0fde69d5cb6a..bf40fdaecfa3 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2744 if (!BEx_chip(adapter)) 2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6; 2746 RSS_ENABLE_UDP_IPV6;
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
2747 2751
2748 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749 128); 2753 128);
2750 if (rc) { 2754 if (rc) {
2751 adapter->rss_flags = 0; 2755 adapter->rss_flags = RSS_ENABLE_NONE;
2752 return rc; 2756 return rc;
2753 }
2754 } 2757 }
2755 2758
2756 /* First time posting */ 2759 /* First time posting */
@@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3124{ 3127{
3125 struct pci_dev *pdev = adapter->pdev; 3128 struct pci_dev *pdev = adapter->pdev;
3126 bool use_sriov = false; 3129 bool use_sriov = false;
3130 int max_vfs;
3127 3131
3128 if (BE3_chip(adapter) && sriov_want(adapter)) { 3132 max_vfs = pci_sriov_get_totalvfs(pdev);
3129 int max_vfs;
3130 3133
3131 max_vfs = pci_sriov_get_totalvfs(pdev); 3134 if (BE3_chip(adapter) && sriov_want(adapter)) {
3132 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3133 use_sriov = res->max_vfs; 3136 use_sriov = res->max_vfs;
3134 } 3137 }
@@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3159 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3160 res->max_rx_qs = res->max_rss_qs + 1; 3163 res->max_rx_qs = res->max_rss_qs + 1;
3161 3164
3162 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; 3165 if (be_physfn(adapter))
3166 res->max_evt_qs = (max_vfs > 0) ?
3167 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168 else
3169 res->max_evt_qs = 1;
3163 3170
3164 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; 3171 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) 3172 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
4205 spin_lock_init(&adapter->mcc_lock); 4212 spin_lock_init(&adapter->mcc_lock);
4206 spin_lock_init(&adapter->mcc_cq_lock); 4213 spin_lock_init(&adapter->mcc_cq_lock);
4207 4214
4208 init_completion(&adapter->flash_compl); 4215 init_completion(&adapter->et_cmd_compl);
4209 pci_save_state(adapter->pdev); 4216 pci_save_state(adapter->pdev);
4210 return 0; 4217 return 0;
4211 4218
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e7c8b749c5a5..50bb71c663e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
428 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
430 430
431 skb_tx_timestamp(skb);
432
431 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
432 434
433 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
436 /* Trigger transmission start */ 438 /* Trigger transmission start */
437 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
438 440
439 skb_tx_timestamp(skb);
440
441 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
442} 442}
443 443
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
718 e1000_release_phy_80003es2lan(hw); 718 e1000_release_phy_80003es2lan(hw);
719 719
720 /* Disable IBIST slave mode (far-end loopback) */ 720 /* Disable IBIST slave mode (far-end loopback) */
721 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 721 ret_val =
722 &kum_reg_data); 722 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
723 &kum_reg_data);
724 if (ret_val)
725 return ret_val;
723 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; 726 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
724 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 727 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
725 kum_reg_data); 728 kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..c30d41d6e426 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6174 return 0; 6174 return 0;
6175} 6175}
6176 6176
6177#ifdef CONFIG_PM_SLEEP 6177#ifdef CONFIG_PM
6178static int e1000_suspend(struct device *dev) 6178static int e1000_suspend(struct device *dev)
6179{ 6179{
6180 struct pci_dev *pdev = to_pci_dev(dev); 6180 struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
6193 6193
6194 return __e1000_resume(pdev); 6194 return __e1000_resume(pdev);
6195} 6195}
6196#endif /* CONFIG_PM_SLEEP */ 6196#endif /* CONFIG_PM */
6197 6197
6198#ifdef CONFIG_PM_RUNTIME 6198#ifdef CONFIG_PM_RUNTIME
6199static int e1000_runtime_suspend(struct device *dev) 6199static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1757 * it across the board. 1757 * it across the board.
1758 */ 1758 */
1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1760 if (ret_val) 1760 if (ret_val) {
1761 /* If the first read fails, another entity may have 1761 /* If the first read fails, another entity may have
1762 * ownership of the resources, wait and try again to 1762 * ownership of the resources, wait and try again to
1763 * see if they have relinquished the resources yet. 1763 * see if they have relinquished the resources yet.
1764 */ 1764 */
1765 udelay(usec_interval); 1765 if (usec_interval >= 1000)
1766 msleep(usec_interval / 1000);
1767 else
1768 udelay(usec_interval);
1769 }
1766 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1770 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1767 if (ret_val) 1771 if (ret_val)
1768 break; 1772 break;
1769 if (phy_status & BMSR_LSTATUS) 1773 if (phy_status & BMSR_LSTATUS)
1770 break; 1774 break;
1771 if (usec_interval >= 1000) 1775 if (usec_interval >= 1000)
1772 mdelay(usec_interval / 1000); 1776 msleep(usec_interval / 1000);
1773 else 1777 else
1774 udelay(usec_interval); 1778 udelay(usec_interval);
1775 } 1779 }
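The e1000e polling fix applies one rule in both places: an interval of at least 1000 microseconds should sleep (msleep) rather than busy-wait (udelay), and the first failed PHY read must also delay before retrying. A sketch of the delay selection, with userspace usleep standing in for both kernel primitives:

#include <stdio.h>
#include <unistd.h>

static void poll_delay(unsigned int usec_interval)
{
	if (usec_interval >= 1000) {
		printf("msleep(%u)\n", usec_interval / 1000);
		usleep((usec_interval / 1000) * 1000); /* sleeps, reschedules */
	} else {
		printf("udelay(%u)\n", usec_interval);
		usleep(usec_interval); /* busy-wait in the kernel proper */
	}
}

int main(void)
{
	poll_delay(100);   /* short interval: busy-wait is acceptable */
	poll_delay(10000); /* long interval: must not spin the CPU */
	return 0;
}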
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6827 return __ixgbe_maybe_stop_tx(tx_ring, size); 6827 return __ixgbe_maybe_stop_tx(tx_ring, size);
6828} 6828}
6829 6829
6830#ifdef IXGBE_FCOE 6830static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6831static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6831 void *accel_priv)
6832{ 6832{
6833 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6834#ifdef IXGBE_FCOE
6833 struct ixgbe_adapter *adapter; 6835 struct ixgbe_adapter *adapter;
6834 struct ixgbe_ring_feature *f; 6836 struct ixgbe_ring_feature *f;
6835 int txq; 6837 int txq;
6838#endif
6839
6840 if (fwd_adapter)
6841 return skb->queue_mapping + fwd_adapter->tx_base_queue;
6842
6843#ifdef IXGBE_FCOE
6836 6844
6837 /* 6845 /*
6838 * only execute the code below if protocol is FCoE 6846 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6858 txq -= f->indices; 6866 txq -= f->indices;
6859 6867
6860 return txq + f->offset; 6868 return txq + f->offset;
6869#else
6870 return __netdev_pick_tx(dev, skb);
6871#endif
6861} 6872}
6862 6873
6863#endif
6864netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 6874netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6865 struct ixgbe_adapter *adapter, 6875 struct ixgbe_adapter *adapter,
6866 struct ixgbe_ring *tx_ring) 6876 struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7629 kfree(fwd_adapter); 7639 kfree(fwd_adapter);
7630} 7640}
7631 7641
7632static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7633 struct net_device *dev,
7634 void *priv)
7635{
7636 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7637 unsigned int queue;
7638 struct ixgbe_ring *tx_ring;
7639
7640 queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7641 tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7642
7643 return __ixgbe_xmit_frame(skb, dev, tx_ring);
7644}
7645
7646static const struct net_device_ops ixgbe_netdev_ops = { 7642static const struct net_device_ops ixgbe_netdev_ops = {
7647 .ndo_open = ixgbe_open, 7643 .ndo_open = ixgbe_open,
7648 .ndo_stop = ixgbe_close, 7644 .ndo_stop = ixgbe_close,
7649 .ndo_start_xmit = ixgbe_xmit_frame, 7645 .ndo_start_xmit = ixgbe_xmit_frame,
7650#ifdef IXGBE_FCOE
7651 .ndo_select_queue = ixgbe_select_queue, 7646 .ndo_select_queue = ixgbe_select_queue,
7652#endif
7653 .ndo_set_rx_mode = ixgbe_set_rx_mode, 7647 .ndo_set_rx_mode = ixgbe_set_rx_mode,
7654 .ndo_validate_addr = eth_validate_addr, 7648 .ndo_validate_addr = eth_validate_addr,
7655 .ndo_set_mac_address = ixgbe_set_mac, 7649 .ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7689 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7683 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7690 .ndo_dfwd_add_station = ixgbe_fwd_add, 7684 .ndo_dfwd_add_station = ixgbe_fwd_add,
7691 .ndo_dfwd_del_station = ixgbe_fwd_del, 7685 .ndo_dfwd_del_station = ixgbe_fwd_del,
7692 .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
7693}; 7686};
7694 7687
7695/** 7688/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..72084f70adbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
291{ 291{
292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
293 int err; 293 int err;
294#ifdef CONFIG_PCI_IOV
294 u32 current_flags = adapter->flags; 295 u32 current_flags = adapter->flags;
296#endif
295 297
296 err = ixgbe_disable_sriov(adapter); 298 err = ixgbe_disable_sriov(adapter);
297 299
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619} 619}
620 620
621static u16 621static u16
622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) 622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
623 void *accel_priv)
623{ 624{
624 /* we are currently only using the first queue */ 625 /* we are currently only using the first queue */
625 return 0; 626 return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
92 if (time_is_before_jiffies(end)) 92 if (time_is_before_jiffies(end))
93 ++timedout; 93 ++timedout;
94 } else { 94 } else {
95 /* wait_event_timeout does not guarantee a delay of at
96 * least one whole jiffie, so timeout must be no less
97 * than two.
98 */
99 if (timeout < 2)
100 timeout = 2;
95 wait_event_timeout(dev->smi_busy_wait, 101 wait_event_timeout(dev->smi_busy_wait,
96 orion_mdio_smi_is_done(dev), 102 orion_mdio_smi_is_done(dev),
97 timeout); 103 timeout);
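The mvmdio comment above captures a classic jiffies pitfall: wait_event_timeout() with a 1-jiffy timeout may expire after less than one full tick, so the driver clamps the timeout to a minimum of 2. A trivial model of the clamp (the HZ value is assumed only for the printed arithmetic):

#include <stdio.h>

#define HZ 100 /* assumed tick rate for illustration */

static unsigned long clamp_timeout(unsigned long timeout_jiffies)
{
	/* one jiffy cannot guarantee a full jiffy of delay; use two */
	return timeout_jiffies < 2 ? 2 : timeout_jiffies;
}

int main(void)
{
	unsigned long t = 1; /* e.g. a sub-tick poll interval rounded down */

	printf("requested %lu jiffies, using %lu (%lu ms at HZ=%d)\n",
	       t, clamp_timeout(t), clamp_timeout(t) * (1000 / HZ), HZ);
	return 0;
}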
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
592 } 592 }
593} 593}
594 594
595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
596 void *accel_priv)
596{ 597{
597 struct mlx4_en_priv *priv = netdev_priv(dev); 598 struct mlx4_en_priv *priv = netdev_priv(dev);
598 u16 rings_p_up = priv->num_tx_rings_p_up; 599 u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
715 715
716void mlx4_en_tx_irq(struct mlx4_cq *mcq); 716void mlx4_en_tx_irq(struct mlx4_cq *mcq);
717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
718 void *accel_priv);
718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 719netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 720
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 721int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..cc68657f0536 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
1604 u32 seq_number; 1604 u32 seq_number;
1605 u8 vhdr_len = 0; 1605 u8 vhdr_len = 0;
1606 1606
1607 if (unlikely(ring > adapter->max_rds_rings)) 1607 if (unlikely(ring >= adapter->max_rds_rings))
1608 return NULL; 1608 return NULL;
1609 1609
1610 rds_ring = &recv_ctx->rds_rings[ring]; 1610 rds_ring = &recv_ctx->rds_rings[ring];
1611 1611
1612 index = netxen_get_lro_sts_refhandle(sts_data0); 1612 index = netxen_get_lro_sts_refhandle(sts_data0);
1613 if (unlikely(index > rds_ring->num_desc)) 1613 if (unlikely(index >= rds_ring->num_desc))
1614 return NULL; 1614 return NULL;
1615 1615
1616 buffer = &rds_ring->rx_buf_arr[index]; 1616 buffer = &rds_ring->rx_buf_arr[index];
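Both netxen checks above are off-by-one fixes: a table of N entries has valid indices 0..N-1, so the guard must reject index >= N, not merely index > N. A minimal illustration:

#include <stdbool.h>
#include <stdio.h>

/* Model of the netxen bounds fix for rds_rings/rx_buf_arr lookups. */
static bool index_ok(unsigned int index, unsigned int num_desc)
{
	return index < num_desc; /* i.e. reject unlikely(index >= num_desc) */
}

int main(void)
{
	printf("%d\n", index_ok(31, 32)); /* 1: last valid slot */
	printf("%d\n", index_ok(32, 32)); /* 0: old '>' check let this pass */
	return 0;
}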
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..f2a7c7166e24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
487 struct qlcnic_mailbox *mailbox; 487 struct qlcnic_mailbox *mailbox;
488 u8 extend_lb_time; 488 u8 extend_lb_time;
489 u8 phys_port_id[ETH_ALEN]; 489 u8 phys_port_id[ETH_ALEN];
490 u8 lb_mode;
490}; 491};
491 492
492struct qlcnic_adapter_stats { 493struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
578 dma_addr_t phys_addr; 579 dma_addr_t phys_addr;
579 dma_addr_t hw_cons_phys_addr; 580 dma_addr_t hw_cons_phys_addr;
580 struct netdev_queue *txq; 581 struct netdev_queue *txq;
582 /* Lock to protect Tx descriptors cleanup */
583 spinlock_t tx_clean_lock;
581} ____cacheline_internodealigned_in_smp; 584} ____cacheline_internodealigned_in_smp;
582 585
583/* 586/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
808 811
809#define QLCNIC_ILB_MODE 0x1 812#define QLCNIC_ILB_MODE 0x1
810#define QLCNIC_ELB_MODE 0x2 813#define QLCNIC_ELB_MODE 0x2
814#define QLCNIC_LB_MODE_MASK 0x3
811 815
812#define QLCNIC_LINKEVENT 0x1 816#define QLCNIC_LINKEVENT 0x1
813#define QLCNIC_LB_RESPONSE 0x2 817#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
1093 struct qlcnic_filter_hash rx_fhash; 1097 struct qlcnic_filter_hash rx_fhash;
1094 struct list_head vf_mc_list; 1098 struct list_head vf_mc_list;
1095 1099
1096 spinlock_t tx_clean_lock;
1097 spinlock_t mac_learn_lock; 1100 spinlock_t mac_learn_lock;
1098 /* spinlock for catching rcv filters for eswitch traffic */ 1101 /* spinlock for catching rcv filters for eswitch traffic */
1099 spinlock_t rx_mac_learn_lock; 1102 spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1708void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); 1711void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1709void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); 1712void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1710void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); 1713void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1714void qlcnic_update_stats(struct qlcnic_adapter *);
1711 1715
1712/* Adapter hardware abstraction */ 1716/* Adapter hardware abstraction */
1713struct qlcnic_hardware_ops { 1717struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..f776f99f7915 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
447 447
448 qlcnic_83xx_poll_process_aen(adapter); 448 qlcnic_83xx_poll_process_aen(adapter);
449 449
450 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 450 if (ahw->diag_test) {
451 ahw->diag_cnt++; 451 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
452 ahw->diag_cnt++;
452 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 453 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
453 return IRQ_HANDLED; 454 return IRQ_HANDLED;
454 } 455 }
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1345 } 1346 }
1346 1347
1347 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1348 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1348 /* disable and free mailbox interrupt */
1349 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1350 qlcnic_83xx_enable_mbx_poll(adapter);
1351 qlcnic_83xx_free_mbx_intr(adapter);
1352 }
1353 adapter->ahw->loopback_state = 0; 1349 adapter->ahw->loopback_state = 0;
1354 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1350 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1355 } 1351 }
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 {
     struct qlcnic_adapter *adapter = netdev_priv(netdev);
     struct qlcnic_host_sds_ring *sds_ring;
-    int ring, err;
+    int ring;
 
     clear_bit(__QLCNIC_DEV_UP, &adapter->state);
     if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
         for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
             sds_ring = &adapter->recv_ctx->sds_rings[ring];
-            qlcnic_83xx_disable_intr(adapter, sds_ring);
-            if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-                qlcnic_83xx_enable_mbx_poll(adapter);
+            if (adapter->flags & QLCNIC_MSIX_ENABLED)
+                qlcnic_83xx_disable_intr(adapter, sds_ring);
         }
     }
 
     qlcnic_fw_destroy_ctx(adapter);
     qlcnic_detach(adapter);
 
-    if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
-        if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
-            err = qlcnic_83xx_setup_mbx_intr(adapter);
-            qlcnic_83xx_disable_mbx_poll(adapter);
-            if (err) {
-                dev_err(&adapter->pdev->dev,
-                        "%s: failed to setup mbx interrupt\n",
-                        __func__);
-                goto out;
-            }
-        }
-    }
     adapter->ahw->diag_test = 0;
     adapter->drv_sds_rings = drv_sds_rings;
 
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
     if (netif_running(netdev))
         __qlcnic_up(adapter, netdev);
 
-    if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
-        !(adapter->flags & QLCNIC_MSIX_ENABLED))
-        qlcnic_83xx_disable_mbx_poll(adapter);
 out:
     netif_device_attach(netdev);
 }
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
         }
     } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
-    /* Make sure carrier is off and queue is stopped during loopback */
-    if (netif_running(netdev)) {
-        netif_carrier_off(netdev);
-        netif_tx_stop_all_queues(netdev);
-    }
-
     ret = qlcnic_do_lb_test(adapter, mode);
 
     qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
     ahw->link_autoneg = MSB(MSW(data[3]));
     ahw->module_type = MSB(LSW(data[3]));
     ahw->has_link_events = 1;
+    ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
     qlcnic_advert_link_change(adapter, link_status);
 }
 
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
     return;
 }
 
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    u32 offset;
+
+    offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+    dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+             readl(ahw->pci_base0 + offset),
+             QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+             QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+             QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
 static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 {
     struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
                 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
                 ahw->op_mode);
         clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+        qlcnic_dump_mailbox_registers(adapter);
+        qlcnic_83xx_get_mbx_data(adapter, cmd);
         qlcnic_dump_mbx(adapter, cmd);
         qlcnic_83xx_idc_request_reset(adapter,
                                       QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..a6a33508e401 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
                                                pci_channel_state_t);
 pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
 void qlcnic_83xx_io_resume(struct pci_dev *);
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..918e18ddf038 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
     adapter->ahw->idc.err_code = -EIO;
     dev_err(&adapter->pdev->dev,
             "%s: Device in unknown state\n", __func__);
+    clear_bit(__QLCNIC_RESETTING, &adapter->state);
     return 0;
 }
 
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
     struct qlcnic_hardware_context *ahw = adapter->ahw;
     struct qlcnic_mailbox *mbx = ahw->mailbox;
     int ret = 0;
-    u32 owner;
     u32 val;
 
     /* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
             set_bit(__QLCNIC_RESETTING, &adapter->state);
             qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
         } else {
-            owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
-            if (ahw->pci_func == owner)
-                qlcnic_dump_fw(adapter);
+            netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                        __func__);
+            qlcnic_83xx_idc_enter_failed_state(adapter, 1);
         }
         return -EIO;
     }
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
     return 0;
 }
 
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
-    dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+    struct qlcnic_hardware_context *ahw = adapter->ahw;
+    u32 val, owner;
+
+    val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+    if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+        owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+        if (ahw->pci_func == owner) {
+            qlcnic_83xx_stop_hw(adapter);
+            qlcnic_dump_fw(adapter);
+        }
+    }
+
+    netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+                __func__);
     clear_bit(__QLCNIC_RESETTING, &adapter->state);
-    adapter->ahw->idc.err_code = -EIO;
+    ahw->idc.err_code = -EIO;
 
-    return 0;
+    return;
 }
 
 static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
     adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
     qlcnic_83xx_periodic_tasks(adapter);
 
-    /* Do not reschedule if firmaware is in hanged state and auto
-     * recovery is disabled
-     */
-    if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
-        return;
-
     /* Re-schedule the function */
     if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
         qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
     }
 
     val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
-    if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
-        !qlcnic_auto_fw_reset) {
-        dev_err(&adapter->pdev->dev,
-                "%s:failed, device in non reset mode\n", __func__);
+    if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+        netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                    __func__);
+        qlcnic_83xx_idc_enter_failed_state(adapter, 0);
         qlcnic_83xx_unlock_driver(adapter);
         return;
     }
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
     if (size & 0xF)
         size = (size + 16) & ~0xF;
 
-    p_cache = kzalloc(size, GFP_KERNEL);
+    p_cache = vzalloc(size);
     if (p_cache == NULL)
         return -ENOMEM;
 
     ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
                                             size / sizeof(u32));
     if (ret) {
-        kfree(p_cache);
+        vfree(p_cache);
         return ret;
     }
     /* 16 byte write to MS memory */
     ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
                                       size / 16);
     if (ret) {
-        kfree(p_cache);
+        vfree(p_cache);
         return ret;
     }
-    kfree(p_cache);
+    vfree(p_cache);
 
     return ret;
 }
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
     p_dev->ahw->reset.seq_index = index;
 }
 
-static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
 {
     p_dev->ahw->reset.seq_index = 0;
 
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
     val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
     if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
         qlcnic_dump_fw(adapter);
+
+    if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+        netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+                    __func__);
+        qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+        return err;
+    }
+
     qlcnic_83xx_init_hw(adapter);
 
     if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
         ahw->nic_mode = QLCNIC_DEFAULT_MODE;
         adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
         ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-        adapter->max_sds_rings = ahw->max_rx_ques;
-        adapter->max_tx_rings = ahw->max_tx_ques;
+        adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+        adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
     } else {
         return -EIO;
     }
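
Note on the kzalloc()-to-vzalloc() change in qlcnic_83xx_copy_bootloader() above: the bootloader image can be large enough that a physically contiguous kmalloc-based allocation starts failing under fragmentation, and the buffer is only touched by the CPU, so virtually contiguous memory suffices. A minimal sketch of the pattern (hypothetical stage_large_buffer() helper, not the driver's code):

    #include <linux/types.h>
    #include <linux/vmalloc.h>
    #include <linux/errno.h>

    /* Sketch: large, CPU-only staging buffer. vzalloc() gives zeroed,
     * virtually contiguous memory, so it keeps working when a big
     * physically contiguous kzalloc() would fail. Not suitable for DMA. */
    static int stage_large_buffer(size_t size)
    {
        u8 *buf;

        buf = vzalloc(size);
        if (!buf)
            return -ENOMEM;

        /* ... fill and consume buf with CPU accesses only ... */

        vfree(buf);    /* vmalloc memory must be freed with vfree() */
        return 0;
    }
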
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..6b08194aa0d4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
 
-static inline int qlcnic_82xx_statistics(void)
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
 {
-    return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
-           ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+    return ARRAY_SIZE(qlcnic_gstrings_stats) +
+           ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+           QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
-static inline int qlcnic_83xx_statistics(void)
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
 {
-    return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+    return ARRAY_SIZE(qlcnic_gstrings_stats) +
+           ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
            ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
-           ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+           ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+           QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
 }
 
 static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
 {
-    if (qlcnic_82xx_check(adapter))
-        return qlcnic_82xx_statistics();
-    else if (qlcnic_83xx_check(adapter))
-        return qlcnic_83xx_statistics();
-    else
-        return -1;
+    int len = -1;
+
+    if (qlcnic_82xx_check(adapter)) {
+        len = qlcnic_82xx_statistics(adapter);
+        if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+            len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+    } else if (qlcnic_83xx_check(adapter)) {
+        len = qlcnic_83xx_statistics(adapter);
+    }
+
+    return len;
 }
 
 #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev,
 static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
                                       u8 rx_ring, u8 tx_ring)
 {
+    if (rx_ring == 0 || tx_ring == 0)
+        return -EINVAL;
+
     if (rx_ring != 0) {
         if (rx_ring > adapter->max_sds_rings) {
-            netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+            netdev_err(adapter->netdev,
+                       "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
                        rx_ring, adapter->max_sds_rings);
             return -EINVAL;
         }
     }
 
     if (tx_ring != 0) {
-        if (qlcnic_82xx_check(adapter) &&
-            (tx_ring > adapter->max_tx_rings)) {
+        if (tx_ring > adapter->max_tx_rings) {
             netdev_err(adapter->netdev,
                        "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
                        tx_ring, adapter->max_tx_rings);
             return -EINVAL;
         }
-
-        if (qlcnic_83xx_check(adapter) &&
-            (tx_ring > QLCNIC_SINGLE_RING)) {
-            netdev_err(adapter->netdev,
-                       "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
-                       tx_ring, QLCNIC_SINGLE_RING);
-            return -EINVAL;
-        }
     }
 
     return 0;
@@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
 
 static int qlcnic_get_sset_count(struct net_device *dev, int sset)
 {
-    int len;
 
     struct qlcnic_adapter *adapter = netdev_priv(dev);
     switch (sset) {
     case ETH_SS_TEST:
         return QLCNIC_TEST_LEN;
     case ETH_SS_STATS:
-        len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
-        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
-            qlcnic_83xx_check(adapter))
-            return len;
-        return qlcnic_82xx_statistics();
+        return qlcnic_dev_statistics_len(adapter);
     default:
         return -EOPNOTSUPP;
     }
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
     struct qlcnic_hardware_context *ahw = adapter->ahw;
     struct qlcnic_cmd_args cmd;
     int ret, drv_sds_rings = adapter->drv_sds_rings;
+    int drv_tx_rings = adapter->drv_tx_rings;
 
     if (qlcnic_83xx_check(adapter))
         return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +979,7 @@ free_diag_res:
 
 clear_diag_irq:
     adapter->drv_sds_rings = drv_sds_rings;
+    adapter->drv_tx_rings = drv_tx_rings;
     clear_bit(__QLCNIC_RESETTING, &adapter->state);
 
     return ret;
@@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
     return data;
 }
 
-static void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
     struct qlcnic_host_tx_ring *tx_ring;
     int ring;
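
Note on the ethtool hunks above: ETH_SS_STATS now returns a count computed from the same string tables (plus the per-ring Tx counters) that get_strings/get_ethtool_stats walk; if the two ever disagree, userspace ethtool misparses the dump. A compilable toy illustrating the invariant (array names here are made up, not the driver's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char * const base_stats[] = { "rx_pkts", "tx_pkts" };
    static const char * const mac_stats[]  = { "mac_rx_err", "mac_tx_err" };
    #define TX_STATS_PER_RING 3 /* e.g. xmit_on, xmit_off, xmit_called */

    /* The count handed to ethtool must be derived from the same tables
     * that later produce the strings and the values. */
    static int stats_count(int drv_tx_rings)
    {
        return ARRAY_SIZE(base_stats) + ARRAY_SIZE(mac_stats) +
               TX_STATS_PER_RING * drv_tx_rings;
    }

    int main(void)
    {
        printf("sset count for 4 Tx rings: %d\n", stats_count(4)); /* 2+2+12 = 16 */
        return 0;
    }
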
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
     struct qlcnic_skb_frag *buffrag;
     int i, j;
 
+    spin_lock(&tx_ring->tx_clean_lock);
+
     cmd_buf = tx_ring->cmd_buf_arr;
     for (i = 0; i < tx_ring->num_desc; i++) {
         buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
         }
         cmd_buf++;
     }
+
+    spin_unlock(&tx_ring->tx_clean_lock);
 }
 
 void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..ad1531ae3aa8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
     if (adapter->ahw->linkup && !linkup) {
         netdev_info(netdev, "NIC Link is down\n");
         adapter->ahw->linkup = 0;
-        if (netif_running(netdev)) {
-            netif_carrier_off(netdev);
-            netif_tx_stop_all_queues(netdev);
-        }
+        netif_carrier_off(netdev);
     } else if (!adapter->ahw->linkup && linkup) {
+        /* Do not advertise Link up if the port is in loopback mode */
+        if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+            return;
+
         netdev_info(netdev, "NIC Link is up\n");
         adapter->ahw->linkup = 1;
-        if (netif_running(netdev)) {
-            netif_carrier_on(netdev);
-            netif_wake_queue(netdev);
-        }
+        netif_carrier_on(netdev);
     }
 }
 
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
     struct net_device *netdev = adapter->netdev;
     struct qlcnic_skb_frag *frag;
 
-    if (!spin_trylock(&adapter->tx_clean_lock))
+    if (!spin_trylock(&tx_ring->tx_clean_lock))
         return 1;
 
     sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
             break;
         }
 
+    tx_ring->sw_consumer = sw_consumer;
+
     if (count && netif_running(netdev)) {
-        tx_ring->sw_consumer = sw_consumer;
         smp_mb();
         if (netif_tx_queue_stopped(tx_ring->txq) &&
             netif_carrier_ok(netdev)) {
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
      */
     hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
     done = (sw_consumer == hw_consumer);
-    spin_unlock(&adapter->tx_clean_lock);
+
+    spin_unlock(&tx_ring->tx_clean_lock);
 
     return done;
 }
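
Note on qlcnic_process_cmd_ring() above: it takes the per-ring cleanup lock with spin_trylock() and simply reports "not done" if another context already holds it, rather than spinning in softirq context. A compilable userspace analogue of that back-off pattern, using pthread spinlocks (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t tx_clean_lock;

    /* Returns 1 ("done for now") when someone else is already cleaning,
     * mirroring how the driver defers instead of contending on the lock. */
    static int process_ring(void)
    {
        if (pthread_spin_trylock(&tx_clean_lock) != 0)
            return 1;               /* another cleaner is active: back off */

        /* ... reclaim completed descriptors here ... */

        pthread_spin_unlock(&tx_clean_lock);
        return 0;
    }

    int main(void)
    {
        pthread_spin_init(&tx_clean_lock, PTHREAD_PROCESS_PRIVATE);
        printf("process_ring() -> %d\n", process_ring());
        pthread_spin_destroy(&tx_clean_lock);
        return 0;
    }
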
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..550791b8fbae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
     } else {
         adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
         adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+        adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
         adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
     }
 
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
     if (qlcnic_sriov_vf_check(adapter))
         qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
     smp_mb();
-    spin_lock(&adapter->tx_clean_lock);
     netif_carrier_off(netdev);
     adapter->ahw->linkup = 0;
     netif_tx_disable(netdev);
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
     for (ring = 0; ring < adapter->drv_tx_rings; ring++)
         qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
-    spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
     qlcnic_detach(adapter);
 
     adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
-    adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
     adapter->ahw->diag_test = test;
     adapter->ahw->linkup = 0;
 
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
         }
         memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
         tx_ring->cmd_buf_arr = cmd_buf_arr;
+        spin_lock_init(&tx_ring->tx_clean_lock);
     }
 
     if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     rwlock_init(&adapter->ahw->crb_lock);
     mutex_init(&adapter->ahw->mem_lock);
 
-    spin_lock_init(&adapter->tx_clean_lock);
     INIT_LIST_HEAD(&adapter->mac_list);
 
     qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
     struct qlcnic_adapter *adapter = netdev_priv(netdev);
     struct net_device_stats *stats = &netdev->stats;
 
+    if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+        qlcnic_update_stats(adapter);
+
     stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
     stats->tx_packets = adapter->stats.xmitfinished;
     stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..024f8161d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
     num_vfs = sriov->num_vfs;
     max = num_vfs + 1;
     info->bit_offsets = 0xffff;
-    info->max_tx_ques = res->num_tx_queues / max;
     info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
     num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
 
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
         info->max_tx_mac_filters = temp;
         info->min_tx_bw = 0;
         info->max_tx_bw = MAX_BW;
+        info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
     } else {
         id = qlcnic_sriov_func_to_index(adapter, func);
         if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
         info->max_tx_bw = vp->max_tx_bw;
         info->max_rx_ucast_mac_filters = num_vf_macs;
         info->max_tx_mac_filters = num_vf_macs;
+        info->max_tx_ques = QLCNIC_SINGLE_RING;
     }
 
     info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..797b56a0efc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
     if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
         return -EOPNOTSUPP;
 
-    if (netif_msg_hw(priv)) {
-        if (priv->dma_cap.time_stamp) {
-            pr_debug("IEEE 1588-2002 Time Stamp supported\n");
-            priv->adv_ts = 0;
-        }
-        if (priv->dma_cap.atime_stamp && priv->extend_desc) {
-            pr_debug
-                ("IEEE 1588-2008 Advanced Time Stamp supported\n");
-            priv->adv_ts = 1;
-        }
-    }
+    priv->adv_ts = 0;
+    if (priv->dma_cap.atime_stamp && priv->extend_desc)
+        priv->adv_ts = 1;
+
+    if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+        pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+    if (netif_msg_hw(priv) && priv->adv_ts)
+        pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
 
     priv->hw->ptp = &stmmac_ptp;
     priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
     priv->hw->ptp->config_addend(priv->ioaddr, addend);
 
-    spin_unlock_irqrestore(&priv->lock, flags);
+    spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
     return 0;
 }
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
     priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
 
-    spin_unlock_irqrestore(&priv->lock, flags);
+    spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
     return 0;
 }
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5120d9ce1dd4..5330fd298705 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
             /* set speed_in input in case RMII mode is used in 100Mbps */
             if (phy->speed == 100)
                 mac_control |= BIT(15);
+            else if (phy->speed == 10)
+                mac_control |= BIT(18); /* In Band mode */
 
             *link = true;
         } else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
     while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
         for (i = res->start; i <= res->end; i++) {
             if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
-                                 dev_name(priv->dev), priv)) {
+                                 dev_name(&pdev->dev), priv)) {
                 dev_err(priv->dev, "error attaching irq\n");
                 goto clean_ale_ret;
             }
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+                                 void *accel_priv)
 {
     return smp_processor_id();
 }
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
     case HDLCDRVCTL_CALIBRATE:
         if(!capable(CAP_SYS_RAWIO))
             return -EPERM;
+        if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+            return -EINVAL;
         s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
         return 0;
 
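
Note on the hdlcdrv fix above: it rejects user input that would overflow the signed multiplication calibrate * bitrate by checking the value against INT_MAX divided by the other factor first; the division cannot overflow, so the guard itself is safe. A compilable toy version of the same guard (names are illustrative):

    #include <limits.h>
    #include <stdio.h>

    /* Returns -1 instead of computing a * b when the product would
     * overflow int; a > INT_MAX / b is the standard pre-check because
     * the division itself cannot overflow. Assumes a >= 0 and b > 0. */
    static int checked_scale(int a, int b)
    {
        if (a > INT_MAX / b)
            return -1;
        return a * b;
    }

    int main(void)
    {
        printf("%d\n", checked_scale(1000, 9600));        /* fine: 9600000 */
        printf("%d\n", checked_scale(INT_MAX / 2, 9600)); /* rejected: -1 */
        return 0;
    }
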
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
         break;
 
     case SIOCYAMGCFG:
+        memset(&yi, 0, sizeof(yi));
         yi.cfg.mask = 0xffffffff;
         yi.cfg.iobase = yp->iobase;
         yi.cfg.irq = yp->irq;
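
Note on the yam change above: it zeroes the whole ioctl reply before filling individual fields. A struct assigned field by field can still carry uninitialized stack bytes in padding or in fields the handler forgot, and those bytes would leak to userspace on copy-out. A compilable sketch of the habit (hypothetical reply struct, not yam's own):

    #include <string.h>
    #include <stdio.h>

    /* Hypothetical ioctl reply: the char followed by an int leaves
     * padding bytes that field-by-field assignment never touches. */
    struct cfg_reply {
        unsigned char irq;
        unsigned int iobase;
    };

    static void fill_reply(struct cfg_reply *r)
    {
        memset(r, 0, sizeof(*r)); /* clear padding and any skipped fields */
        r->irq = 7;
        r->iobase = 0x300;
    }

    int main(void)
    {
        struct cfg_reply r;

        fill_reply(&r);
        printf("irq=%u iobase=0x%x\n", r.irq, r.iobase);
        return 0;
    }
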
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..71baeb3ed905 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
     struct sk_buff *skb;
 
     net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
-    if (!net) {
-        netdev_err(net, "got receive callback but net device"
-                   " not initialized yet\n");
+    if (!net || net->reg_state != NETREG_REGISTERED) {
         packet->status = NVSP_STAT_FAIL;
         return 0;
     }
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
         return -EINVAL;
 
     nvdev->start_remove = true;
-    cancel_delayed_work_sync(&ndevctx->dwork);
     cancel_work_sync(&ndevctx->work);
     netif_tx_disable(ndev);
     rndis_filter_device_remove(hdev);
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
     SET_ETHTOOL_OPS(net, &ethtool_ops);
     SET_NETDEV_DEV(net, &dev->device);
 
-    ret = register_netdev(net);
-    if (ret != 0) {
-        pr_err("Unable to register netdev.\n");
-        free_netdev(net);
-        goto out;
-    }
-
     /* Notify the netvsc driver of the new device */
     device_info.ring_size = ring_size;
     ret = rndis_filter_device_add(dev, &device_info);
     if (ret != 0) {
         netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-        unregister_netdev(net);
         free_netdev(net);
         hv_set_drvdata(dev, NULL);
         return ret;
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
 
     netif_carrier_on(net);
 
-out:
+    ret = register_netdev(net);
+    if (ret != 0) {
+        pr_err("Unable to register netdev.\n");
+        rndis_filter_device_remove(dev);
+        free_netdev(net);
+    }
+
     return ret;
 }
 
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 
     if (vlan->fwd_priv) {
         skb->dev = vlan->lowerdev;
-        ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+        ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
     } else {
         ret = macvlan_queue_xmit(skb, dev);
     }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
     .cache_update = eth_header_cache_update,
 };
 
+static struct rtnl_link_ops macvlan_link_ops;
+
 static int macvlan_open(struct net_device *dev)
 {
     struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
         goto hash_add;
     }
 
-    if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+    if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
+        dev->rtnl_link_ops == &macvlan_link_ops) {
         vlan->fwd_priv =
             lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
 
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
          */
         if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
             vlan->fwd_priv = NULL;
-        } else {
-            dev->features &= ~NETIF_F_LLTX;
+        } else
             return 0;
-        }
     }
 
     err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
                                               netdev_features_t features)
 {
     struct macvlan_dev *vlan = netdev_priv(dev);
+    netdev_features_t mask;
+
+    features |= NETIF_F_ALL_FOR_ALL;
+    features &= (vlan->set_features | ~MACVLAN_FEATURES);
+    mask = features;
+
+    features = netdev_increment_features(vlan->lowerdev->features,
+                                         features,
+                                         mask);
+    features |= NETIF_F_LLTX;
 
-    return features & (vlan->set_features | ~MACVLAN_FEATURES);
+    return features;
 }
 
 static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
         break;
     case NETDEV_FEAT_CHANGE:
         list_for_each_entry(vlan, &port->vlans, list) {
-            vlan->dev->features = dev->features & MACVLAN_FEATURES;
             vlan->dev->gso_max_size = dev->gso_max_size;
-            netdev_features_change(vlan->dev);
+            netdev_update_features(vlan->dev);
         }
         break;
     case NETDEV_UNREGISTER:
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..98434b84f041 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
     int err = 0;
 
     atomic_set(&phydev->irq_disable, 0);
-    if (request_irq(phydev->irq, phy_interrupt,
-                    IRQF_SHARED,
-                    "phy_interrupt",
-                    phydev) < 0) {
+    if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+                    phydev) < 0) {
         pr_warn("%s: Can't get IRQ %d (PHY)\n",
                 phydev->bus->name, phydev->irq);
         phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
     return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+                             void *accel_priv)
 {
     /*
      * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv)
 {
     struct tun_struct *tun = netdev_priv(dev);
     struct tun_flow_entry *e;
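
Note on the tilegx, team, and tun hunks: all three gain the same extra parameter because this merge window changed the ndo_select_queue prototype to carry an accel_priv cookie for L2 forwarding offload, so every in-tree implementation has to grow the argument even if it ignores it. A sketch of an implementation against the prototype as it stands in this tree (the hook changed again in later kernels, so treat the exact signature as tied to this version; the function itself is invented for illustration):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/smp.h>

    /* Per this tree's struct net_device_ops:
     *   u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
     *                           void *accel_priv);
     * A driver that keys only on the CPU can keep doing so and ignore
     * accel_priv entirely, as team/tun/tilegx do above. */
    static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                    void *accel_priv)
    {
        return smp_processor_id() % dev->real_num_tx_queues;
    }
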
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
       module will be called cdc_mbim.
 
 config USB_NET_DM9601
-    tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+    tristate "Davicom DM96xx based USB 10/100 ethernet devices"
     depends on USB_USBNET
     select CRC32
     help
-      This option adds support for Davicom DM9601 based USB 1.1
-      10/100 Ethernet adapters.
+      This option adds support for Davicom DM9601/DM9620/DM9621A
+      based USB 10/100 Ethernet adapters.
 
 config USB_NET_SR9700
     tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..14aa48fa8d7e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
 /*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
  *
  * Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
     dev->net->ethtool_ops = &dm9601_ethtool_ops;
     dev->net->hard_header_len += DM_TX_OVERHEAD;
     dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-    dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+    /* dm9620/21a require room for 4 byte padding, even in dm9601
+     * mode, so we need +1 to be able to receive full size
+     * ethernet frames.
+     */
+    dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
 
     dev->mii.dev = dev->net;
     dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
                                        gfp_t flags)
 {
-    int len;
+    int len, pad;
 
     /* format:
        b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
        b3..n: packet data
     */
 
-    len = skb->len;
+    len = skb->len + DM_TX_OVERHEAD;
+
+    /* workaround for dm962x errata with tx fifo getting out of
+     * sync if a USB bulk transfer retry happens right after a
+     * packet with odd / maxpacket length by adding up to 3 bytes
+     * padding.
+     */
+    while ((len & 1) || !(len % dev->maxpacket))
+        len++;
 
-    if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+    len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+    pad = len - skb->len;
+
+    if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
         struct sk_buff *skb2;
 
-        skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+        skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
         dev_kfree_skb_any(skb);
         skb = skb2;
         if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 
     __skb_push(skb, DM_TX_OVERHEAD);
 
-    /* usbnet adds padding if length is a multiple of packet size
-       if so, adjust length value in header */
-    if ((skb->len % dev->maxpacket) == 0)
-        len++;
+    if (pad) {
+        memset(skb->data + skb->len, 0, pad);
+        __skb_put(skb, pad);
+    }
 
     skb->data[0] = len;
     skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
 }
 
 static const struct driver_info dm9601_info = {
-    .description = "Davicom DM9601 USB Ethernet",
+    .description = "Davicom DM96xx USB 10/100 Ethernet",
     .flags = FLAG_ETHER | FLAG_LINK_INTR,
     .bind = dm9601_bind,
     .rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
     USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
     .driver_info = (unsigned long)&dm9601_info,
     },
+    {
+    USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
+    .driver_info = (unsigned long)&dm9601_info,
+    },
     {}, // END
 };
 
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
 module_usb_driver(dm9601_driver);
 
 MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
 MODULE_LICENSE("GPL");
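
Note on the dm9601 Tx fixup above: it rounds the transfer length up until it is even and not a multiple of the endpoint's maxpacket size, then zero-pads the skb by the difference, sidestepping a DM962x FIFO errata triggered by odd or maxpacket-multiple transfers. The arithmetic is easy to check in isolation (compilable toy; the values are illustrative):

    #include <stdio.h>

    #define DM_TX_OVERHEAD 2   /* 2-byte hardware length header */

    /* Returns how many zero bytes of padding a payload of skb_len needs
     * so that header+payload+pad is even and not a maxpacket multiple. */
    static int tx_pad_bytes(int skb_len, int maxpacket)
    {
        int len = skb_len + DM_TX_OVERHEAD;

        while ((len & 1) || !(len % maxpacket))
            len++;

        return len - DM_TX_OVERHEAD - skb_len;
    }

    int main(void)
    {
        /* 62-byte frame on a 64-byte endpoint: 62+2 = 64 is a maxpacket
         * multiple, 65 is odd, so the total grows to 66: 2 pad bytes. */
        printf("pad for 62: %d\n", tx_pad_bytes(62, 64)); /* 2 */
        printf("pad for 60: %d\n", tx_pad_bytes(60, 64)); /* 0 */
        return 0;
    }
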
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
 #define BM_REQUEST_TYPE (0xa1)
 #define B_NOTIFICATION (0x20)
 #define W_VALUE (0x0)
-#define W_INDEX (0x2)
 #define W_LENGTH (0x2)
 
 #define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
     struct uart_icount *icount;
     struct hso_serial_state_notification *serial_state_notification;
     struct usb_device *usb;
+    int if_num;
 
     /* Sanity checks */
     if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
         handle_usb_error(status, __func__, serial->parent);
         return;
     }
+
+    /* tiocmget is only supported on HSO_PORT_MODEM */
     tiocmget = serial->tiocmget;
     if (!tiocmget)
         return;
+    BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
     usb = serial->parent->usb;
+    if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+    /* wIndex should be the USB interface number of the port to which the
+     * notification applies, which should always be the Modem port.
+     */
     serial_state_notification = &tiocmget->serial_state_notification;
     if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
         serial_state_notification->bNotification != B_NOTIFICATION ||
         le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
-        le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+        le16_to_cpu(serial_state_notification->wIndex) != if_num ||
         le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
         dev_warn(&usb->dev,
                  "hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..f54637828574 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
 struct mcs7830_data {
     u8 multi_filter[8];
     u8 config;
-    u8 link_counter;
 };
 
 static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
 {
     u8 *buf = urb->transfer_buffer;
     bool link, link_changed;
-    struct mcs7830_data *data = mcs7830_get_data(dev);
 
     if (urb->actual_length < 16)
         return;
 
-    link = !(buf[1] & 0x20);
+    link = !(buf[1] == 0x20);
     link_changed = netif_carrier_ok(dev->net) != link;
     if (link_changed) {
-        data->link_counter++;
-        /*
-           track link state 20 times to guard against erroneous
-           link state changes reported sometimes by the chip
-         */
-        if (data->link_counter > 20) {
-            data->link_counter = 0;
-            usbnet_link_change(dev, link, 0);
-            netdev_dbg(dev->net, "Link Status is: %d\n", link);
-        }
-    } else
-        data->link_counter = 0;
+        usbnet_link_change(dev, link, 0);
+        netdev_dbg(dev->net, "Link Status is: %d\n", link);
+    }
 }
 
 static const struct driver_info moschip_info = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d208f8604981..5d776447d9c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
     if (err)
         return err;
 
-    if (netif_running(vi->dev))
+    if (netif_running(vi->dev)) {
+        for (i = 0; i < vi->curr_queue_pairs; i++)
+            if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                schedule_delayed_work(&vi->refill, 0);
+
         for (i = 0; i < vi->max_queue_pairs; i++)
             virtnet_napi_enable(&vi->rq[i]);
+    }
 
     netif_device_attach(vi->dev);
 
-    for (i = 0; i < vi->curr_queue_pairs; i++)
-        if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-            schedule_delayed_work(&vi->refill, 0);
-
     mutex_lock(&vi->config_lock);
     vi->config_enable = true;
     mutex_unlock(&vi->config_lock);
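
Note on the virtio_net reorder above: restore now refills the receive rings before re-enabling NAPI, so polling never starts on an empty ring; the refill work is only scheduled when the immediate fill could not top the ring up completely. A compilable userspace sketch of that ordering contract (all types and helpers here are invented stand-ins):

    #include <stdio.h>

    struct my_ring { int posted; };
    struct my_dev  { int nr_queues; struct my_ring rx[4]; };

    /* Stubs standing in for the real buffer-posting / NAPI machinery. */
    static int  fill_ring(struct my_ring *r)      { r->posted = 1; return 1; }
    static void schedule_refill(struct my_dev *d) { (void)d; }
    static void enable_poll(struct my_ring *r)
    {
        printf("poll enabled on ring with %d buffer(s)\n", r->posted);
    }

    /* Buffers are posted first; only then is the poller that consumes
     * them enabled, so the first poll never sees an empty ring. */
    static void restore_rx(struct my_dev *dev)
    {
        int i;

        for (i = 0; i < dev->nr_queues; i++)
            if (!fill_ring(&dev->rx[i]))   /* could not fill completely? */
                schedule_refill(dev);      /* top it up asynchronously   */

        for (i = 0; i < dev->nr_queues; i++)
            enable_poll(&dev->rx[i]);
    }

    int main(void)
    {
        struct my_dev dev = { .nr_queues = 2 };

        restore_rx(&dev);
        return 0;
    }
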
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 249e01c5600c..ed384fee76ac 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
         /* update header length based on lower device */
         dev->hard_header_len = lowerdev->hard_header_len +
                                (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-    }
+    } else if (use_ipv6)
+        vxlan->flags |= VXLAN_F_IPV6;
 
     if (data[IFLA_VXLAN_TOS])
         vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253c26ce..a366d6b4626f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
             mask2 |= ATH9K_INT_CST;
         if (isr2 & AR_ISR_S2_TSFOOR)
             mask2 |= ATH9K_INT_TSFOOR;
+
+        if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+            REG_WRITE(ah, AR_ISR_S2, isr2);
+            isr &= ~AR_ISR_BCNMISC;
+        }
     }
 
-    isr = REG_READ(ah, AR_ISR_RAC);
+    if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+        isr = REG_READ(ah, AR_ISR_RAC);
+
     if (isr == 0xffffffff) {
         *masked = 0;
         return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
         *masked |= ATH9K_INT_TX;
 
-        s0_s = REG_READ(ah, AR_ISR_S0_S);
+        if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+            s0_s = REG_READ(ah, AR_ISR_S0_S);
+            s1_s = REG_READ(ah, AR_ISR_S1_S);
+        } else {
+            s0_s = REG_READ(ah, AR_ISR_S0);
+            REG_WRITE(ah, AR_ISR_S0, s0_s);
+            s1_s = REG_READ(ah, AR_ISR_S1);
+            REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+            isr &= ~(AR_ISR_TXOK |
+                     AR_ISR_TXDESC |
+                     AR_ISR_TXERR |
+                     AR_ISR_TXEOL);
+        }
+
         ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
         ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
-        s1_s = REG_READ(ah, AR_ISR_S1_S);
         ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
         ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
     }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
         *masked |= mask2;
     }
 
-    if (AR_SREV_9100(ah))
-        return true;
-
-    if (isr & AR_ISR_GENTMR) {
+    if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
         u32 s5_s;
 
-        s5_s = REG_READ(ah, AR_ISR_S5_S);
+        if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+            s5_s = REG_READ(ah, AR_ISR_S5_S);
+        } else {
+            s5_s = REG_READ(ah, AR_ISR_S5);
+        }
+
         ah->intr_gen_timer_trigger =
             MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
 
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
133 if ((s5_s & AR_ISR_S5_TIM_TIMER) && 154 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
134 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 155 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
135 *masked |= ATH9K_INT_TIM_TIMER; 156 *masked |= ATH9K_INT_TIM_TIMER;
157
158 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
159 REG_WRITE(ah, AR_ISR_S5, s5_s);
160 isr &= ~AR_ISR_GENTMR;
161 }
136 } 162 }
137 163
164 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
165 REG_WRITE(ah, AR_ISR, isr);
166 REG_READ(ah, AR_ISR);
167 }
168
169 if (AR_SREV_9100(ah))
170 return true;
171
138 if (sync_cause) { 172 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause); 173 ath9k_debug_sync_cause(common, sync_cause);
140 fatal_int = 174 fatal_int =
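
The ar9002_hw_get_isr() changes above split the status-read path on ATH9K_HW_CAP_RAC_SUPPORTED: chips with the read-and-clear (RAC) shadow registers are acknowledged implicitly by reading AR_ISR_RAC and the AR_ISR_S*_S shadows, while chips without it must read the primary registers and write the observed bits back (and mask them out of isr) to acknowledge. A minimal userspace sketch of the two acknowledge styles — the register model and values are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Toy interrupt-status bank: the "RAC" shadow clears on read, the
     * primary register clears when the observed bits are written back.
     */
    struct toy_hw {
    	uint32_t isr;      /* primary status, write-bits-back-to-clear */
    	uint32_t isr_rac;  /* shadow copy, clears itself on read */
    	bool rac_supported;
    };

    static uint32_t read_and_ack_isr(struct toy_hw *hw)
    {
    	if (hw->rac_supported) {
    		uint32_t v = hw->isr_rac; /* hardware clears on read */
    		hw->isr_rac = 0;
    		return v;
    	} else {
    		uint32_t v = hw->isr;
    		hw->isr &= ~v;            /* ack: write bits back */
    		return v;
    	}
    }

    int main(void)
    {
    	struct toy_hw hw = { .isr = 0x11, .isr_rac = 0x11,
    			     .rac_supported = false };

    	printf("first read:  0x%x\n", read_and_ack_isr(&hw)); /* 0x11 */
    	printf("second read: 0x%x\n", read_and_ack_isr(&hw)); /* 0x0  */
    	return 0;
    }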
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..608d739d1378 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 	struct ath9k_vif_iter_data *iter_data = data;
 	int i;
 
-	for (i = 0; i < ETH_ALEN; i++)
-		iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+	if (iter_data->hw_macaddr != NULL) {
+		for (i = 0; i < ETH_ALEN; i++)
+			iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+	} else {
+		iter_data->hw_macaddr = mac;
+	}
 }
 
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
-				     struct ieee80211_vif *vif)
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
+					 struct ieee80211_vif *vif)
 {
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 	struct ath9k_vif_iter_data iter_data;
 
 	/*
-	 * Use the hardware MAC address as reference, the hardware uses it
-	 * together with the BSSID mask when matching addresses.
+	 * Pick the MAC address of the first interface as the new hardware
+	 * MAC address. The hardware will use it together with the BSSID mask
+	 * when matching addresses.
 	 */
-	iter_data.hw_macaddr = common->macaddr;
+	iter_data.hw_macaddr = NULL;
 	memset(&iter_data.mask, 0xff, ETH_ALEN);
 
 	if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
 				   ath9k_htc_bssid_iter, &iter_data);
 
 	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+	if (iter_data.hw_macaddr)
+		memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
 	ath_hw_setbssidmask(common);
 }
 
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 		goto out;
 	}
 
-	ath9k_htc_set_bssid_mask(priv, vif);
+	ath9k_htc_set_mac_bssid_mask(priv, vif);
 
 	priv->vif_slot |= (1 << avp->index);
 	priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
 
 	ath9k_htc_set_opmode(priv);
 
-	ath9k_htc_set_bssid_mask(priv, vif);
+	ath9k_htc_set_mac_bssid_mask(priv, vif);
 
 	/*
 	 * Stop ANI only if there are no associated station interfaces.
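
The rewritten iterator implements the usual BSSID-mask construction: starting from an all-ones mask, clear every bit position in which some interface's MAC address differs from the reference address, so the hardware's (addr & mask) == (macaddr & mask) test accepts all local addresses. A standalone sketch of that accumulation, with invented sample addresses:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Clear every mask bit in which `mac` differs from the reference
     * address, mirroring ath9k_htc_bssid_iter(): after visiting all
     * interfaces, (addr & mask) == (ref & mask) holds for each one.
     */
    static void bssid_mask_accumulate(const uint8_t *ref, const uint8_t *mac,
    				  uint8_t *mask)
    {
    	int i;

    	for (i = 0; i < ETH_ALEN; i++)
    		mask[i] &= ~(ref[i] ^ mac[i]);
    }

    int main(void)
    {
    	/* Invented addresses differing only in the last octet's low bit. */
    	const uint8_t ref[ETH_ALEN]  = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x40 };
    	const uint8_t vif2[ETH_ALEN] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x41 };
    	uint8_t mask[ETH_ALEN];
    	int i;

    	memset(mask, 0xff, ETH_ALEN);
    	bssid_mask_accumulate(ref, vif2, mask);

    	for (i = 0; i < ETH_ALEN; i++)
    		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
    	/* prints ff:ff:ff:ff:ff:fe - only the lowest bit is "don't care" */
    	return 0;
    }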
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c7b166..21aa09e0e825 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	/*
-	 * Use the hardware MAC address as reference, the hardware uses it
-	 * together with the BSSID mask when matching addresses.
+	 * Pick the MAC address of the first interface as the new hardware
+	 * MAC address. The hardware will use it together with the BSSID mask
+	 * when matching addresses.
 	 */
 	memset(iter_data, 0, sizeof(*iter_data));
 	memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 86605027c41d..e6272546395a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438bb2faf..a1b32ee9594a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	    (hwsim_flags & HWSIM_TX_STAT_ACK)) {
 		if (skb->len >= 16) {
 			hdr = (struct ieee80211_hdr *) skb->data;
-			mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
+			mac80211_hwsim_monitor_ack(data2->channel,
 						   hdr->addr2);
 		}
 		txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 }
 
 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	skb->priority = cfg80211_classify8021d(skb);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
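
This hunk — like the bcm_select_queue(), xlr_net_select_queue() and rtw_select_queue() hunks later in this diff — only adds a third parameter, tracking the ndo_select_queue prototype change that landed in this merge window; drivers that do no forwarding acceleration simply ignore it. A compilable sketch of the new callback shape, with trivial stand-ins for the kernel types:

    #include <stdio.h>
    #include <stddef.h>

    struct net_device;                        /* opaque stand-in */
    struct sk_buff { unsigned int priority; };/* minimal stand-in */

    typedef unsigned short u16;

    /* New-style selector: accel_priv is required by the prototype even
     * when unused.
     */
    static u16 example_select_queue(struct net_device *dev,
    				struct sk_buff *skb, void *accel_priv)
    {
    	(void)dev;
    	(void)accel_priv;	/* unused here */
    	return (u16)(skb->priority & 0x7);
    }

    int main(void)
    {
    	struct sk_buff skb = { .priority = 5 };

    	printf("queue %u\n", example_select_queue(NULL, &skb, NULL));
    	return 0;
    }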
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f494444bcd1..5a53195d016b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 	};
 	int index = rtlpci->rx_ring[rx_queue_idx].idx;
 
+	if (rtlpci->driver_is_goingto_unload)
+		return;
 	/*RX NORMAL PKT */
 	while (count--) {
 		/*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
 	 */
 	set_hal_stop(rtlhal);
 
+	rtlpci->driver_is_goingto_unload = true;
 	rtlpriv->cfg->ops->disable_interrupt(hw);
 	cancel_work_sync(&rtlpriv->works.lps_change_work);
 
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
 	ppsc->rfchange_inprogress = true;
 	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
 
-	rtlpci->driver_is_goingto_unload = true;
 	rtlpriv->cfg->ops->hw_disable(hw);
 	/* some things are not needed if firmware not available */
 	if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..c47794b9d42f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
 
 #define MAX_PENDING_REQS 256
 
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
 	 */
 	RING_IDX rx_req_cons_peek;
 
-	/* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
-	 * head/fragment page uses 2 copy operations because it
-	 * straddles two buffers in the frontend.
-	 */
-	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
-	struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-
+	/* This array is allocated seperately as it is large */
+	struct gnttab_copy *grant_copy_op;
+
+	/* We create one meta structure per ring request we consume, so
+	 * the maximum number is the same as the ring size.
+	 */
+	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
 
 	u8 fe_dev_addr[6];
 
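
The new comment pins down the sizing argument: an skb can carry up to MAX_SKB_FRAGS fragments yet still fit a single ring slot, so the worst case is MAX_SKB_FRAGS copy operations per slot rather than the old "2 per slot" bound. The arithmetic, using assumed values — MAX_SKB_FRAGS of 17 and a 256-entry RX ring are typical for this era but are not stated in the diff:

    #include <stdio.h>

    /* Illustrative values, not taken from this diff. */
    #define MAX_SKB_FRAGS           17
    #define XEN_NETIF_RX_RING_SIZE 256
    #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

    int main(void)
    {
    	/* 17 * 256 = 4352 entries - too big to embed in struct xenvif */
    	printf("worst-case copy ops: %d\n", MAX_GRANT_COPY_OPS);
    	return 0;
    }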
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..fff8cddfed81 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	SET_NETDEV_DEV(dev, parent);
 
 	vif = netdev_priv(dev);
+
+	vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+				     MAX_GRANT_COPY_OPS);
+	if (vif->grant_copy_op == NULL) {
+		pr_warn("Could not allocate grant copy space for %s\n", name);
+		free_netdev(dev);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	vif->domid  = domid;
 	vif->handle = handle;
 	vif->can_sg = 1;
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
 
 	unregister_netdev(vif->dev);
 
+	vfree(vif->grant_copy_op);
 	free_netdev(vif->dev);
 
 	module_put(THIS_MODULE);
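
With MAX_GRANT_COPY_OPS entries the grant-copy array is now far too large to embed in struct xenvif, hence the separate vmalloc() in xenvif_alloc() with a matching vfree() in xenvif_free(). The shape of that companion-buffer pattern in plain C — malloc/free standing in for vmalloc/vfree and the netdev helpers:

    #include <stdlib.h>
    #include <errno.h>

    /* The large array lives beside the main object; both must be
     * released together, and a failed second allocation must undo
     * the first.
     */
    struct toy_vif {
    	double *big_array;	/* stands in for grant_copy_op */
    };

    static struct toy_vif *toy_vif_alloc(size_t nops)
    {
    	struct toy_vif *vif = malloc(sizeof(*vif));

    	if (!vif)
    		return NULL;

    	vif->big_array = malloc(nops * sizeof(*vif->big_array));
    	if (!vif->big_array) {
    		free(vif);	/* undo the first allocation */
    		return NULL;
    	}
    	return vif;
    }

    static void toy_vif_free(struct toy_vif *vif)
    {
    	free(vif->big_array);	/* release in reverse order */
    	free(vif);
    }

    int main(void)
    {
    	struct toy_vif *vif = toy_vif_alloc(4352);

    	if (!vif)
    		return ENOMEM;
    	toy_vif_free(vif);
    	return 0;
    }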
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e884ee1fe7ed..78425554a537 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
 	if (!npo.copy_prod)
 		return;
 
-	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 
 	err = -EPROTO;
 
+	if (fragment)
+		goto out;
+
 	switch (ip_hdr(skb)->protocol) {
 	case IPPROTO_TCP:
 		err = maybe_pull_tail(skb,
@@ -1206,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct tcphdr, check)))
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
 			goto out;
+		}
 
 		if (recalculate_partial_csum)
 			tcp_hdr(skb)->check =
@@ -1224,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct udphdr, check)))
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
 			goto out;
+		}
 
 		if (recalculate_partial_csum)
 			udp_hdr(skb)->check =
@@ -1347,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct tcphdr, check)))
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
 			goto out;
+		}
 
 		if (recalculate_partial_csum)
 			tcp_hdr(skb)->check =
@@ -1365,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
 			goto out;
 
 		if (!skb_partial_csum_set(skb, off,
-					  offsetof(struct udphdr, check)))
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
 			goto out;
+		}
 
 		if (recalculate_partial_csum)
 			udp_hdr(skb)->check =
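
The checksum_setup hunks all fix the same stale-error pitfall: after a successful maybe_pull_tail(), err holds 0, so a subsequent skb_partial_csum_set() failure used to goto out while reporting success. Each failure branch now re-arms err = -EPROTO first. The pitfall in miniature — EPROTO is 71 on Linux, and the helper functions below are invented:

    #include <stdio.h>

    static int pull_headers(void)     { return 0; } /* succeeds: returns 0 */
    static int set_partial_csum(void) { return 0; } /* fails: returns false */

    static int checksum_setup_buggy(void)
    {
    	int err = -71;			/* -EPROTO */

    	err = pull_headers();		/* success leaves err == 0 */
    	if (!set_partial_csum())
    		goto out;		/* BUG: returns 0 = success */
    out:
    	return err;
    }

    static int checksum_setup_fixed(void)
    {
    	int err = -71;

    	err = pull_headers();
    	if (!set_partial_csum()) {
    		err = -71;		/* re-arm the error before bailing */
    		goto out;
    	}
    out:
    	return err;
    }

    int main(void)
    {
    	printf("buggy: %d, fixed: %d\n",
    	       checksum_setup_buggy(), checksum_setup_fixed()); /* 0, -71 */
    	return 0;
    }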
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
 	depends on OF_IRQ
 	help
 	  This option builds in test cases for the device tree infrastructure
-	  that are executed one at boot time, and the results dumped to the
+	  that are executed once at boot time, and the results dumped to the
 	  console.
 
 	  If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
 		 (unsigned long long)cp, (unsigned long long)s,
 		 (unsigned long long)da);
 
-	/*
-	 * If the number of address cells is larger than 2 we assume the
-	 * mapping doesn't specify a physical address. Rather, the address
-	 * specifies an identifier that must match exactly.
-	 */
-	if (na > 2 && memcmp(range, addr, na * 4) != 0)
-		return OF_BAD_ADDR;
-
 	if (da < cp || da >= (cp + s))
 		return OF_BAD_ADDR;
 	return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
  */
 void __init unflatten_and_copy_device_tree(void)
 {
-	int size = __be32_to_cpu(initial_boot_params->totalsize);
-	void *dt = early_init_dt_alloc_memory_arch(size,
+	int size;
+	void *dt;
+
+	if (!initial_boot_params) {
+		pr_warn("No valid device tree found, continuing without\n");
+		return;
+	}
+
+	size = __be32_to_cpu(initial_boot_params->totalsize);
+	dt = early_init_dt_alloc_memory_arch(size,
 		__alignof__(struct boot_param_header));
 
 	if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		if (of_get_property(ipar, "interrupt-controller", NULL) !=
 				NULL) {
 			pr_debug(" -> got it !\n");
-			of_node_put(old);
 			return 0;
 		}
 
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 		 * Successfully parsed an interrrupt-map translation; copy new
 		 * interrupt specifier into the out_irq structure
 		 */
-		of_node_put(out_irq->np);
-		out_irq->np = of_node_get(newpar);
+		out_irq->np = newpar;
 
 		match_array = imap - newaddrsize - newintsize;
 		for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 	}
  fail:
 	of_node_put(ipar);
-	of_node_put(out_irq->np);
 	of_node_put(newpar);
 
 	return -EINVAL;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f67673..e86439283a5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
 
 	status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
 	if (ACPI_FAILURE(status)) {
-		acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
+		if (status != AE_NOT_FOUND)
+			acpi_handle_warn(handle,
+				"can't evaluate _ADR (%#x)\n", status);
 		return AE_OK;
 	}
 
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
 	slot->flags &= (~SLOT_ENABLED);
 }
 
+static bool acpiphp_no_hotplug(acpi_handle handle)
+{
+	struct acpi_device *adev = NULL;
+
+	acpi_bus_get_device(handle, &adev);
+	return adev && adev->flags.no_hotplug;
+}
+
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+	struct acpiphp_func *func;
+
+	list_for_each_entry(func, &slot->funcs, sibling)
+		if (acpiphp_no_hotplug(func_to_handle(func)))
+			return true;
+
+	return false;
+}
 
 /**
  * get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
 		unsigned long long sta;
 
 		status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-		alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+		alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+			|| acpiphp_no_hotplug(handle);
 	}
 	if (!alive) {
 		u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
 		struct pci_dev *dev, *tmp;
 
 		mutex_lock(&slot->crit_sect);
-		/* wake up all functions */
-		if (get_slot_status(slot) == ACPI_STA_ALL) {
+		if (slot_no_hotplug(slot)) {
+			; /* do nothing */
+		} else if (get_slot_status(slot) == ACPI_STA_ALL) {
 			/* remove stale devices if any */
 			list_for_each_entry_safe(dev, tmp, &bus->devices,
 						 bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..f7ebdba14bde 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
 static void pci_acpi_setup(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	acpi_handle handle = ACPI_HANDLE(dev);
-	struct acpi_device *adev;
+	struct acpi_device *adev = ACPI_COMPANION(dev);
 
-	if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+	if (!adev)
+		return;
+
+	pci_acpi_add_pm_notifier(adev, pci_dev);
+	if (!adev->wakeup.flags.valid)
 		return;
 
 	device_set_wakeup_capable(dev, true);
 	acpi_pci_sleep_wake(pci_dev, false);
-
-	pci_acpi_add_pm_notifier(adev, pci_dev);
 	if (adev->wakeup.flags.run_wake)
 		device_set_run_wake(dev, true);
 }
 
 static void pci_acpi_cleanup(struct device *dev)
 {
-	acpi_handle handle = ACPI_HANDLE(dev);
-	struct acpi_device *adev;
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+
+	if (!adev)
+		return;
 
-	if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+	pci_acpi_remove_pm_notifier(adev);
+	if (adev->wakeup.flags.valid) {
 		device_set_wakeup_capable(dev, false);
 		device_set_run_wake(dev, false);
-		pci_acpi_remove_pm_notifier(adev);
 	}
 }
 
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
 config OMAP_USB2
 	tristate "OMAP USB2 PHY Driver"
 	depends on ARCH_OMAP2PLUS
+	depends on USB_PHY
 	select GENERIC_PHY
-	select USB_PHY
 	select OMAP_CONTROL_USB
 	help
 	  Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
 config TWL4030_USB
 	tristate "TWL4030 USB Transceiver Driver"
 	depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+	depends on USB_PHY
 	select GENERIC_PHY
-	select USB_PHY
 	help
 	  Enable this to support the USB OTG transceiver on TWL4030
 	  family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 	int id;
 	struct phy *phy;
 
-	if (!dev) {
-		dev_WARN(dev, "no device provided for PHY\n");
-		ret = -EINVAL;
-		goto err0;
-	}
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
 
 	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
-	if (!phy) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	if (!phy)
+		return ERR_PTR(-ENOMEM);
 
 	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
 	if (id < 0) {
 		dev_err(dev, "unable to get id\n");
 		ret = id;
-		goto err0;
+		goto free_phy;
 	}
 
 	device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
 	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
 	if (ret)
-		goto err1;
+		goto put_dev;
 
 	ret = device_add(&phy->dev);
 	if (ret)
-		goto err1;
+		goto put_dev;
 
 	if (pm_runtime_enabled(dev)) {
 		pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
 
 	return phy;
 
-err1:
-	ida_remove(&phy_ida, phy->id);
+put_dev:
 	put_device(&phy->dev);
+	ida_remove(&phy_ida, phy->id);
+free_phy:
 	kfree(phy);
-
-err0:
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(phy_create);
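
The phy_create() cleanup swaps the numbered err0/err1 labels for the label-per-resource idiom: each label names the first thing it releases, and a failure jumps to the label that unwinds exactly what has succeeded so far, in reverse order. A self-contained sketch of the idiom, with plain allocations standing in for the ida slot and the device object:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_phy {
    	void *id;	/* stands in for the ida slot */
    	void *dev;	/* stands in for the registered device */
    };

    static struct toy_phy *toy_phy_create(void)
    {
    	struct toy_phy *phy = malloc(sizeof(*phy));

    	if (!phy)
    		return NULL;	/* nothing to unwind yet */

    	phy->id = malloc(16);
    	if (!phy->id)
    		goto free_phy;

    	phy->dev = malloc(16);
    	if (!phy->dev)
    		goto put_id;

    	return phy;

    put_id:			/* unwind in reverse order of setup */
    	free(phy->id);
    free_phy:
    	free(phy);
    	return NULL;
    }

    static void toy_phy_destroy(struct toy_phy *phy)
    {
    	free(phy->dev);
    	free(phy->id);
    	free(phy);
    }

    int main(void)
    {
    	struct toy_phy *phy = toy_phy_create();

    	printf("%s\n", phy ? "created" : "failed");
    	if (phy)
    		toy_phy_destroy(phy);
    	return 0;
    }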
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..114f5ef4b73a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
512 512
513static const struct acpi_device_id byt_gpio_acpi_match[] = { 513static const struct acpi_device_id byt_gpio_acpi_match[] = {
514 { "INT33B2", 0 }, 514 { "INT33B2", 0 },
515 { "INT33FC", 0 },
515 { } 516 { }
516}; 517};
517MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); 518MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
 #define PINMUX_GPIO(_pin)					\
 	[GPIO_##_pin] = {					\
 		.pin = (u16)-1,					\
-		.name = __stringify(name),			\
+		.name = __stringify(GPIO_##_pin),		\
 		.enum_id = _pin##_DATA,				\
 	}
 
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054afe840..85ad58c6da17 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
 config BATTERY_MAX17042
 	tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
 	  in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e667296360..557af943b2f5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
 	dev_set_drvdata(dev, psy);
 	psy->dev = dev;
 
+	rc = dev_set_name(dev, "%s", psy->name);
+	if (rc)
+		goto dev_set_name_failed;
+
 	INIT_WORK(&psy->changed_work, power_supply_changed_work);
 
 	rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
 	if (rc)
 		goto wakeup_init_failed;
 
-	rc = kobject_set_name(&dev->kobj, "%s", psy->name);
-	if (rc)
-		goto kobject_set_name_failed;
-
 	rc = device_add(dev);
 	if (rc)
 		goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
 register_cooler_failed:
 	psy_unregister_thermal(psy);
 register_thermal_failed:
-wakeup_init_failed:
 	device_del(dev);
-kobject_set_name_failed:
 device_add_failed:
+wakeup_init_failed:
 check_supplies_failed:
+dev_set_name_failed:
 	put_device(dev);
 success:
 	return rc;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 	return 0;
 }
 
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+	{}
+};
+
 static int rapl_check_unit(struct rapl_package *rp, int cpu)
 {
 	u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
 	 * time unit: 1/time_unit_divisor Seconds
 	 */
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit_divisor = 1 << value;
-
+	/* some CPUs have different way to calculate energy unit */
+	if (x86_match_cpu(energy_unit_quirk_ids))
+		rp->energy_unit_divisor = 1000000 / (1 << value);
+	else
+		rp->energy_unit_divisor = 1 << value;
 
 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
 static const struct x86_cpu_id rapl_ids[] = {
 	{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
 	{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+	{ X86_VENDOR_INTEL, 6, 0x37},/* VLV */
 	{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
 	{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
 	/* TODO: Add more CPU IDs after testing */
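
On the already-supported parts the ENERGY_UNIT field encodes counts-per-joule as 1 << value; the Valleyview quirk instead treats each count as (1 << value) microjoules, which the code expresses as a divisor of 1000000 / (1 << value). Worked numbers, assuming an ESU field value of 5 — an assumption for illustration, not a value from this diff:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int value = 5;                         /* assumed ESU field */
    	unsigned long std = 1UL << value;               /* 32 counts/J */
    	unsigned long vlv = 1000000UL / (1UL << value); /* 31250 counts/J */

    	/* standard parts: each count is 1/32 J (about 31.25 mJ) */
    	printf("standard divisor:  %lu counts/J\n", std);
    	/* Valleyview: each count is 2^5 uJ = 32 uJ, hence 31250 counts/J */
    	printf("VLV quirk divisor: %lu counts/J\n", vlv);
    	return 0;
    }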
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..9e61922d8230 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
 	platform_set_drvdata(pdev, s2mps11);
 
 	config.dev = &pdev->dev;
-	config.regmap = iodev->regmap;
+	config.regmap = iodev->regmap_pmic;
 	config.driver_data = s2mps11;
 	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
 		if (!reg_np) {
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e09a4c..34629ea913d4 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
 		return rc;
 	}
 
-	tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+	tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
 	if (IS_ERR(tp->screen)) {
 		rc = PTR_ERR(tp->screen);
 		raw3270_put_view(&tp->view);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
 		schedule_delayed_work(&tgt->sess_del_work, 0);
 	else
 		schedule_delayed_work(&tgt->sess_del_work,
-		    jiffies - sess->expires);
+		    sess->expires - jiffies);
 }
 
 /* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 	struct scsi_qla_host *vha = tgt->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess;
-	unsigned long flags;
+	unsigned long flags, elapsed;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	while (!list_empty(&tgt->del_sess_list)) {
 		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
 		    del_list_entry);
-		if (time_after_eq(jiffies, sess->expires)) {
+		elapsed = jiffies;
+		if (time_after_eq(elapsed, sess->expires)) {
 			qlt_undelete_sess(sess);
 
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 			ha->tgt.tgt_ops->put_sess(sess);
 		} else {
 			schedule_delayed_work(&tgt->sess_del_work,
-			    jiffies - sess->expires);
+			    sess->expires - elapsed);
 			break;
 		}
 	}
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
 	if (rc != 0) {
 		ha->tgt.tgt_ops = NULL;
 		ha->tgt.target_lport_ptr = NULL;
+		scsi_host_put(host);
 	}
 	mutex_unlock(&qla_tgt_mutex);
 	return rc;
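
The qla_target fix is pure unsigned arithmetic: while a session has not yet expired, jiffies - sess->expires underflows to an enormous value, so the deletion work was being scheduled absurdly far in the future; sess->expires - jiffies is the intended remaining delay. In miniature — values invented, output shown for a 64-bit unsigned long:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long now = 1000;
    	unsigned long expires = 1500;	/* 500 ticks in the future */

    	/* underflows: 18446744073709551116 on 64-bit */
    	printf("wrong delay: %lu\n", now - expires);
    	/* intended remaining time: 500 */
    	printf("right delay: %lu\n", expires - now);
    	return 0;
    }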
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
 	return 0;
 }
 
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	return ClassifyPacket(netdev_priv(dev), skb);
 }
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
 		release_firmware(fw);
 	}
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL_GPL(comedi_load_firmware);
 
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
 	BOARD_ADLINK_PCI7296,
 	BOARD_CB_PCIDIO24,
 	BOARD_CB_PCIDIO24H,
-	BOARD_CB_PCIDIO48H,
+	BOARD_CB_PCIDIO48H_OLD,
+	BOARD_CB_PCIDIO48H_NEW,
 	BOARD_CB_PCIDIO96H,
 	BOARD_NI_PCIDIO96,
 	BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
 		.dio_badr	= 2,
 		.n_8255		= 1,
 	},
-	[BOARD_CB_PCIDIO48H] = {
+	[BOARD_CB_PCIDIO48H_OLD] = {
 		.name		= "cb_pci-dio48h",
 		.dio_badr	= 1,
 		.n_8255		= 2,
 	},
+	[BOARD_CB_PCIDIO48H_NEW] = {
+		.name		= "cb_pci-dio48h",
+		.dio_badr	= 2,
+		.n_8255		= 2,
+	},
 	[BOARD_CB_PCIDIO96H] = {
 		.name		= "cb_pci-dio96h",
 		.dio_badr	= 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
 	{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
 	{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
 	{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
-	{ PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+	  .driver_data = BOARD_CB_PCIDIO48H_OLD },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+	  .driver_data = BOARD_CB_PCIDIO48H_NEW },
 	{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
 	{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
 	{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
 		BIT(IIO_CHAN_INFO_SAMP_FREQ),				\
 	.scan_index = idx,						\
-	.scan_type = IIO_ST('s', 16, 16, IIO_BE),			\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 16,						\
+		.storagebits = 16,					\
+		.endianness = IIO_BE,					\
+	},								\
 }
 
 static const struct iio_chan_spec hmc5843_channels[] = {
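
The hmc5843 hunk replaces the positional IIO_ST() packing macro with a designated initializer for scan_type. The benefit in miniature — named fields survive reordering and document themselves; the struct below is a stand-in, not the real struct iio_scan_type:

    #include <stdio.h>

    struct toy_scan_type {
    	char sign;
    	unsigned char realbits;
    	unsigned char storagebits;
    	int endianness;
    };

    #define TOY_BE 1
    /* old style: positional, easy to mis-order silently */
    #define TOY_ST(si, rb, sb, e) { si, rb, sb, e }

    int main(void)
    {
    	struct toy_scan_type a = TOY_ST('s', 16, 16, TOY_BE);
    	struct toy_scan_type b = {
    		.sign = 's',		/* each field named explicitly */
    		.realbits = 16,
    		.storagebits = 16,
    		.endianness = TOY_BE,
    	};

    	printf("%c %u %u %d\n", a.sign, a.realbits, a.storagebits, a.endianness);
    	printf("%c %u %u %d\n", b.sign, b.realbits, b.storagebits, b.endianness);
    	return 0;
    }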
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 0507b662ae40..09ef5fb8bae6 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
 
 	imx_drm_device_put();
 
-	drm_mode_config_cleanup(imxdrm->drm);
+	drm_vblank_cleanup(imxdrm->drm);
 	drm_kms_helper_poll_fini(imxdrm->drm);
+	drm_mode_config_cleanup(imxdrm->drm);
 
 	return 0;
 }
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
 	if (!file->is_master)
 		return;
 
-	for (i = 0; i < 4; i++)
-		imx_drm_disable_vblank(drm , i);
+	for (i = 0; i < MAX_CRTC; i++)
+		imx_drm_disable_vblank(drm, i);
 }
 
 static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
 	struct imx_drm_device *imxdrm = __imx_drm_device();
 	int ret;
 
-	drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
-			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
 	if (ret)
 		return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
 	drm_crtc_helper_add(imx_drm_crtc->crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
+	drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
+			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
 	drm_mode_group_reinit(imxdrm->drm);
 
 	return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 	ret = drm_mode_group_init_legacy_group(imxdrm->drm,
 			&imxdrm->drm->primary->mode_group);
 	if (ret)
-		goto err_init;
+		goto err_kms;
 
 	ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
 	if (ret)
-		goto err_init;
+		goto err_kms;
 
 	/*
 	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,14 +443,20 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 	 */
 	imxdrm->drm->vblank_disable_allowed = true;
 
-	if (!imx_drm_device_get())
+	if (!imx_drm_device_get()) {
 		ret = -EINVAL;
+		goto err_vblank;
+	}
 
 	platform_set_drvdata(drm->platformdev, drm);
+	mutex_unlock(&imxdrm->mutex);
+	return 0;
 
-	ret = 0;
-
-err_init:
+err_vblank:
+	drm_vblank_cleanup(drm);
+err_kms:
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
 	mutex_unlock(&imxdrm->mutex);
 
 	return ret;
@@ -494,6 +502,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 
 	mutex_lock(&imxdrm->mutex);
 
+	/*
+	 * The vblank arrays are dimensioned by MAX_CRTC - we can't
+	 * pass IDs greater than this to those functions.
+	 */
+	if (imxdrm->pipes >= MAX_CRTC) {
+		ret = -EINVAL;
+		goto err_busy;
+	}
+
 	if (imxdrm->drm->open_count) {
 		ret = -EBUSY;
 		goto err_busy;
@@ -530,6 +547,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 	return 0;
 
 err_register:
+	list_del(&imx_drm_crtc->list);
 	kfree(imx_drm_crtc);
 err_alloc:
 err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
 	struct drm_encoder encoder;
 	struct imx_drm_encoder *imx_drm_encoder;
 	struct device *dev;
-	spinlock_t enable_lock; /* serializes tve_enable/disable */
 	spinlock_t lock;	/* register lock */
 	bool enabled;
 	int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
 
 static void tve_enable(struct imx_tve *tve)
 {
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&tve->enable_lock, flags);
 	if (!tve->enabled) {
 		tve->enabled = true;
 		clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
 			     TVE_CD_SM_IEN |
 			     TVE_CD_LM_IEN |
 			     TVE_CD_MON_END_IEN);
-
-	spin_unlock_irqrestore(&tve->enable_lock, flags);
 }
 
 static void tve_disable(struct imx_tve *tve)
 {
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&tve->enable_lock, flags);
 	if (tve->enabled) {
 		tve->enabled = false;
 		ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
 					 TVE_IPU_CLK_EN | TVE_EN, 0);
 		clk_disable_unprepare(tve->clk);
 	}
-	spin_unlock_irqrestore(&tve->enable_lock, flags);
 }
 
 static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
 
 	tve->dev = &pdev->dev;
 	spin_lock_init(&tve->lock);
-	spin_lock_init(&tve->enable_lock);
 
 	ddc_node = of_parse_phandle(np, "ddc", 0);
 	if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
 	},
 };
 
+static DEFINE_MUTEX(ipu_client_id_mutex);
 static int ipu_client_id;
 
-static int ipu_add_subdevice_pdata(struct device *dev,
-		const struct ipu_platform_reg *reg)
-{
-	struct platform_device *pdev;
-
-	pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
-			&reg->pdata, sizeof(struct ipu_platform_reg));
-
-	return PTR_ERR_OR_ZERO(pdev);
-}
-
 static int ipu_add_client_devices(struct ipu_soc *ipu)
 {
-	int ret;
-	int i;
+	struct device *dev = ipu->dev;
+	unsigned i;
+	int id, ret;
+
+	mutex_lock(&ipu_client_id_mutex);
+	id = ipu_client_id;
+	ipu_client_id += ARRAY_SIZE(client_reg);
+	mutex_unlock(&ipu_client_id_mutex);
 
 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
 		const struct ipu_platform_reg *reg = &client_reg[i];
-		ret = ipu_add_subdevice_pdata(ipu->dev, reg);
-		if (ret)
+		struct platform_device *pdev;
+
+		pdev = platform_device_register_data(dev, reg->name,
+				id++, &reg->pdata, sizeof(reg->pdata));
+
+		if (IS_ERR(pdev))
 			goto err_register;
 	}
 
 	return 0;
 
 err_register:
-	platform_device_unregister_children(to_platform_device(ipu->dev));
+	platform_device_unregister_children(to_platform_device(dev));
 
 	return ret;
 }
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	return (u16)smp_processor_id();
 }
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 	return dscp >> 5;
 }
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
 		 */
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
+		np->np_thread = NULL;
 	}
 
 	np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
 		/*
-		 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
-		 * that adds support for RESERVE/RELEASE.  There is a bug
-		 * add with this new functionality that sets R/W bits when
-		 * neither CDB carries any READ or WRITE datapayloads.
+		 * From RFC-3720 Section 10.3.1:
+		 *
+		 * "Either or both of R and W MAY be 1 when either the
+		 *  Expected Data Transfer Length and/or Bidirectional Read
+		 *  Expected Data Transfer Length are 0"
+		 *
+		 * For this case, go ahead and clear the unnecssary bits
+		 * to avoid any confusion with ->data_direction.
 		 */
-		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
-			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
-			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
-			goto done;
-		}
+		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 
-		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
-			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject_cmd(cmd,
-				ISCSI_REASON_BOOKMARK_INVALID, buf);
+			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
 	}
-done:
 
 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
 									\
 	if (!capable(CAP_SYS_ADMIN))					\
 		return -EPERM;						\
-									\
+	if (count >= sizeof(auth->name))				\
+		return -EINVAL;						\
 	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
 	if (!strncmp("NULL", auth->name, 4))				\
 		auth->naf_flags &= ~flags;				\
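
The configfs store macro now rejects writes that cannot fit before calling snprintf(), which would otherwise truncate the value silently while the write still appeared to succeed. The guard in userspace form:

    #include <stdio.h>
    #include <stddef.h>

    /* Reject input that cannot fit instead of letting snprintf()
     * silently cut it off; returns -1 where the kernel macro
     * returns -EINVAL.
     */
    static int store_value(char *dst, size_t dst_sz,
    		       const char *src, size_t count)
    {
    	if (count >= dst_sz)
    		return -1;

    	snprintf(dst, dst_sz, "%s", src);
    	return 0;
    }

    int main(void)
    {
    	char buf[8];

    	printf("short: %d\n", store_value(buf, sizeof(buf), "ok", 2));
    	printf("long:  %d\n",
    	       store_value(buf, sizeof(buf), "toolongvalue", 12));
    	return 0;	/* prints 0 then -1 */
    }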
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
 
 out:
 	stop = kthread_should_stop();
-	if (!stop && signal_pending(current)) {
-		spin_lock_bh(&np->np_thread_lock);
-		stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
-		spin_unlock_bh(&np->np_thread_lock);
-	}
 	/* Wait for another socket.. */
 	if (!stop)
 		return 1;
@@ -1415,7 +1410,6 @@ exit:
 	iscsi_stop_login_thread_timer(np);
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
-	np->np_thread = NULL;
 	spin_unlock_bh(&np->np_thread_lock);
 
 	return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1106 dev->dev_attrib.block_size = block_size; 1106 dev->dev_attrib.block_size = block_size;
1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1108 dev, block_size); 1108 dev, block_size);
1109
1110 if (dev->dev_attrib.max_bytes_per_io)
1111 dev->dev_attrib.hw_max_sectors =
1112 dev->dev_attrib.max_bytes_per_io / block_size;
1113
1109 return 0; 1114 return 0;
1110} 1115}
1111 1116
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
68 TARGET_CORE_MOD_VERSION); 68 TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
70 " MaxSectors: %u\n", 70 hba->hba_id, fd_host->fd_host_id);
71 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
72 71
73 return 0; 72 return 0;
74} 73}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
220 } 219 }
221 220
222 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; 221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
7#define FD_DEVICE_QUEUE_DEPTH 32 7#define FD_DEVICE_QUEUE_DEPTH 32
8#define FD_MAX_DEVICE_QUEUE_DEPTH 128 8#define FD_MAX_DEVICE_QUEUE_DEPTH 128
9#define FD_BLOCKSIZE 512 9#define FD_BLOCKSIZE 512
10#define FD_MAX_SECTORS 2048 10/*
11 * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
12 */
13#define FD_MAX_BYTES 8388608
11 14
12#define RRF_EMULATE_CDB 0x01 15#define RRF_EMULATE_CDB 0x01
13#define RRF_GOT_LBA 0x02 16#define RRF_GOT_LBA 0x02
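FD_MAX_BYTES works out to 8388608 bytes = 2048 iovecs x one 4096-byte page each, the most a single vfs_writev()/vfs_readv() call can carry; target_core_file.c then derives hw_max_sectors as FD_MAX_BYTES / block_size, and se_dev_set_block_size() re-derives it whenever the block size changes. A quick check of the arithmetic (hw_max_sectors here is an illustrative helper, not the kernel field):

#include <stdio.h>

#define FD_MAX_BYTES 8388608    /* 2048 iovecs * 4096 bytes */

static unsigned int hw_max_sectors(unsigned int block_size)
{
        return FD_MAX_BYTES / block_size;
}

int main(void)
{
        printf("512B blocks:  %u sectors\n", hw_max_sectors(512));      /* 16384 */
        printf("4096B blocks: %u sectors\n", hw_max_sectors(4096));     /* 2048 */
        return 0;
}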
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
279 acl->se_tpg = tpg; 279 acl->se_tpg = tpg;
280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
281 spin_lock_init(&acl->stats_lock);
282 acl->dynamic_node_acl = 1; 281 acl->dynamic_node_acl = 1;
283 282
284 tpg->se_tpg_tfo->set_default_node_attributes(acl); 283 tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
406 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 405 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
407 acl->se_tpg = tpg; 406 acl->se_tpg = tpg;
408 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 407 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
409 spin_lock_init(&acl->stats_lock);
410 408
411 tpg->se_tpg_tfo->set_default_node_attributes(acl); 409 tpg->se_tpg_tfo->set_default_node_attributes(acl);
412 410
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
658 spin_lock_init(&lun->lun_sep_lock); 656 spin_lock_init(&lun->lun_sep_lock);
659 init_completion(&lun->lun_ref_comp); 657 init_completion(&lun->lun_ref_comp);
660 658
661 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
662 if (ret < 0)
663 return ret;
664
665 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 659 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
666 if (ret < 0) { 660 if (ret < 0)
667 percpu_ref_cancel_init(&lun->lun_ref);
668 return ret; 661 return ret;
669 }
670 662
671 return 0; 663 return 0;
672} 664}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b62768f2b..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
93 size_t canon_head; 93 size_t canon_head;
94 size_t echo_head; 94 size_t echo_head;
95 size_t echo_commit; 95 size_t echo_commit;
96 size_t echo_mark;
96 DECLARE_BITMAP(char_map, 256); 97 DECLARE_BITMAP(char_map, 256);
97 98
98 /* private to n_tty_receive_overrun (single-threaded) */ 99 /* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
336{ 337{
337 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 338 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
338 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; 339 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
340 ldata->echo_mark = 0;
339 ldata->line_start = 0; 341 ldata->line_start = 0;
340 342
341 ldata->erasing = 0; 343 ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
787 size_t head; 789 size_t head;
788 790
789 head = ldata->echo_head; 791 head = ldata->echo_head;
792 ldata->echo_mark = head;
790 old = ldata->echo_commit - ldata->echo_tail; 793 old = ldata->echo_commit - ldata->echo_tail;
791 794
792 /* Process committed echoes if the accumulated # of bytes 795 /* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
811 size_t echoed; 814 size_t echoed;
812 815
813 if ((!L_ECHO(tty) && !L_ECHONL(tty)) || 816 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
814 ldata->echo_commit == ldata->echo_tail) 817 ldata->echo_mark == ldata->echo_tail)
815 return; 818 return;
816 819
817 mutex_lock(&ldata->output_lock); 820 mutex_lock(&ldata->output_lock);
821 ldata->echo_commit = ldata->echo_mark;
818 echoed = __process_echoes(tty); 822 echoed = __process_echoes(tty);
819 mutex_unlock(&ldata->output_lock); 823 mutex_unlock(&ldata->output_lock);
820 824
@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
822 tty->ops->flush_chars(tty); 826 tty->ops->flush_chars(tty);
823} 827}
824 828
829/* NB: echo_mark and echo_head should be equivalent here */
825static void flush_echoes(struct tty_struct *tty) 830static void flush_echoes(struct tty_struct *tty)
826{ 831{
827 struct n_tty_data *ldata = tty->disc_data; 832 struct n_tty_data *ldata = tty->disc_data;
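The echo_mark field added above snapshots echo_head in commit_echoes(); process_echoes() then tests echo_mark against echo_tail to decide whether anything is pending, and advances echo_commit to that snapshot under output_lock. A toy, single-threaded sketch of the snapshot-then-consume idea — the indices are stand-ins for the n_tty_data fields, and the __process_echoes() drain step is collapsed to one assignment:

#include <stdio.h>

/* toy stand-ins for the n_tty_data echo indices */
static size_t echo_head, echo_mark, echo_commit, echo_tail;

static void commit_echoes(void)
{
        echo_mark = echo_head;          /* snapshot the producer position */
}

static void process_echoes(void)
{
        if (echo_mark == echo_tail)
                return;                 /* nothing committed yet */
        echo_commit = echo_mark;        /* consume up to the snapshot */
        echo_tail = echo_commit;        /* pretend the drain caught up */
}

int main(void)
{
        echo_head = 5;
        commit_echoes();
        process_echoes();
        printf("commit=%zu tail=%zu\n", echo_commit, echo_tail);  /* 5 5 */
        return 0;
}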
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
96 if (offset == UART_LCR) { 96 if (offset == UART_LCR) {
97 int tries = 1000; 97 int tries = 1000;
98 while (tries--) { 98 while (tries--) {
99 if (value == p->serial_in(p, UART_LCR)) 99 unsigned int lcr = p->serial_in(p, UART_LCR);
100 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
100 return; 101 return;
101 dw8250_force_idle(p); 102 dw8250_force_idle(p);
102 writeb(value, p->membase + (UART_LCR << p->regshift)); 103 writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
132 if (offset == UART_LCR) { 133 if (offset == UART_LCR) {
133 int tries = 1000; 134 int tries = 1000;
134 while (tries--) { 135 while (tries--) {
135 if (value == p->serial_in(p, UART_LCR)) 136 unsigned int lcr = p->serial_in(p, UART_LCR);
137 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
136 return; 138 return;
137 dw8250_force_idle(p); 139 dw8250_force_idle(p);
138 writel(value, p->membase + (UART_LCR << p->regshift)); 140 writel(value, p->membase + (UART_LCR << p->regshift));
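Both dw8250_serial_out() variants now mask UART_LCR_SPAR (the sticky-parity bit, 0x20) out of the comparison between the value written and the LCR read back, so a DesignWare block that does not latch that bit no longer burns all 1000 retries. The masked comparison as a standalone, runnable check:

#include <stdio.h>

#define UART_LCR_SPAR 0x20      /* sticky-parity bit, not latched by some IP blocks */

static int lcr_matches(unsigned int written, unsigned int readback)
{
        return (written & ~UART_LCR_SPAR) == (readback & ~UART_LCR_SPAR);
}

int main(void)
{
        /* 0x3f written, controller drops the sticky-parity bit on read-back */
        printf("%d\n", lcr_matches(0x3f, 0x1f));        /* 1: treated as a match */
        printf("%d\n", lcr_matches(0x3f, 0x1e));        /* 0: genuine mismatch */
        return 0;
}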
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
455static const struct acpi_device_id dw8250_acpi_match[] = { 457static const struct acpi_device_id dw8250_acpi_match[] = {
456 { "INT33C4", 0 }, 458 { "INT33C4", 0 },
457 { "INT33C5", 0 }, 459 { "INT33C5", 0 },
460 { "INT3434", 0 },
461 { "INT3435", 0 },
458 { "80860F0A", 0 }, 462 { "80860F0A", 0 },
459 { }, 463 { },
460}; 464};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
240 continue; 240 continue;
241 } 241 }
242 242
243#ifdef SUPPORT_SYSRQ
243 /* 244 /*
244 * uart_handle_sysrq_char() doesn't work if 245 * uart_handle_sysrq_char() doesn't work if
245 * spinlocked, for some reason 246 * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
253 } 254 }
254 spin_lock(&port->lock); 255 spin_lock(&port->lock);
255 } 256 }
257#endif
256 258
257 port->icount.rx++; 259 port->icount.rx++;
258 260
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); 86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
87} 87}
88 88
89/*
90 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
91 * Returns 1 if count was successfully changed; @*old will have @new value.
92 * Returns 0 if count was not changed; @*old will have most recent sem->count
93 */
89static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) 94static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
90{ 95{
91 long tmp = *old; 96 long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
92 *old = atomic_long_cmpxchg(&sem->count, *old, new); 97 if (tmp == *old) {
93 return *old == tmp; 98 *old = new;
99 return 1;
100 } else {
101 *old = tmp;
102 return 0;
103 }
94} 104}
95 105
96/* 106/*
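The rewritten ldsem_cmpxchg() fixes a subtle bug: the old version unconditionally stored the cmpxchg return value into *old, so on success *old held the pre-exchange count rather than the value just installed, and retry loops built on it could spin against a stale expectation. The new contract matches C11 compare-exchange semantics. A runnable userspace analogue using stdatomic (this ldsem_cmpxchg is a re-implementation for illustration, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long count;

/* same contract as the fixed ldsem_cmpxchg(): returns 1 and sets *old = new
 * on success; returns 0 and refreshes *old with the current value on failure */
static int ldsem_cmpxchg(long *old, long new_val)
{
        long expected = *old;
        if (atomic_compare_exchange_strong(&count, &expected, new_val)) {
                *old = new_val;
                return 1;
        }
        *old = expected;        /* C11 already wrote back the observed value */
        return 0;
}

int main(void)
{
        atomic_store(&count, 4);
        long old = 4;
        printf("%d old=%ld\n", ldsem_cmpxchg(&old, 5), old);    /* 1 old=5 */
        old = 4;                                                /* now stale */
        printf("%d old=%ld\n", ldsem_cmpxchg(&old, 6), old);    /* 0 old=5 */
        return 0;
}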
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
642 : CI_ROLE_GADGET; 642 : CI_ROLE_GADGET;
643 } 643 }
644 644
645 /* only update vbus status for peripheral */
646 if (ci->role == CI_ROLE_GADGET)
647 ci_handle_vbus_change(ci);
648
645 ret = ci_role_start(ci, ci->role); 649 ret = ci_role_start(ci, ci->role);
646 if (ret) { 650 if (ret) {
647 dev_err(dev, "can't start %s role\n", ci_role(ci)->name); 651 dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
88 return ret; 88 return ret;
89 89
90disable_reg: 90disable_reg:
91 regulator_disable(ci->platdata->reg_vbus); 91 if (ci->platdata->reg_vbus)
92 regulator_disable(ci->platdata->reg_vbus);
92 93
93put_hcd: 94put_hcd:
94 usb_put_hcd(hcd); 95 usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..69d20fbb38a2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
1795 pm_runtime_no_callbacks(&ci->gadget.dev); 1795 pm_runtime_no_callbacks(&ci->gadget.dev);
1796 pm_runtime_enable(&ci->gadget.dev); 1796 pm_runtime_enable(&ci->gadget.dev);
1797 1797
1798 /* Update ci->vbus_active */
1799 ci_handle_vbus_change(ci);
1800
1801 return retval; 1798 return retval;
1802 1799
1803destroy_eps: 1800destroy_eps:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
854{ 854{
855 /* need autopm_get/put here to ensure the usbcore sees the new value */ 855 /* need autopm_get/put here to ensure the usbcore sees the new value */
856 int rv = usb_autopm_get_interface(intf); 856 int rv = usb_autopm_get_interface(intf);
857 if (rv < 0)
858 goto err;
859 857
860 intf->needs_remote_wakeup = on; 858 intf->needs_remote_wakeup = on;
861 usb_autopm_put_interface(intf); 859 if (!rv)
862err: 860 usb_autopm_put_interface(intf);
863 return rv; 861 return 0;
864} 862}
865 863
866static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) 864static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
455 if (IS_ERR(regs)) 455 if (IS_ERR(regs))
456 return PTR_ERR(regs); 456 return PTR_ERR(regs);
457 457
458 usb_phy_set_suspend(dwc->usb2_phy, 0);
459 usb_phy_set_suspend(dwc->usb3_phy, 0);
460
461 spin_lock_init(&dwc->lock); 458 spin_lock_init(&dwc->lock);
462 platform_set_drvdata(pdev, dwc); 459 platform_set_drvdata(pdev, dwc);
463 460
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
488 goto err0; 485 goto err0;
489 } 486 }
490 487
488 usb_phy_set_suspend(dwc->usb2_phy, 0);
489 usb_phy_set_suspend(dwc->usb3_phy, 0);
490
491 ret = dwc3_event_buffers_setup(dwc); 491 ret = dwc3_event_buffers_setup(dwc);
492 if (ret) { 492 if (ret) {
493 dev_err(dwc->dev, "failed to setup event buffers\n"); 493 dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
569 dwc3_event_buffers_cleanup(dwc); 569 dwc3_event_buffers_cleanup(dwc);
570 570
571err1: 571err1:
572 usb_phy_set_suspend(dwc->usb2_phy, 1);
573 usb_phy_set_suspend(dwc->usb3_phy, 1);
572 dwc3_core_exit(dwc); 574 dwc3_core_exit(dwc);
573 575
574err0: 576err0:
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
136 struct ohci_hcd *ohci; 136 struct ohci_hcd *ohci;
137 int retval; 137 int retval;
138 struct usb_hcd *hcd = NULL; 138 struct usb_hcd *hcd = NULL;
139 139 struct device *dev = &pdev->dev;
140 if (pdev->num_resources != 2) { 140 struct resource *res;
141 pr_debug("hcd probe: invalid num_resources"); 141 int irq;
142 return -ENODEV; 142
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_dbg(dev, "hcd probe: missing memory resource\n");
146 return -ENXIO;
143 } 147 }
144 148
145 if ((pdev->resource[0].flags != IORESOURCE_MEM) 149 irq = platform_get_irq(pdev, 0);
146 || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 150 if (irq < 0) {
147 pr_debug("hcd probe: invalid resource type\n"); 151 dev_dbg(dev, "hcd probe: missing irq resource\n");
148 return -ENODEV; 152 return irq;
149 } 153 }
150 154
151 hcd = usb_create_hcd(driver, &pdev->dev, "at91"); 155 hcd = usb_create_hcd(driver, &pdev->dev, "at91");
152 if (!hcd) 156 if (!hcd)
153 return -ENOMEM; 157 return -ENOMEM;
154 hcd->rsrc_start = pdev->resource[0].start; 158 hcd->rsrc_start = res->start;
155 hcd->rsrc_len = resource_size(&pdev->resource[0]); 159 hcd->rsrc_len = resource_size(res);
156 160
157 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 161 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
158 pr_debug("request_mem_region failed\n"); 162 pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
199 ohci->num_ports = board->ports; 203 ohci->num_ports = board->ports;
200 at91_start_hc(pdev); 204 at91_start_hc(pdev);
201 205
202 retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); 206 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
203 if (retval == 0) 207 if (retval == 0)
204 return retval; 208 return retval;
205 209
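The ohci-at91 probe now asks the platform core for its memory and IRQ resources by type and index instead of hard-coding pdev->resource[0] and [1], so reordered or additional resources no longer break it. A userspace analogue of type-plus-index lookup versus positional indexing (get_resource and the res types are illustrative, not the kernel API):

#include <stdio.h>
#include <stddef.h>

enum res_type { RES_MEM, RES_IRQ };     /* stand-ins for IORESOURCE_MEM/IRQ */

struct res { enum res_type type; unsigned long start; };

/* find the idx-th resource of a given type, like platform_get_resource() */
static const struct res *get_resource(const struct res *r, size_t n,
                                      enum res_type type, size_t idx)
{
        for (size_t i = 0; i < n; i++)
                if (r[i].type == type && idx-- == 0)
                        return &r[i];
        return NULL;
}

int main(void)
{
        /* IRQ listed first: positional indexing would break, lookup does not */
        const struct res table[] = { { RES_IRQ, 23 }, { RES_MEM, 0x500000 } };
        const struct res *mem = get_resource(table, 2, RES_MEM, 0);
        printf("mem start: 0x%lx\n", mem ? mem->start : 0);
        return 0;
}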
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
128 * any other sleep) on Haswell machines with LPT and LPT-LP 128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS 129 * with the new Intel BIOS
130 */ 130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 131 /* Limit the quirk to only known vendors, as this triggers
132 * yet another BIOS bug on some other machines
133 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
134 */
135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 } 137 }
133 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 138 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 139 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
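The XHCI_SPURIOUS_WAKEUP quirk is now applied only when the PCI subsystem vendor is HP, since setting it unconditionally on LPT/LPT-LP tripped a different BIOS bug on other machines (see the bugzilla link in the comment). A toy sketch of vendor-gated quirk selection — the flag bit is a stand-in, and 0x103c is PCI_VENDOR_ID_HP:

#include <stdio.h>

#define VENDOR_HP 0x103c                /* PCI_VENDOR_ID_HP */
#define QUIRK_SPURIOUS_WAKEUP 0x1       /* stand-in flag bit */

static unsigned int pick_quirks(unsigned short subsystem_vendor)
{
        unsigned int quirks = 0;
        /* apply the workaround only where it is known to be needed */
        if (subsystem_vendor == VENDOR_HP)
                quirks |= QUIRK_SPURIOUS_WAKEUP;
        return quirks;
}

int main(void)
{
        printf("HP board:    0x%x\n", pick_quirks(0x103c));     /* 0x1 */
        printf("other board: 0x%x\n", pick_quirks(0x1028));     /* 0x0 */
        return 0;
}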
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..2b41c636a52a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
19 in host mode, low speed. 19 in host mode, low speed.
20 20
21config FSL_USB2_OTG 21config FSL_USB2_OTG
22 bool "Freescale USB OTG Transceiver Driver" 22 tristate "Freescale USB OTG Transceiver Driver"
23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
24 depends on USB
24 select USB_OTG 25 select USB_OTG
25 select USB_PHY 26 select USB_PHY
26 help 27 help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
29config ISP1301_OMAP 30config ISP1301_OMAP
30 tristate "Philips ISP1301 with OMAP OTG" 31 tristate "Philips ISP1301 with OMAP OTG"
31 depends on I2C && ARCH_OMAP_OTG 32 depends on I2C && ARCH_OMAP_OTG
33 depends on USB
32 select USB_PHY 34 select USB_PHY
33 help 35 help
34 If you say yes here you get support for the Philips ISP1301 36 If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
876 876
877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, 877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
878 resource_size(res)); 878 resource_size(res));
879 if (!tegra_phy->regs) { 879 if (!tegra_phy->pad_regs) {
880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); 880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
881 return -ENOMEM; 881 return -ENOMEM;
882 } 882 }
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..bad57ce77ba5 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
127 127
128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) 128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
129{ 129{
130 u8 data, ret = 0; 130 u8 data;
131 int ret;
131 132
132 ret = twl_i2c_read_u8(module, &data, address); 133 ret = twl_i2c_read_u8(module, &data, address);
133 if (ret >= 0) 134 if (ret >= 0)
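The u8-to-int change for ret is a real fix: twl_i2c_read_u8() returns a negative errno on failure, and stored into a u8 the sign is lost, so the ret >= 0 test could never detect an error. A two-printf demonstration:

#include <stdio.h>

int main(void)
{
        int err = -5;                   /* e.g. -EIO from an I2C read */
        unsigned char as_u8 = err;      /* wraps to 251: the sign is gone */

        printf("stored in u8:  %u (>= 0, error missed)\n", as_u8);
        printf("stored in int: %d (< 0, error caught)\n", err);
        return 0;
}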
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39d5be..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
251#define ZTE_PRODUCT_MF628 0x0015 251#define ZTE_PRODUCT_MF628 0x0015
252#define ZTE_PRODUCT_MF626 0x0031 252#define ZTE_PRODUCT_MF626 0x0031
253#define ZTE_PRODUCT_MC2718 0xffe8 253#define ZTE_PRODUCT_MC2718 0xffe8
254#define ZTE_PRODUCT_AC2726 0xfff1
254 255
255#define BENQ_VENDOR_ID 0x04a5 256#define BENQ_VENDOR_ID 0x04a5
256#define BENQ_PRODUCT_H10 0x4068 257#define BENQ_PRODUCT_H10 0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
1453 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1456 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1456 1458
1457 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1459 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1458 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1460 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
281 { USB_DEVICE(0x19d2, 0xfffd) }, 281 { USB_DEVICE(0x19d2, 0xfffd) },
282 { USB_DEVICE(0x19d2, 0xfffc) }, 282 { USB_DEVICE(0x19d2, 0xfffc) },
283 { USB_DEVICE(0x19d2, 0xfffb) }, 283 { USB_DEVICE(0x19d2, 0xfffb) },
284 /* AC2726, AC8710_V3 */ 284 /* AC8710_V3 */
285 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
286 { USB_DEVICE(0x19d2, 0xfff6) }, 285 { USB_DEVICE(0x19d2, 0xfff6) },
287 { USB_DEVICE(0x19d2, 0xfff7) }, 286 { USB_DEVICE(0x19d2, 0xfff7) },
288 { USB_DEVICE(0x19d2, 0xfff8) }, 287 { USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..5c4a95b516cf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
285{ 285{
286 __le32 actual = cpu_to_le32(vb->num_pages); 286 __le32 actual = cpu_to_le32(vb->num_pages);
287 287
288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, 288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
289 &actual); 289 &actual);
290} 290}
291 291
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
350 350
351 pfn = page_to_pfn(page); 351 pfn = page_to_pfn(page);
352 352
353 set_phys_to_machine(pfn, frame_list[i]);
354
355#ifdef CONFIG_XEN_HAVE_PVMMU 353#ifdef CONFIG_XEN_HAVE_PVMMU
356 /* Link back into the page tables if not highmem. */ 354 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
357 if (xen_pv_domain() && !PageHighMem(page)) { 355 set_phys_to_machine(pfn, frame_list[i]);
358 int ret; 356
359 ret = HYPERVISOR_update_va_mapping( 357 /* Link back into the page tables if not highmem. */
360 (unsigned long)__va(pfn << PAGE_SHIFT), 358 if (!PageHighMem(page)) {
361 mfn_pte(frame_list[i], PAGE_KERNEL), 359 int ret;
362 0); 360 ret = HYPERVISOR_update_va_mapping(
363 BUG_ON(ret); 361 (unsigned long)__va(pfn << PAGE_SHIFT),
362 mfn_pte(frame_list[i], PAGE_KERNEL),
363 0);
364 BUG_ON(ret);
365 }
364 } 366 }
365#endif 367#endif
366 368
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
378 enum bp_state state = BP_DONE; 380 enum bp_state state = BP_DONE;
379 unsigned long pfn, i; 381 unsigned long pfn, i;
380 struct page *page; 382 struct page *page;
381 struct page *scratch_page;
382 int ret; 383 int ret;
383 struct xen_memory_reservation reservation = { 384 struct xen_memory_reservation reservation = {
384 .address_bits = 0, 385 .address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
411 412
412 scrub_page(page); 413 scrub_page(page);
413 414
415#ifdef CONFIG_XEN_HAVE_PVMMU
414 /* 416 /*
415 * Ballooned out frames are effectively replaced with 417 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the 418 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent. 419 * p2m are consistent.
418 */ 420 */
419 scratch_page = get_balloon_scratch_page();
420#ifdef CONFIG_XEN_HAVE_PVMMU
421 if (xen_pv_domain() && !PageHighMem(page)) {
422 ret = HYPERVISOR_update_va_mapping(
423 (unsigned long)__va(pfn << PAGE_SHIFT),
424 pfn_pte(page_to_pfn(scratch_page),
425 PAGE_KERNEL_RO), 0);
426 BUG_ON(ret);
427 }
428#endif
429 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 421 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
430 unsigned long p; 422 unsigned long p;
423 struct page *scratch_page = get_balloon_scratch_page();
424
425 if (!PageHighMem(page)) {
426 ret = HYPERVISOR_update_va_mapping(
427 (unsigned long)__va(pfn << PAGE_SHIFT),
428 pfn_pte(page_to_pfn(scratch_page),
429 PAGE_KERNEL_RO), 0);
430 BUG_ON(ret);
431 }
431 p = page_to_pfn(scratch_page); 432 p = page_to_pfn(scratch_page);
432 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 433 __set_phys_to_machine(pfn, pfn_to_mfn(p));
434
435 put_balloon_scratch_page();
433 } 436 }
434 put_balloon_scratch_page(); 437#endif
435 438
436 balloon_append(pfn_to_page(pfn)); 439 balloon_append(pfn_to_page(pfn));
437 } 440 }
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
627 if (!xen_domain()) 630 if (!xen_domain())
628 return -ENODEV; 631 return -ENODEV;
629 632
630 for_each_online_cpu(cpu) 633 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
631 { 634 for_each_online_cpu(cpu)
632 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 635 {
633 if (per_cpu(balloon_scratch_page, cpu) == NULL) { 636 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
634 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 637 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
635 return -ENOMEM; 638 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
639 return -ENOMEM;
640 }
636 } 641 }
642 register_cpu_notifier(&balloon_cpu_notifier);
637 } 643 }
638 register_cpu_notifier(&balloon_cpu_notifier);
639 644
640 pr_info("Initialising balloon driver\n"); 645 pr_info("Initialising balloon driver\n");
641 646
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, 1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
1177 PAGE_SIZE * max_nr_gframes); 1177 PAGE_SIZE * max_nr_gframes);
1178 if (gnttab_shared.addr == NULL) { 1178 if (gnttab_shared.addr == NULL) {
1179 pr_warn("Failed to ioremap gnttab share frames!\n"); 1179 pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
1180 xen_hvm_resume_frames);
1180 return -ENOMEM; 1181 return -ENOMEM;
1181 } 1182 }
1182 } 1183 }
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
533{ 533{
534 struct page **pages = vma->vm_private_data; 534 struct page **pages = vma->vm_private_data;
535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
536 int rc;
536 537
537 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 538 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
538 return; 539 return;
539 540
540 xen_unmap_domain_mfn_range(vma, numpgs, pages); 541 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
541 free_xenballooned_pages(numpgs, pages); 542 if (rc == 0)
543 free_xenballooned_pages(numpgs, pages);
544 else
545 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
546 numpgs, rc);
542 kfree(pages); 547 kfree(pages);
543} 548}
544 549
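privcmd_close() now frees the ballooned pages only when xen_unmap_domain_mfn_range() reports success; on failure it deliberately leaks them with a pr_crit, since handing still-mapped frames back to the allocator would be far worse than a leak. A userspace analogue of the release-only-on-success discipline, using munmap():

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        if (munmap(p, len) == 0) {
                /* safe: the range is gone, bookkeeping can be released */
                puts("unmapped, releasing bookkeeping");
        } else {
                /* better to leak than to reuse a region that is still mapped */
                fprintf(stderr, "unmap failed: leaking %zu bytes on purpose\n", len);
        }
        return 0;
}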