aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_lpss.c1
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/erst.c1
-rw-r--r--drivers/acpi/battery.c22
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/ata/ahci.c21
-rw-r--r--drivers/ata/ahci_imx.c3
-rw-r--r--drivers/ata/libata-core.c19
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/block/null_blk.c112
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/tpm/tpm_ppi.c15
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-s2mps11.c6
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c14
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c1
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c7
-rw-r--r--drivers/clocksource/sun4i_timer.c3
-rw-r--r--drivers/clocksource/time-armada-370-xp.c10
-rw-r--r--drivers/cpufreq/cpufreq.c104
-rw-r--r--drivers/cpufreq/intel_pstate.c8
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/at_hdmac_regs.h4
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c8
-rw-r--r--drivers/dma/fsldma.c31
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/mv_xor.c101
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c27
-rw-r--r--drivers/dma/txx9dmac.c1
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/Kconfig6
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/efi-pstore.c1
-rw-r--r--drivers/gpio/gpio-msm-v2.c4
-rw-r--r--drivers/gpio/gpio-rcar.c3
-rw-r--r--drivers/gpio/gpio-twl4030.c15
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c7
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c8
-rw-r--r--drivers/gpu/drm/drm_stub.c6
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c26
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c43
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c6
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c87
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/idle/intel_idle.c17
-rw-r--r--drivers/iio/adc/ad7887.c16
-rw-r--r--drivers/iio/imu/adis16400_core.c7
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/infiniband/core/iwcm.c11
-rw-r--r--drivers/infiniband/core/uverbs.h10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c17
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c22
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/touchscreen/zforce_ts.c21
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c8
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/telespci.c4
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-lp5523.c12
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h12
-rw-r--r--drivers/md/bcache/btree.c27
-rw-r--r--drivers/md/bcache/movinggc.c21
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/sysfs.c50
-rw-r--r--drivers/md/bcache/util.c8
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c53
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rtsx_pcr.c10
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c45
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c254
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c28
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c103
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h73
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c29
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c65
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c3
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hyperv/netvsc_drv.c21
-rw-r--r--drivers/net/macvlan.c26
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/team/team.c3
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/dm9601.c44
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/mcs7830.c19
-rw-r--r--drivers/net/virtio_net.c11
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/xen-netback/common.h19
-rw-r--r--drivers/net/xen-netback/interface.c11
-rw-r--r--drivers/net/xen-netback/netback.c21
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/pci-acpi.c21
-rw-r--r--drivers/phy/Kconfig4
-rw-r--r--drivers/phy/phy-core.c26
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c1
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/power_supply_core.c12
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/regulator/Kconfig15
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ab8500.c126
-rw-r--r--drivers/regulator/act8865-regulator.c349
-rw-r--r--drivers/regulator/anatop-regulator.c36
-rw-r--r--drivers/regulator/arizona-micsupp.c52
-rw-r--r--drivers/regulator/as3722-regulator.c34
-rw-r--r--drivers/regulator/core.c15
-rw-r--r--drivers/regulator/db8500-prcmu.c20
-rw-r--r--drivers/regulator/gpio-regulator.c17
-rw-r--r--drivers/regulator/lp3971.c43
-rw-r--r--drivers/regulator/lp3972.c41
-rw-r--r--drivers/regulator/max14577.c273
-rw-r--r--drivers/regulator/max77693.c1
-rw-r--r--drivers/regulator/mc13892-regulator.c24
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/staging/bcm/Bcmnet.c3
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c15
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c7
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c39
-rw-r--r--drivers/staging/imx-drm/imx-tve.c9
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c32
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_file.c8
-rw-r--r--drivers/target/target_core_file.h5
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_ldsem.c16
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c8
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/host/ohci-at91.c26
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/zte_ev.c3
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/xen/balloon.c63
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--drivers/xen/privcmd.c9
275 files changed, 3189 insertions, 1625 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d9248526d78..4770de5707b9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
348config ACPI_EXTLOG 348config ACPI_EXTLOG
349 tristate "Extended Error Log support" 349 tristate "Extended Error Log support"
350 depends on X86_MCE && X86_LOCAL_APIC 350 depends on X86_MCE && X86_LOCAL_APIC
351 select EFI
352 select UEFI_CPER 351 select UEFI_CPER
353 default n 352 default n
354 help 353 help
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e3797165..3c2e4aa529c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
207 goto end; 207 goto end;
208 208
209 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), 209 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
210 ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac); 210 ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
211 if (result) { 211 if (result) {
212 power_supply_unregister(&ac->charger); 212 power_supply_unregister(&ac->charger);
213 goto end; 213 goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
255 return -EINVAL; 255 return -EINVAL;
256 256
257 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), 257 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
258 ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler); 258 ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
259 259
260 ac = platform_get_drvdata(pdev); 260 ac = platform_get_drvdata(pdev);
261 if (ac->charger.dev) 261 if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe137b9e..e60390597372 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
162 { "80860F14", (unsigned long)&byt_sdio_dev_desc }, 162 { "80860F14", (unsigned long)&byt_sdio_dev_desc },
163 { "80860F41", (unsigned long)&byt_i2c_dev_desc }, 163 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
164 { "INT33B2", }, 164 { "INT33B2", },
165 { "INT33FC", },
165 166
166 { "INT3430", (unsigned long)&lpt_dev_desc }, 167 { "INT3430", (unsigned long)&lpt_dev_desc },
167 { "INT3431", (unsigned long)&lpt_dev_desc }, 168 { "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294bb682c..3650b2183227 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)" 2 bool "ACPI Platform Error Interface (APEI)"
3 select MISC_FILESYSTEMS 3 select MISC_FILESYSTEMS
4 select PSTORE 4 select PSTORE
5 select EFI
6 select UEFI_CPER 5 select UEFI_CPER
7 depends on X86 6 depends on X86
8 help 7 help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f23c824..cb1d557fc22c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
942static struct pstore_info erst_info = { 942static struct pstore_info erst_info = {
943 .owner = THIS_MODULE, 943 .owner = THIS_MODULE,
944 .name = "erst", 944 .name = "erst",
945 .flags = PSTORE_FLAGS_FRAGILE,
945 .open = erst_open_pstore, 946 .open = erst_open_pstore,
946 .close = erst_close_pstore, 947 .close = erst_close_pstore,
947 .read = erst_reader, 948 .read = erst_reader,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1aceda8b8..5876a49dfd38 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
62MODULE_DESCRIPTION("ACPI Battery Driver"); 62MODULE_DESCRIPTION("ACPI Battery Driver");
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65static int battery_bix_broken_package;
65static unsigned int cache_time = 1000; 66static unsigned int cache_time = 1000;
66module_param(cache_time, uint, 0644); 67module_param(cache_time, uint, 0644);
67MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 68MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
416 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); 417 ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
417 return -ENODEV; 418 return -ENODEV;
418 } 419 }
419 if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) 420
421 if (battery_bix_broken_package)
422 result = extract_package(battery, buffer.pointer,
423 extended_info_offsets + 1,
424 ARRAY_SIZE(extended_info_offsets) - 1);
425 else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
420 result = extract_package(battery, buffer.pointer, 426 result = extract_package(battery, buffer.pointer,
421 extended_info_offsets, 427 extended_info_offsets,
422 ARRAY_SIZE(extended_info_offsets)); 428 ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
754 return 0; 760 return 0;
755} 761}
756 762
763static struct dmi_system_id bat_dmi_table[] = {
764 {
765 .ident = "NEC LZ750/LS",
766 .matches = {
767 DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
768 DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
769 },
770 },
771 {},
772};
773
757static int acpi_battery_add(struct acpi_device *device) 774static int acpi_battery_add(struct acpi_device *device)
758{ 775{
759 int result = 0; 776 int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
846{ 863{
847 if (acpi_disabled) 864 if (acpi_disabled)
848 return; 865 return;
866
867 if (dmi_check_system(bat_dmi_table))
868 battery_bix_broken_package = 1;
849 acpi_bus_register_driver(&acpi_battery_driver); 869 acpi_bus_register_driver(&acpi_battery_driver);
850} 870}
851 871
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72e25f8..0710004055c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
156} 156}
157EXPORT_SYMBOL(acpi_bus_get_private_data); 157EXPORT_SYMBOL(acpi_bus_get_private_data);
158 158
159void acpi_bus_no_hotplug(acpi_handle handle)
160{
161 struct acpi_device *adev = NULL;
162
163 acpi_bus_get_device(handle, &adev);
164 if (adev)
165 adev->flags.no_hotplug = true;
166}
167EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
168
159static void acpi_print_osc_error(acpi_handle handle, 169static void acpi_print_osc_error(acpi_handle handle,
160 struct acpi_osc_context *context, char *error) 170 struct acpi_osc_context *context, char *error)
161{ 171{
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e9506338..e3a92a6da39a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
427 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 427 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), 428 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
429 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ 429 .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
430 { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
431 PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
432 .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
430 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), 433 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
431 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ 434 .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
432 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), 435 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1238 if (rc) 1241 if (rc)
1239 return rc; 1242 return rc;
1240 1243
1241 /* AHCI controllers often implement SFF compatible interface.
1242 * Grab all PCI BARs just in case.
1243 */
1244 rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
1245 if (rc == -EBUSY)
1246 pcim_pin_device(pdev);
1247 if (rc)
1248 return rc;
1249
1250 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 1244 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1251 (pdev->device == 0x2652 || pdev->device == 0x2653)) { 1245 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
1252 u8 map; 1246 u8 map;
@@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1263 } 1257 }
1264 } 1258 }
1265 1259
1260 /* AHCI controllers often implement SFF compatible interface.
1261 * Grab all PCI BARs just in case.
1262 */
1263 rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
1264 if (rc == -EBUSY)
1265 pcim_pin_device(pdev);
1266 if (rc)
1267 return rc;
1268
1266 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1269 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1267 if (!hpriv) 1270 if (!hpriv)
1268 return -ENOMEM; 1271 return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73fe321e..3e23e9941dad 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
113 /* 113 /*
114 * set PHY Paremeters, two steps to configure the GPR13, 114 * set PHY Paremeters, two steps to configure the GPR13,
115 * one write for rest of parameters, mask of first write 115 * one write for rest of parameters, mask of first write
116 * is 0x07fffffd, and the other one write for setting 116 * is 0x07ffffff, and the other one write for setting
117 * the mpll_clk_en. 117 * the mpll_clk_en.
118 */ 118 */
119 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK 119 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
124 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK 124 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
125 | IMX6Q_GPR13_SATA_TX_BOOST_MASK 125 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
126 | IMX6Q_GPR13_SATA_TX_LVL_MASK 126 | IMX6Q_GPR13_SATA_TX_LVL_MASK
127 | IMX6Q_GPR13_SATA_MPLL_CLK_EN
127 | IMX6Q_GPR13_SATA_TX_EDGE_RATE 128 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
128 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB 129 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
129 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M 130 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b93678bbcd..1393a5890ed5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2149 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2150 err_mask); 2150 err_mask);
2151 } else { 2151 } else {
2152 u8 *cmds = dev->ncq_send_recv_cmds;
2153
2152 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2154 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2153 memcpy(dev->ncq_send_recv_cmds, ap->sector_buf, 2155 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2154 ATA_LOG_NCQ_SEND_RECV_SIZE); 2156
2157 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2158 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2159 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2160 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2161 }
2155 } 2162 }
2156 } 2163 }
2157 2164
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4156 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4163 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4157 ATA_HORKAGE_FIRMWARE_WARN }, 4164 ATA_HORKAGE_FIRMWARE_WARN },
4158 4165
4166 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
4167 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4168
4159 /* Blacklist entries taken from Silicon Image 3124/3132 4169 /* Blacklist entries taken from Silicon Image 3124/3132
4160 Windows driver .inf file - also several Linux problem reports */ 4170 Windows driver .inf file - also several Linux problem reports */
4161 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4171 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4202 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4212 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4203 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4213 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4204 4214
4215 /* devices that don't properly handle queued TRIM commands */
4216 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4217 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4218
4205 /* End Marker */ 4219 /* End Marker */
4206 { } 4220 { }
4207}; 4221};
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
6519 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6533 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6520 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6534 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6521 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6535 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6536 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6522 }; 6537 };
6523 char *start = *cur, *p = *cur; 6538 char *start = *cur, *p = *cur;
6524 char *id, *val, *endp; 6539 char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556d347c..377eb889f555 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
3872 return; 3872 return;
3873 } 3873 }
3874 3874
3875 /*
3876 * XXX - UGLY HACK
3877 *
3878 * The block layer suspend/resume path is fundamentally broken due
3879 * to freezable kthreads and workqueue and may deadlock if a block
3880 * device gets removed while resume is in progress. I don't know
3881 * what the solution is short of removing freezable kthreads and
3882 * workqueues altogether.
3883 *
3884 * The following is an ugly hack to avoid kicking off device
3885 * removal while freezer is active. This is a joke but does avoid
3886 * this particular deadlock scenario.
3887 *
3888 * https://bugzilla.kernel.org/show_bug.cgi?id=62801
3889 * http://marc.info/?l=linux-kernel&m=138695698516487
3890 */
3891#ifdef CONFIG_FREEZER
3892 while (pm_freezing)
3893 msleep(10);
3894#endif
3895
3875 DPRINTK("ENTER\n"); 3896 DPRINTK("ENTER\n");
3876 mutex_lock(&ap->scsi_scan_mutex); 3897 mutex_lock(&ap->scsi_scan_mutex);
3877 3898
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca0989b14..1ad2f62d34b9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
83 .id_table = sis_pci_tbl, 83 .id_table = sis_pci_tbl,
84 .probe = sis_init_one, 84 .probe = sis_init_one,
85 .remove = ata_pci_remove_one, 85 .remove = ata_pci_remove_one,
86#ifdef CONFIG_PM
87 .suspend = ata_pci_device_suspend,
88 .resume = ata_pci_device_resume,
89#endif
86}; 90};
87 91
88static struct scsi_host_template sis_sht = { 92static struct scsi_host_template sis_sht = {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2
2#include <linux/moduleparam.h> 3#include <linux/moduleparam.h>
3#include <linux/sched.h> 4#include <linux/sched.h>
4#include <linux/fs.h> 5#include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
65 NULL_Q_MQ = 2, 66 NULL_Q_MQ = 2,
66}; 67};
67 68
68static int submit_queues = 1; 69static int submit_queues;
69module_param(submit_queues, int, S_IRUGO); 70module_param(submit_queues, int, S_IRUGO);
70MODULE_PARM_DESC(submit_queues, "Number of submission queues"); 71MODULE_PARM_DESC(submit_queues, "Number of submission queues");
71 72
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
101module_param(hw_queue_depth, int, S_IRUGO); 102module_param(hw_queue_depth, int, S_IRUGO);
102MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); 103MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
103 104
104static bool use_per_node_hctx = true; 105static bool use_per_node_hctx = false;
105module_param(use_per_node_hctx, bool, S_IRUGO); 106module_param(use_per_node_hctx, bool, S_IRUGO);
106MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true"); 107MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
107 108
108static void put_tag(struct nullb_queue *nq, unsigned int tag) 109static void put_tag(struct nullb_queue *nq, unsigned int tag)
109{ 110{
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
346 347
347static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) 348static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
348{ 349{
349 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, 350 int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
350 hctx_index); 351 int tip = (reg->nr_hw_queues % nr_online_nodes);
352 int node = 0, i, n;
353
354 /*
355 * Split submit queues evenly wrt to the number of nodes. If uneven,
356 * fill the first buckets with one extra, until the rest is filled with
357 * no extra.
358 */
359 for (i = 0, n = 1; i < hctx_index; i++, n++) {
360 if (n % b_size == 0) {
361 n = 0;
362 node++;
363
364 tip--;
365 if (!tip)
366 b_size = reg->nr_hw_queues / nr_online_nodes;
367 }
368 }
369
370 /*
371 * A node might not be online, therefore map the relative node id to the
372 * real node id.
373 */
374 for_each_online_node(n) {
375 if (!node)
376 break;
377 node--;
378 }
379
380 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
351} 381}
352 382
353static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) 383static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
355 kfree(hctx); 385 kfree(hctx);
356} 386}
357 387
388static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
389{
390 BUG_ON(!nullb);
391 BUG_ON(!nq);
392
393 init_waitqueue_head(&nq->wait);
394 nq->queue_depth = nullb->queue_depth;
395}
396
358static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 397static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
359 unsigned int index) 398 unsigned int index)
360{ 399{
361 struct nullb *nullb = data; 400 struct nullb *nullb = data;
362 struct nullb_queue *nq = &nullb->queues[index]; 401 struct nullb_queue *nq = &nullb->queues[index];
363 402
364 init_waitqueue_head(&nq->wait);
365 nq->queue_depth = nullb->queue_depth;
366 nullb->nr_queues++;
367 hctx->driver_data = nq; 403 hctx->driver_data = nq;
404 null_init_queue(nullb, nq);
405 nullb->nr_queues++;
368 406
369 return 0; 407 return 0;
370} 408}
@@ -387,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
387 list_del_init(&nullb->list); 425 list_del_init(&nullb->list);
388 426
389 del_gendisk(nullb->disk); 427 del_gendisk(nullb->disk);
390 if (queue_mode == NULL_Q_MQ) 428 blk_cleanup_queue(nullb->q);
391 blk_mq_free_queue(nullb->q);
392 else
393 blk_cleanup_queue(nullb->q);
394 put_disk(nullb->disk); 429 put_disk(nullb->disk);
395 kfree(nullb); 430 kfree(nullb);
396} 431}
@@ -417,13 +452,13 @@ static int setup_commands(struct nullb_queue *nq)
417 452
418 nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); 453 nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
419 if (!nq->cmds) 454 if (!nq->cmds)
420 return 1; 455 return -ENOMEM;
421 456
422 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; 457 tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
423 nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); 458 nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
424 if (!nq->tag_map) { 459 if (!nq->tag_map) {
425 kfree(nq->cmds); 460 kfree(nq->cmds);
426 return 1; 461 return -ENOMEM;
427 } 462 }
428 463
429 for (i = 0; i < nq->queue_depth; i++) { 464 for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +489,37 @@ static void cleanup_queues(struct nullb *nullb)
454 489
455static int setup_queues(struct nullb *nullb) 490static int setup_queues(struct nullb *nullb)
456{ 491{
457 struct nullb_queue *nq; 492 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
458 int i; 493 GFP_KERNEL);
459
460 nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
461 if (!nullb->queues) 494 if (!nullb->queues)
462 return 1; 495 return -ENOMEM;
463 496
464 nullb->nr_queues = 0; 497 nullb->nr_queues = 0;
465 nullb->queue_depth = hw_queue_depth; 498 nullb->queue_depth = hw_queue_depth;
466 499
467 if (queue_mode == NULL_Q_MQ) 500 return 0;
468 return 0; 501}
502
503static int init_driver_queues(struct nullb *nullb)
504{
505 struct nullb_queue *nq;
506 int i, ret = 0;
469 507
470 for (i = 0; i < submit_queues; i++) { 508 for (i = 0; i < submit_queues; i++) {
471 nq = &nullb->queues[i]; 509 nq = &nullb->queues[i];
472 init_waitqueue_head(&nq->wait); 510
473 nq->queue_depth = hw_queue_depth; 511 null_init_queue(nullb, nq);
474 if (setup_commands(nq)) 512
475 break; 513 ret = setup_commands(nq);
514 if (ret)
515 goto err_queue;
476 nullb->nr_queues++; 516 nullb->nr_queues++;
477 } 517 }
478 518
479 if (i == submit_queues) 519 return 0;
480 return 0; 520err_queue:
481
482 cleanup_queues(nullb); 521 cleanup_queues(nullb);
483 return 1; 522 return ret;
484} 523}
485 524
486static int null_add_dev(void) 525static int null_add_dev(void)
@@ -518,11 +557,13 @@ static int null_add_dev(void)
518 } else if (queue_mode == NULL_Q_BIO) { 557 } else if (queue_mode == NULL_Q_BIO) {
519 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); 558 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
520 blk_queue_make_request(nullb->q, null_queue_bio); 559 blk_queue_make_request(nullb->q, null_queue_bio);
560 init_driver_queues(nullb);
521 } else { 561 } else {
522 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 562 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
523 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); 563 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
524 if (nullb->q) 564 if (nullb->q)
525 blk_queue_softirq_done(nullb->q, null_softirq_done_fn); 565 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
566 init_driver_queues(nullb);
526 } 567 }
527 568
528 if (!nullb->q) 569 if (!nullb->q)
@@ -534,10 +575,7 @@ static int null_add_dev(void)
534 disk = nullb->disk = alloc_disk_node(1, home_node); 575 disk = nullb->disk = alloc_disk_node(1, home_node);
535 if (!disk) { 576 if (!disk) {
536queue_fail: 577queue_fail:
537 if (queue_mode == NULL_Q_MQ) 578 blk_cleanup_queue(nullb->q);
538 blk_mq_free_queue(nullb->q);
539 else
540 blk_cleanup_queue(nullb->q);
541 cleanup_queues(nullb); 579 cleanup_queues(nullb);
542err: 580err:
543 kfree(nullb); 581 kfree(nullb);
@@ -579,7 +617,13 @@ static int __init null_init(void)
579 } 617 }
580#endif 618#endif
581 619
582 if (submit_queues > nr_cpu_ids) 620 if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
621 if (submit_queues < nr_online_nodes) {
622 pr_warn("null_blk: submit_queues param is set to %u.",
623 nr_online_nodes);
624 submit_queues = nr_online_nodes;
625 }
626 } else if (submit_queues > nr_cpu_ids)
583 submit_queues = nr_cpu_ids; 627 submit_queues = nr_cpu_ids;
584 else if (!submit_queues) 628 else if (!submit_queues)
585 submit_queues = 1; 629 submit_queues = 1;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5269 } 5269 }
5270} 5270}
5271 5271
5272const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) 5272static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5273{ 5273{
5274 switch (state) { 5274 switch (state) {
5275 case SKD_MSG_STATE_IDLE: 5275 case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5281 } 5281 }
5282} 5282}
5283 5283
5284const char *skd_skreq_state_to_str(enum skd_req_state state) 5284static const char *skd_skreq_state_to_str(enum skd_req_state state)
5285{ 5285{
5286 switch (state) { 5286 switch (state) {
5287 case SKD_REQ_STATE_IDLE: 5287 case SKD_REQ_STATE_IDLE:
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb318f6..dceb85f8d9a8 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
87 { USB_DEVICE(0x0CF3, 0xE004) }, 87 { USB_DEVICE(0x0CF3, 0xE004) },
88 { USB_DEVICE(0x0CF3, 0xE005) }, 88 { USB_DEVICE(0x0CF3, 0xE005) },
89 { USB_DEVICE(0x0930, 0x0219) }, 89 { USB_DEVICE(0x0930, 0x0219) },
90 { USB_DEVICE(0x0930, 0x0220) },
90 { USB_DEVICE(0x0489, 0xe057) }, 91 { USB_DEVICE(0x0489, 0xe057) },
91 { USB_DEVICE(0x13d3, 0x3393) }, 92 { USB_DEVICE(0x13d3, 0x3393) },
92 { USB_DEVICE(0x0489, 0xe04e) }, 93 { USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
129 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 130 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
130 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 131 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
131 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
132 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 135 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f2d2df..3980fd18f6ea 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, 155 { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
158 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 159 { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
159 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 160 { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc65601..e1f3337a0cf9 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
27static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context, 27static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
28 void **return_value) 28 void **return_value)
29{ 29{
30 acpi_status status; 30 acpi_status status = AE_OK;
31 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 31 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
32 status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 32
33 if (strstr(buffer.pointer, context) != NULL) { 33 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
34 *return_value = handle; 34 if (strstr(buffer.pointer, context) != NULL) {
35 *return_value = handle;
36 status = AE_CTRL_TERMINATE;
37 }
35 kfree(buffer.pointer); 38 kfree(buffer.pointer);
36 return AE_CTRL_TERMINATE;
37 } 39 }
38 return AE_OK; 40
41 return status;
39} 42}
40 43
41static inline void ppi_assign_params(union acpi_object params[4], 44static inline void ppi_assign_params(union acpi_object params[4],
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e44fba..5543b7df8e16 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
87 return 0; 87 return 0;
88} 88}
89 89
90static unsigned int _get_val(struct clk_divider *divider, u8 div) 90static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
91{ 91{
92 if (divider->flags & CLK_DIVIDER_ONE_BASED) 92 if (divider->flags & CLK_DIVIDER_ONE_BASED)
93 return div; 93 return div;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e676a64..00a3abe103a5 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
60 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); 60 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
61 int ret; 61 int ret;
62 62
63 ret = regmap_update_bits(s2mps11->iodev->regmap, 63 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
64 S2MPS11_REG_RTC_CTRL, 64 S2MPS11_REG_RTC_CTRL,
65 s2mps11->mask, s2mps11->mask); 65 s2mps11->mask, s2mps11->mask);
66 if (!ret) 66 if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
74 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); 74 struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
75 int ret; 75 int ret;
76 76
77 ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL, 77 ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
78 s2mps11->mask, ~s2mps11->mask); 78 s2mps11->mask, ~s2mps11->mask);
79 79
80 if (!ret) 80 if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
174 s2mps11_clk->hw.init = &s2mps11_clks_init[i]; 174 s2mps11_clk->hw.init = &s2mps11_clks_init[i];
175 s2mps11_clk->mask = 1 << i; 175 s2mps11_clk->mask = 1 << i;
176 176
177 ret = regmap_read(s2mps11_clk->iodev->regmap, 177 ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
178 S2MPS11_REG_RTC_CTRL, &val); 178 S2MPS11_REG_RTC_CTRL, &val);
179 if (ret < 0) 179 if (ret < 0)
180 goto err_reg; 180 goto err_reg;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aaede2b..68e515d093d8 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
26#define ASS_CLK_DIV 0x4 26#define ASS_CLK_DIV 0x4
27#define ASS_CLK_GATE 0x8 27#define ASS_CLK_GATE 0x8
28 28
29/* list of all parent clock list */
30static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
31static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
32
33#ifdef CONFIG_PM_SLEEP
29static unsigned long reg_save[][2] = { 34static unsigned long reg_save[][2] = {
30 {ASS_CLK_SRC, 0}, 35 {ASS_CLK_SRC, 0},
31 {ASS_CLK_DIV, 0}, 36 {ASS_CLK_DIV, 0},
32 {ASS_CLK_GATE, 0}, 37 {ASS_CLK_GATE, 0},
33}; 38};
34 39
35/* list of all parent clock list */
36static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
37static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
38
39#ifdef CONFIG_PM_SLEEP
40static int exynos_audss_clk_suspend(void) 40static int exynos_audss_clk_suspend(void)
41{ 41{
42 int i; 42 int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50c5f28..1a7c1b929c69 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
39#define SRC_TOP1 0xc214 39#define SRC_TOP1 0xc214
40#define SRC_CAM 0xc220 40#define SRC_CAM 0xc220
41#define SRC_TV 0xc224 41#define SRC_TV 0xc224
42#define SRC_MFC 0xcc28 42#define SRC_MFC 0xc228
43#define SRC_G3D 0xc22c 43#define SRC_G3D 0xc22c
44#define E4210_SRC_IMAGE 0xc230 44#define E4210_SRC_IMAGE 0xc230
45#define SRC_LCD0 0xc234 45#define SRC_LCD0 0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf32343c9f9..e52359cf9b6f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
25#define MPLL_LOCK 0x4000 25#define MPLL_LOCK 0x4000
26#define MPLL_CON0 0x4100 26#define MPLL_CON0 0x4100
27#define SRC_CORE1 0x4204 27#define SRC_CORE1 0x4204
28#define GATE_IP_ACP 0x8800
28#define CPLL_LOCK 0x10020 29#define CPLL_LOCK 0x10020
29#define EPLL_LOCK 0x10030 30#define EPLL_LOCK 0x10030
30#define VPLL_LOCK 0x10040 31#define VPLL_LOCK 0x10040
@@ -75,7 +76,6 @@
75#define SRC_CDREX 0x20200 76#define SRC_CDREX 0x20200
76#define PLL_DIV2_SEL 0x20a24 77#define PLL_DIV2_SEL 0x20a24
77#define GATE_IP_DISP1 0x10928 78#define GATE_IP_DISP1 0x10928
78#define GATE_IP_ACP 0x10000
79 79
80/* list of PLLs to be registered */ 80/* list of PLLs to be registered */
81enum exynos5250_plls { 81enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
120 spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2, 120 spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
121 hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1, 121 hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
122 tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct, 122 tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
123 wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, 123 wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
124 smmu_mdma0,
124 125
125 /* mux clocks */ 126 /* mux clocks */
126 mout_hdmi = 1024, 127 mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
354 GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0), 355 GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
355 GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0), 356 GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
356 GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), 357 GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
357 GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), 358 GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
358 GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), 359 GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
359 GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), 360 GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
360 GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0), 361 GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
361 GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), 362 GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
406 GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0), 407 GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
407 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0), 408 GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
408 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0), 409 GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
409 GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0), 410 GATE(sysreg, "sysreg", "aclk66",
411 GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
410 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0), 412 GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
411 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0), 413 GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
412 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0), 414 GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
492 GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0), 494 GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
493 GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0), 495 GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
494 GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0), 496 GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
497 GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
498 GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
495}; 499};
496 500
497static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { 501static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56962db..634c4d6dd45a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
75config CLKSRC_EFM32 75config CLKSRC_EFM32
76 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 76 bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
77 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) 77 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
78 select CLKSRC_MMIO
78 default ARCH_EFM32 79 default ARCH_EFM32
79 help 80 help
80 Support to use the timers of EFM32 SoCs as clock source and clock 81 Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4e5a2..b9ddd9e3a2f5 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
35 35
36 init_func = match->data; 36 init_func = match->data;
37 init_func(np); 37 init_func(np);
38 of_node_put(np);
39 } 38 }
40} 39}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8aecc729..2a2ea2717f3a 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
108 108
109static u64 read_sched_clock(void) 109static u64 read_sched_clock(void)
110{ 110{
111 return __raw_readl(sched_io_base); 111 return ~__raw_readl(sched_io_base);
112} 112}
113 113
114static const struct of_device_id sptimer_ids[] __initconst = { 114static const struct of_device_id sptimer_ids[] __initconst = {
115 { .compatible = "picochip,pc3x2-rtc" }, 115 { .compatible = "picochip,pc3x2-rtc" },
116 { .compatible = "snps,dw-apb-timer-sp" },
117 { /* Sentinel */ }, 116 { /* Sentinel */ },
118}; 117};
119 118
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
151 num_called++; 150 num_called++;
152} 151}
153CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); 152CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
154CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); 153CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
154CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
155CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695a28d8..a4f6119aafd8 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
179 writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), 179 writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
180 timer_base + TIMER_CTL_REG(0)); 180 timer_base + TIMER_CTL_REG(0));
181 181
182 /* Make sure timer is stopped before playing with interrupts */
183 sun4i_clkevt_time_stop(0);
184
182 ret = setup_irq(irq, &sun4i_timer_irq); 185 ret = setup_irq(irq, &sun4i_timer_irq);
183 if (ret) 186 if (ret)
184 pr_warn("failed to setup irq %d\n", irq); 187 pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e502785..4e7f6802e840 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
256 ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; 256 ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
257 257
258 /* 258 /*
259 * Set scale and timer for sched_clock.
260 */
261 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
262
263 /*
264 * Setup free-running clocksource timer (interrupts 259 * Setup free-running clocksource timer (interrupts
265 * disabled). 260 * disabled).
266 */ 261 */
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
270 timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | 265 timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
271 TIMER0_DIV(TIMER_DIVIDER_SHIFT)); 266 TIMER0_DIV(TIMER_DIVIDER_SHIFT));
272 267
268 /*
269 * Set scale and timer for sched_clock.
270 */
271 sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
272
273 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, 273 clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
274 "armada_370_xp_clocksource", 274 "armada_370_xp_clocksource",
275 timer_clk, 300, 32, clocksource_mmio_readl_down); 275 timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534da22dd..8d19f7c06010 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
828 int ret = 0; 828 int ret = 0;
829 829
830 memcpy(&new_policy, policy, sizeof(*policy)); 830 memcpy(&new_policy, policy, sizeof(*policy));
831
832 /* Use the default policy if its valid. */
833 if (cpufreq_driver->setpolicy)
834 cpufreq_parse_governor(policy->governor->name,
835 &new_policy.policy, NULL);
836
831 /* assure that the starting sequence is run in cpufreq_set_policy */ 837 /* assure that the starting sequence is run in cpufreq_set_policy */
832 policy->governor = NULL; 838 policy->governor = NULL;
833 839
834 /* set default policy */ 840 /* set default policy */
835 ret = cpufreq_set_policy(policy, &new_policy); 841 ret = cpufreq_set_policy(policy, &new_policy);
836 policy->user_policy.policy = policy->policy;
837 policy->user_policy.governor = policy->governor;
838
839 if (ret) { 842 if (ret) {
840 pr_debug("setting policy failed\n"); 843 pr_debug("setting policy failed\n");
841 if (cpufreq_driver->exit) 844 if (cpufreq_driver->exit)
@@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
845 848
846#ifdef CONFIG_HOTPLUG_CPU 849#ifdef CONFIG_HOTPLUG_CPU
847static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 850static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
848 unsigned int cpu, struct device *dev, 851 unsigned int cpu, struct device *dev)
849 bool frozen)
850{ 852{
851 int ret = 0; 853 int ret = 0;
852 unsigned long flags; 854 unsigned long flags;
@@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
877 } 879 }
878 } 880 }
879 881
880 /* Don't touch sysfs links during light-weight init */ 882 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
881 if (!frozen)
882 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
883
884 return ret;
885} 883}
886#endif 884#endif
887 885
@@ -926,6 +924,27 @@ err_free_policy:
926 return NULL; 924 return NULL;
927} 925}
928 926
927static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
928{
929 struct kobject *kobj;
930 struct completion *cmp;
931
932 down_read(&policy->rwsem);
933 kobj = &policy->kobj;
934 cmp = &policy->kobj_unregister;
935 up_read(&policy->rwsem);
936 kobject_put(kobj);
937
938 /*
939 * We need to make sure that the underlying kobj is
940 * actually not referenced anymore by anybody before we
941 * proceed with unloading.
942 */
943 pr_debug("waiting for dropping of refcount\n");
944 wait_for_completion(cmp);
945 pr_debug("wait complete\n");
946}
947
929static void cpufreq_policy_free(struct cpufreq_policy *policy) 948static void cpufreq_policy_free(struct cpufreq_policy *policy)
930{ 949{
931 free_cpumask_var(policy->related_cpus); 950 free_cpumask_var(policy->related_cpus);
@@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
986 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { 1005 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
987 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { 1006 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
988 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1007 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
989 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen); 1008 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
990 up_read(&cpufreq_rwsem); 1009 up_read(&cpufreq_rwsem);
991 return ret; 1010 return ret;
992 } 1011 }
@@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
994 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1013 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
995#endif 1014#endif
996 1015
997 if (frozen) 1016 /*
998 /* Restore the saved policy when doing light-weight init */ 1017 * Restore the saved policy when doing light-weight init and fall back
999 policy = cpufreq_policy_restore(cpu); 1018 * to the full init if that fails.
1000 else 1019 */
1020 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1021 if (!policy) {
1022 frozen = false;
1001 policy = cpufreq_policy_alloc(); 1023 policy = cpufreq_policy_alloc();
1002 1024 if (!policy)
1003 if (!policy) 1025 goto nomem_out;
1004 goto nomem_out; 1026 }
1005
1006 1027
1007 /* 1028 /*
1008 * In the resume path, since we restore a saved policy, the assignment 1029 * In the resume path, since we restore a saved policy, the assignment
@@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1047 */ 1068 */
1048 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1069 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1049 1070
1050 policy->user_policy.min = policy->min; 1071 if (!frozen) {
1051 policy->user_policy.max = policy->max; 1072 policy->user_policy.min = policy->min;
1073 policy->user_policy.max = policy->max;
1074 }
1052 1075
1053 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1076 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1054 CPUFREQ_START, policy); 1077 CPUFREQ_START, policy);
@@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1079 1102
1080 cpufreq_init_policy(policy); 1103 cpufreq_init_policy(policy);
1081 1104
1105 if (!frozen) {
1106 policy->user_policy.policy = policy->policy;
1107 policy->user_policy.governor = policy->governor;
1108 }
1109
1082 kobject_uevent(&policy->kobj, KOBJ_ADD); 1110 kobject_uevent(&policy->kobj, KOBJ_ADD);
1083 up_read(&cpufreq_rwsem); 1111 up_read(&cpufreq_rwsem);
1084 1112
@@ -1096,7 +1124,13 @@ err_get_freq:
1096 if (cpufreq_driver->exit) 1124 if (cpufreq_driver->exit)
1097 cpufreq_driver->exit(policy); 1125 cpufreq_driver->exit(policy);
1098err_set_policy_cpu: 1126err_set_policy_cpu:
1127 if (frozen) {
1128 /* Do not leave stale fallback data behind. */
1129 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1130 cpufreq_policy_put_kobj(policy);
1131 }
1099 cpufreq_policy_free(policy); 1132 cpufreq_policy_free(policy);
1133
1100nomem_out: 1134nomem_out:
1101 up_read(&cpufreq_rwsem); 1135 up_read(&cpufreq_rwsem);
1102 1136
@@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1118} 1152}
1119 1153
1120static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy, 1154static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1121 unsigned int old_cpu, bool frozen) 1155 unsigned int old_cpu)
1122{ 1156{
1123 struct device *cpu_dev; 1157 struct device *cpu_dev;
1124 int ret; 1158 int ret;
@@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1126 /* first sibling now owns the new sysfs dir */ 1160 /* first sibling now owns the new sysfs dir */
1127 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); 1161 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1128 1162
1129 /* Don't touch sysfs files during light-weight tear-down */
1130 if (frozen)
1131 return cpu_dev->id;
1132
1133 sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 1163 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1134 ret = kobject_move(&policy->kobj, &cpu_dev->kobj); 1164 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1135 if (ret) { 1165 if (ret) {
@@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1196 if (!frozen) 1226 if (!frozen)
1197 sysfs_remove_link(&dev->kobj, "cpufreq"); 1227 sysfs_remove_link(&dev->kobj, "cpufreq");
1198 } else if (cpus > 1) { 1228 } else if (cpus > 1) {
1199 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1229 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1200 if (new_cpu >= 0) { 1230 if (new_cpu >= 0) {
1201 update_policy_cpu(policy, new_cpu); 1231 update_policy_cpu(policy, new_cpu);
1202 1232
@@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1218 int ret; 1248 int ret;
1219 unsigned long flags; 1249 unsigned long flags;
1220 struct cpufreq_policy *policy; 1250 struct cpufreq_policy *policy;
1221 struct kobject *kobj;
1222 struct completion *cmp;
1223 1251
1224 read_lock_irqsave(&cpufreq_driver_lock, flags); 1252 read_lock_irqsave(&cpufreq_driver_lock, flags);
1225 policy = per_cpu(cpufreq_cpu_data, cpu); 1253 policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1249 } 1277 }
1250 } 1278 }
1251 1279
1252 if (!frozen) { 1280 if (!frozen)
1253 down_read(&policy->rwsem); 1281 cpufreq_policy_put_kobj(policy);
1254 kobj = &policy->kobj;
1255 cmp = &policy->kobj_unregister;
1256 up_read(&policy->rwsem);
1257 kobject_put(kobj);
1258
1259 /*
1260 * We need to make sure that the underlying kobj is
1261 * actually not referenced anymore by anybody before we
1262 * proceed with unloading.
1263 */
1264 pr_debug("waiting for dropping of refcount\n");
1265 wait_for_completion(cmp);
1266 pr_debug("wait complete\n");
1267 }
1268 1282
1269 /* 1283 /*
1270 * Perform the ->exit() even during light-weight tear-down, 1284 * Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae36961..d51f17ed691e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
581} 581}
582 582
583#define ICPU(model, policy) \ 583#define ICPU(model, policy) \
584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } 584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
585 (unsigned long)&policy }
585 586
586static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 587static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
587 ICPU(0x2a, core_params), 588 ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
614 cpu = all_cpu_data[cpunum]; 615 cpu = all_cpu_data[cpunum];
615 616
616 intel_pstate_get_cpu_pstates(cpu); 617 intel_pstate_get_cpu_pstates(cpu);
618 if (!cpu->pstate.current_pstate) {
619 all_cpu_data[cpunum] = NULL;
620 kfree(cpu);
621 return -ENODATA;
622 }
617 623
618 cpu->cpu = cpunum; 624 cpu->cpu = cpunum;
619 625
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 36795639df0d..6e51114057d0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
65 .state_count = 2, 65 .state_count = 2,
66}; 66};
67 67
68static int __init calxeda_cpuidle_probe(struct platform_device *pdev) 68static int calxeda_cpuidle_probe(struct platform_device *pdev)
69{ 69{
70 return cpuidle_register(&calxeda_idle_driver, NULL); 70 return cpuidle_register(&calxeda_idle_driver, NULL);
71} 71}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01eac33..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
1410static int __init ixp_module_init(void) 1410static int __init ixp_module_init(void)
1411{ 1411{
1412 int num = ARRAY_SIZE(ixp4xx_algos); 1412 int num = ARRAY_SIZE(ixp4xx_algos);
1413 int i, err ; 1413 int i, err;
1414 1414
1415 pdev = platform_device_register_full(&ixp_dev_info); 1415 pdev = platform_device_register_full(&ixp_dev_info);
1416 if (IS_ERR(pdev)) 1416 if (IS_ERR(pdev))
1417 return PTR_ERR(pdev); 1417 return PTR_ERR(pdev);
1418 1418
1419 dev = &pdev->dev;
1420
1421 spin_lock_init(&desc_lock); 1419 spin_lock_init(&desc_lock);
1422 spin_lock_init(&emerg_lock); 1420 spin_lock_init(&emerg_lock);
1423 1421
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687cc2334..c823daaf9043 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
62 tristate "Intel I/OAT DMA support" 62 tristate "Intel I/OAT DMA support"
63 depends on PCI && X86 63 depends on PCI && X86
64 select DMA_ENGINE 64 select DMA_ENGINE
65 select DMA_ENGINE_RAID
65 select DCA 66 select DCA
66 help 67 help
67 Enable support for the Intel(R) I/OAT DMA engine present 68 Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
112 bool "Marvell XOR engine support" 113 bool "Marvell XOR engine support"
113 depends on PLAT_ORION 114 depends on PLAT_ORION
114 select DMA_ENGINE 115 select DMA_ENGINE
116 select DMA_ENGINE_RAID
115 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 117 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
116 ---help--- 118 ---help---
117 Enable support for the Marvell XOR engine. 119 Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
187 tristate "AMCC PPC440SPe ADMA support" 189 tristate "AMCC PPC440SPe ADMA support"
188 depends on 440SPe || 440SP 190 depends on 440SPe || 440SP
189 select DMA_ENGINE 191 select DMA_ENGINE
192 select DMA_ENGINE_RAID
190 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL 193 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
191 select ASYNC_TX_ENABLE_CHANNEL_SWITCH 194 select ASYNC_TX_ENABLE_CHANNEL_SWITCH
192 help 195 help
@@ -352,6 +355,7 @@ config NET_DMA
352 bool "Network: TCP receive copy offload" 355 bool "Network: TCP receive copy offload"
353 depends on DMA_ENGINE && NET 356 depends on DMA_ENGINE && NET
354 default (INTEL_IOATDMA || FSL_DMA) 357 default (INTEL_IOATDMA || FSL_DMA)
358 depends on BROKEN
355 help 359 help
356 This enables the use of DMA engines in the network stack to 360 This enables the use of DMA engines in the network stack to
357 offload receive copy-to-user operations, freeing CPU cycles. 361 offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
377 Simple DMA test client. Say N unless you're debugging a 381 Simple DMA test client. Say N unless you're debugging a
378 DMA Device driver. 382 DMA Device driver.
379 383
384config DMA_ENGINE_RAID
385 bool
386
380endif 387endif
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647acdfa..2787aba60c6b 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
347{ 347{
348 return &chan->dev->device; 348 return &chan->dev->device;
349} 349}
350static struct device *chan2parent(struct dma_chan *chan)
351{
352 return chan->dev->device.parent;
353}
354 350
355#if defined(VERBOSE_DEBUG) 351#if defined(VERBOSE_DEBUG)
356static void vdbg_dump_regs(struct at_dma_chan *atchan) 352static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bdc12ef..ef63b9058f3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
912#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } 912#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
913static struct dmaengine_unmap_pool unmap_pool[] = { 913static struct dmaengine_unmap_pool unmap_pool[] = {
914 __UNMAP_POOL(2), 914 __UNMAP_POOL(2),
915 #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) 915 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
916 __UNMAP_POOL(16), 916 __UNMAP_POOL(16),
917 __UNMAP_POOL(128), 917 __UNMAP_POOL(128),
918 __UNMAP_POOL(256), 918 __UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1054 dma_cookie_t cookie; 1054 dma_cookie_t cookie;
1055 unsigned long flags; 1055 unsigned long flags;
1056 1056
1057 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); 1057 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
1058 if (!unmap) 1058 if (!unmap)
1059 return -ENOMEM; 1059 return -ENOMEM;
1060 1060
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3aaf926..9dfcaf5c1288 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
539 539
540 um->len = params->buf_size; 540 um->len = params->buf_size;
541 for (i = 0; i < src_cnt; i++) { 541 for (i = 0; i < src_cnt; i++) {
542 unsigned long buf = (unsigned long) thread->srcs[i]; 542 void *buf = thread->srcs[i];
543 struct page *pg = virt_to_page(buf); 543 struct page *pg = virt_to_page(buf);
544 unsigned pg_off = buf & ~PAGE_MASK; 544 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
545 545
546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off, 546 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
547 um->len, DMA_TO_DEVICE); 547 um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 559 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
560 dsts = &um->addr[src_cnt]; 560 dsts = &um->addr[src_cnt];
561 for (i = 0; i < dst_cnt; i++) { 561 for (i = 0; i < dst_cnt; i++) {
562 unsigned long buf = (unsigned long) thread->dsts[i]; 562 void *buf = thread->dsts[i];
563 struct page *pg = virt_to_page(buf); 563 struct page *pg = virt_to_page(buf);
564 unsigned pg_off = buf & ~PAGE_MASK; 564 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
565 565
566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, 566 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
567 DMA_BIDIRECTIONAL); 567 DMA_BIDIRECTIONAL);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16a55f2..f157c6f76b32 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
86 hw->count = CPU_TO_DMA(chan, count, 32); 86 hw->count = CPU_TO_DMA(chan, count, 32);
87} 87}
88 88
89static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
90{
91 return DMA_TO_CPU(chan, desc->hw.count, 32);
92}
93
94static void set_desc_src(struct fsldma_chan *chan, 89static void set_desc_src(struct fsldma_chan *chan,
95 struct fsl_dma_ld_hw *hw, dma_addr_t src) 90 struct fsl_dma_ld_hw *hw, dma_addr_t src)
96{ 91{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
101 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 96 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
102} 97}
103 98
104static dma_addr_t get_desc_src(struct fsldma_chan *chan,
105 struct fsl_desc_sw *desc)
106{
107 u64 snoop_bits;
108
109 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
110 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
111 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
112}
113
114static void set_desc_dst(struct fsldma_chan *chan, 99static void set_desc_dst(struct fsldma_chan *chan,
115 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 100 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
116{ 101{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
121 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 106 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
122} 107}
123 108
124static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
125 struct fsl_desc_sw *desc)
126{
127 u64 snoop_bits;
128
129 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
130 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
131 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
132}
133
134static void set_desc_next(struct fsldma_chan *chan, 109static void set_desc_next(struct fsldma_chan *chan,
135 struct fsl_dma_ld_hw *hw, dma_addr_t next) 110 struct fsl_dma_ld_hw *hw, dma_addr_t next)
136{ 111{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
408 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 383 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
409 struct fsl_desc_sw *child; 384 struct fsl_desc_sw *child;
410 unsigned long flags; 385 unsigned long flags;
411 dma_cookie_t cookie; 386 dma_cookie_t cookie = -EINVAL;
412 387
413 spin_lock_irqsave(&chan->desc_lock, flags); 388 spin_lock_irqsave(&chan->desc_lock, flags);
414 389
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
854 struct fsl_desc_sw *desc) 829 struct fsl_desc_sw *desc)
855{ 830{
856 struct dma_async_tx_descriptor *txd = &desc->async_tx; 831 struct dma_async_tx_descriptor *txd = &desc->async_tx;
857 struct device *dev = chan->common.device->dev;
858 dma_addr_t src = get_desc_src(chan, desc);
859 dma_addr_t dst = get_desc_dst(chan, desc);
860 u32 len = get_desc_cnt(chan, desc);
861 832
862 /* Run the link descriptor callback function */ 833 /* Run the link descriptor callback function */
863 if (txd->callback) { 834 if (txd->callback) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c777607c..87529181efcc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
817 } 817 }
818 818
819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
820 if (dma_mapping_error(dev, dma_src)) {
821 dev_err(dev, "mapping src buffer failed\n");
822 goto free_resources;
823 }
820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 824 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
825 if (dma_mapping_error(dev, dma_dest)) {
826 dev_err(dev, "mapping dest buffer failed\n");
827 goto unmap_src;
828 }
821 flags = DMA_PREP_INTERRUPT; 829 flags = DMA_PREP_INTERRUPT;
822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 830 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
823 IOAT_TEST_SIZE, flags); 831 IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
855 } 863 }
856 864
857unmap_dma: 865unmap_dma:
858 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
859 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 866 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
867unmap_src:
868 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
860free_resources: 869free_resources:
861 dma->device_free_chan_resources(dma_chan); 870 dma->device_free_chan_resources(dma_chan);
862out: 871out:
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0ef4e20..53fb0c8365b0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
54 hw_desc->desc_command = (1 << 31); 54 hw_desc->desc_command = (1 << 31);
55} 55}
56 56
57static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58{
59 struct mv_xor_desc *hw_desc = desc->hw_desc;
60 return hw_desc->phy_dest_addr;
61}
62
63static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 57static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
64 u32 byte_count) 58 u32 byte_count)
65{ 59{
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
787/* 781/*
788 * Perform a transaction to verify the HW works. 782 * Perform a transaction to verify the HW works.
789 */ 783 */
790#define MV_XOR_TEST_SIZE 2000
791 784
792static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) 785static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
793{ 786{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
797 struct dma_chan *dma_chan; 790 struct dma_chan *dma_chan;
798 dma_cookie_t cookie; 791 dma_cookie_t cookie;
799 struct dma_async_tx_descriptor *tx; 792 struct dma_async_tx_descriptor *tx;
793 struct dmaengine_unmap_data *unmap;
800 int err = 0; 794 int err = 0;
801 795
802 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 796 src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
803 if (!src) 797 if (!src)
804 return -ENOMEM; 798 return -ENOMEM;
805 799
806 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 800 dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
807 if (!dest) { 801 if (!dest) {
808 kfree(src); 802 kfree(src);
809 return -ENOMEM; 803 return -ENOMEM;
810 } 804 }
811 805
812 /* Fill in src buffer */ 806 /* Fill in src buffer */
813 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 807 for (i = 0; i < PAGE_SIZE; i++)
814 ((u8 *) src)[i] = (u8)i; 808 ((u8 *) src)[i] = (u8)i;
815 809
816 dma_chan = &mv_chan->dmachan; 810 dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
819 goto out; 813 goto out;
820 } 814 }
821 815
822 dest_dma = dma_map_single(dma_chan->device->dev, dest, 816 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
823 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 817 if (!unmap) {
818 err = -ENOMEM;
819 goto free_resources;
820 }
821
822 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
823 PAGE_SIZE, DMA_TO_DEVICE);
824 unmap->to_cnt = 1;
825 unmap->addr[0] = src_dma;
824 826
825 src_dma = dma_map_single(dma_chan->device->dev, src, 827 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
826 MV_XOR_TEST_SIZE, DMA_TO_DEVICE); 828 PAGE_SIZE, DMA_FROM_DEVICE);
829 unmap->from_cnt = 1;
830 unmap->addr[1] = dest_dma;
831
832 unmap->len = PAGE_SIZE;
827 833
828 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 834 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
829 MV_XOR_TEST_SIZE, 0); 835 PAGE_SIZE, 0);
830 cookie = mv_xor_tx_submit(tx); 836 cookie = mv_xor_tx_submit(tx);
831 mv_xor_issue_pending(dma_chan); 837 mv_xor_issue_pending(dma_chan);
832 async_tx_ack(tx); 838 async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
841 } 847 }
842 848
843 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 849 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
844 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 850 PAGE_SIZE, DMA_FROM_DEVICE);
845 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 851 if (memcmp(src, dest, PAGE_SIZE)) {
846 dev_err(dma_chan->device->dev, 852 dev_err(dma_chan->device->dev,
847 "Self-test copy failed compare, disabling\n"); 853 "Self-test copy failed compare, disabling\n");
848 err = -ENODEV; 854 err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
850 } 856 }
851 857
852free_resources: 858free_resources:
859 dmaengine_unmap_put(unmap);
853 mv_xor_free_chan_resources(dma_chan); 860 mv_xor_free_chan_resources(dma_chan);
854out: 861out:
855 kfree(src); 862 kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
867 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 874 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
868 dma_addr_t dest_dma; 875 dma_addr_t dest_dma;
869 struct dma_async_tx_descriptor *tx; 876 struct dma_async_tx_descriptor *tx;
877 struct dmaengine_unmap_data *unmap;
870 struct dma_chan *dma_chan; 878 struct dma_chan *dma_chan;
871 dma_cookie_t cookie; 879 dma_cookie_t cookie;
872 u8 cmp_byte = 0; 880 u8 cmp_byte = 0;
873 u32 cmp_word; 881 u32 cmp_word;
874 int err = 0; 882 int err = 0;
883 int src_count = MV_XOR_NUM_SRC_TEST;
875 884
876 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 885 for (src_idx = 0; src_idx < src_count; src_idx++) {
877 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 886 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
878 if (!xor_srcs[src_idx]) { 887 if (!xor_srcs[src_idx]) {
879 while (src_idx--) 888 while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
890 } 899 }
891 900
892 /* Fill in src buffers */ 901 /* Fill in src buffers */
893 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 902 for (src_idx = 0; src_idx < src_count; src_idx++) {
894 u8 *ptr = page_address(xor_srcs[src_idx]); 903 u8 *ptr = page_address(xor_srcs[src_idx]);
895 for (i = 0; i < PAGE_SIZE; i++) 904 for (i = 0; i < PAGE_SIZE; i++)
896 ptr[i] = (1 << src_idx); 905 ptr[i] = (1 << src_idx);
897 } 906 }
898 907
899 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) 908 for (src_idx = 0; src_idx < src_count; src_idx++)
900 cmp_byte ^= (u8) (1 << src_idx); 909 cmp_byte ^= (u8) (1 << src_idx);
901 910
902 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 911 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
910 goto out; 919 goto out;
911 } 920 }
912 921
922 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
923 GFP_KERNEL);
924 if (!unmap) {
925 err = -ENOMEM;
926 goto free_resources;
927 }
928
913 /* test xor */ 929 /* test xor */
914 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 930 for (i = 0; i < src_count; i++) {
915 DMA_FROM_DEVICE); 931 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
932 0, PAGE_SIZE, DMA_TO_DEVICE);
933 dma_srcs[i] = unmap->addr[i];
934 unmap->to_cnt++;
935 }
916 936
917 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) 937 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
918 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 938 DMA_FROM_DEVICE);
919 0, PAGE_SIZE, DMA_TO_DEVICE); 939 dest_dma = unmap->addr[src_count];
940 unmap->from_cnt = 1;
941 unmap->len = PAGE_SIZE;
920 942
921 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 943 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
922 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); 944 src_count, PAGE_SIZE, 0);
923 945
924 cookie = mv_xor_tx_submit(tx); 946 cookie = mv_xor_tx_submit(tx);
925 mv_xor_issue_pending(dma_chan); 947 mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
948 } 970 }
949 971
950free_resources: 972free_resources:
973 dmaengine_unmap_put(unmap);
951 mv_xor_free_chan_resources(dma_chan); 974 mv_xor_free_chan_resources(dma_chan);
952out: 975out:
953 src_idx = MV_XOR_NUM_SRC_TEST; 976 src_idx = src_count;
954 while (src_idx--) 977 while (src_idx--)
955 __free_page(xor_srcs[src_idx]); 978 __free_page(xor_srcs[src_idx]);
956 __free_page(dest); 979 __free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
1176 int i = 0; 1199 int i = 0;
1177 1200
1178 for_each_child_of_node(pdev->dev.of_node, np) { 1201 for_each_child_of_node(pdev->dev.of_node, np) {
1202 struct mv_xor_chan *chan;
1179 dma_cap_mask_t cap_mask; 1203 dma_cap_mask_t cap_mask;
1180 int irq; 1204 int irq;
1181 1205
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
1193 goto err_channel_add; 1217 goto err_channel_add;
1194 } 1218 }
1195 1219
1196 xordev->channels[i] = 1220 chan = mv_xor_channel_add(xordev, pdev, i,
1197 mv_xor_channel_add(xordev, pdev, i, 1221 cap_mask, irq);
1198 cap_mask, irq); 1222 if (IS_ERR(chan)) {
1199 if (IS_ERR(xordev->channels[i])) { 1223 ret = PTR_ERR(chan);
1200 ret = PTR_ERR(xordev->channels[i]);
1201 xordev->channels[i] = NULL;
1202 irq_dispose_mapping(irq); 1224 irq_dispose_mapping(irq);
1203 goto err_channel_add; 1225 goto err_channel_add;
1204 } 1226 }
1205 1227
1228 xordev->channels[i] = chan;
1206 i++; 1229 i++;
1207 } 1230 }
1208 } else if (pdata && pdata->channels) { 1231 } else if (pdata && pdata->channels) {
1209 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1232 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1210 struct mv_xor_channel_data *cd; 1233 struct mv_xor_channel_data *cd;
1234 struct mv_xor_chan *chan;
1211 int irq; 1235 int irq;
1212 1236
1213 cd = &pdata->channels[i]; 1237 cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
1222 goto err_channel_add; 1246 goto err_channel_add;
1223 } 1247 }
1224 1248
1225 xordev->channels[i] = 1249 chan = mv_xor_channel_add(xordev, pdev, i,
1226 mv_xor_channel_add(xordev, pdev, i, 1250 cd->cap_mask, irq);
1227 cd->cap_mask, irq); 1251 if (IS_ERR(chan)) {
1228 if (IS_ERR(xordev->channels[i])) { 1252 ret = PTR_ERR(chan);
1229 ret = PTR_ERR(xordev->channels[i]);
1230 goto err_channel_add; 1253 goto err_channel_add;
1231 } 1254 }
1255
1256 xordev->channels[i] = chan;
1232 } 1257 }
1233 } 1258 }
1234 1259
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483b8f2d..536632f6479c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2492 2492
2493static inline void _init_desc(struct dma_pl330_desc *desc) 2493static inline void _init_desc(struct dma_pl330_desc *desc)
2494{ 2494{
2495 desc->pchan = NULL;
2496 desc->req.x = &desc->px; 2495 desc->req.x = &desc->px;
2497 desc->req.token = desc; 2496 desc->req.token = desc;
2498 desc->rqcfg.swap = SWAP_NO; 2497 desc->rqcfg.swap = SWAP_NO;
2499 desc->rqcfg.privileged = 0;
2500 desc->rqcfg.insnaccess = 0;
2501 desc->rqcfg.scctl = SCCTRL0; 2498 desc->rqcfg.scctl = SCCTRL0;
2502 desc->rqcfg.dcctl = DCCTRL0; 2499 desc->rqcfg.dcctl = DCCTRL0;
2503 desc->req.cfg = &desc->rqcfg; 2500 desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2517 if (!pdmac) 2514 if (!pdmac)
2518 return 0; 2515 return 0;
2519 2516
2520 desc = kmalloc(count * sizeof(*desc), flg); 2517 desc = kcalloc(count, sizeof(*desc), flg);
2521 if (!desc) 2518 if (!desc)
2522 return 0; 2519 return 0;
2523 2520
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6b2a38..8bba298535b0 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
533} 533}
534 534
535/** 535/**
536 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
537 */
538static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
539 int value, unsigned long flags)
540{
541 struct dma_cdb *hw_desc = desc->hw_desc;
542
543 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
544 desc->hw_next = NULL;
545 desc->src_cnt = 1;
546 desc->dst_cnt = 1;
547
548 if (flags & DMA_PREP_INTERRUPT)
549 set_bit(PPC440SPE_DESC_INT, &desc->flags);
550 else
551 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
552
553 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
554 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
555 hw_desc->opc = DMA_CDB_OPC_DFILL128;
556}
557
558/**
559 * ppc440spe_desc_set_src_addr - set source address into the descriptor 536 * ppc440spe_desc_set_src_addr - set source address into the descriptor
560 */ 537 */
561static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, 538static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1504 struct ppc440spe_adma_chan *chan, 1481 struct ppc440spe_adma_chan *chan,
1505 dma_cookie_t cookie) 1482 dma_cookie_t cookie)
1506{ 1483{
1507 int i;
1508
1509 BUG_ON(desc->async_tx.cookie < 0); 1484 BUG_ON(desc->async_tx.cookie < 0);
1510 if (desc->async_tx.cookie > 0) { 1485 if (desc->async_tx.cookie > 0) {
1511 cookie = desc->async_tx.cookie; 1486 cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
3898 ppc440spe_adma_prep_dma_interrupt; 3873 ppc440spe_adma_prep_dma_interrupt;
3899 } 3874 }
3900 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " 3875 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
3901 "( %s%s%s%s%s%s%s)\n", 3876 "( %s%s%s%s%s%s)\n",
3902 dev_name(adev->dev), 3877 dev_name(adev->dev),
3903 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", 3878 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
3904 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", 3879 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29f5502..17686caf64d5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
406 dma_async_tx_callback callback; 406 dma_async_tx_callback callback;
407 void *param; 407 void *param;
408 struct dma_async_tx_descriptor *txd = &desc->txd; 408 struct dma_async_tx_descriptor *txd = &desc->txd;
409 struct txx9dmac_slave *ds = dc->chan.private;
410 409
411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 410 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
412 txd->cookie, desc); 411 txd->cookie, desc);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056458a3..281029daf98c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
1623 .cmd_per_lun = 1, 1623 .cmd_per_lun = 1,
1624 .can_queue = 1, 1624 .can_queue = 1,
1625 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1625 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1626 .no_write_same = 1,
1627}; 1626};
1628 1627
1629MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 1628MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5867..5373dc5b6011 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
14 14
15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ 15obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
16obj-$(CONFIG_EFI) += efi/ 16obj-$(CONFIG_EFI) += efi/
17obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4874e8..6aecbc86ec94 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
36 backend for pstore by default. This setting can be overridden 36 backend for pstore by default. This setting can be overridden
37 using the efivars module's pstore_disable parameter. 37 using the efivars module's pstore_disable parameter.
38 38
39config UEFI_CPER
40 def_bool n
41
42endmenu 39endmenu
40
41config UEFI_CPER
42 bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d3c775..6c2a41ec21ba 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# Makefile for linux kernel 2# Makefile for linux kernel
3# 3#
4obj-y += efi.o vars.o 4obj-$(CONFIG_EFI) += efi.o vars.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o 6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
7obj-$(CONFIG_UEFI_CPER) += cper.o 7obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd426f21b..4b9dc836dcf9 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
356static struct pstore_info efi_pstore_info = { 356static struct pstore_info efi_pstore_info = {
357 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
358 .name = "efi", 358 .name = "efi",
359 .flags = PSTORE_FLAGS_FRAGILE,
359 .open = efi_pstore_open, 360 .open = efi_pstore_open,
360 .close = efi_pstore_close, 361 .close = efi_pstore_close,
361 .read = efi_pstore_read, 362 .read = efi_pstore_read,
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300973db..2baf0ddf7e02 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
252 252
253 spin_lock_irqsave(&tlmm_lock, irq_flags); 253 spin_lock_irqsave(&tlmm_lock, irq_flags);
254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); 254 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
255 clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 255 clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
256 __clear_bit(gpio, msm_gpio.enabled_irqs); 256 __clear_bit(gpio, msm_gpio.enabled_irqs);
257 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 257 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
258} 258}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
264 264
265 spin_lock_irqsave(&tlmm_lock, irq_flags); 265 spin_lock_irqsave(&tlmm_lock, irq_flags);
266 __set_bit(gpio, msm_gpio.enabled_irqs); 266 __set_bit(gpio, msm_gpio.enabled_irqs);
267 set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); 267 set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); 268 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
269 spin_unlock_irqrestore(&tlmm_lock, irq_flags); 269 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
270} 270}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a30567a..8b7e719a68c3 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
169 u32 pending; 169 u32 pending;
170 unsigned int offset, irqs_handled = 0; 170 unsigned int offset, irqs_handled = 0;
171 171
172 while ((pending = gpio_rcar_read(p, INTDT))) { 172 while ((pending = gpio_rcar_read(p, INTDT) &
173 gpio_rcar_read(p, INTMSK))) {
173 offset = __ffs(pending); 174 offset = __ffs(pending);
174 gpio_rcar_write(p, INTCLR, BIT(offset)); 175 gpio_rcar_write(p, INTCLR, BIT(offset));
175 generic_handle_irq(irq_find_mapping(p->irq_domain, offset)); 176 generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6577b9..f9996899c1f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
300 if (offset < TWL4030_GPIO_MAX) 300 if (offset < TWL4030_GPIO_MAX)
301 ret = twl4030_set_gpio_direction(offset, 1); 301 ret = twl4030_set_gpio_direction(offset, 1);
302 else 302 else
303 ret = -EINVAL; 303 ret = -EINVAL; /* LED outputs can't be set as input */
304 304
305 if (!ret) 305 if (!ret)
306 priv->direction &= ~BIT(offset); 306 priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value) 354static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
355{ 355{
356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip); 356 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
357 int ret = -EINVAL; 357 int ret = 0;
358 358
359 mutex_lock(&priv->mutex); 359 mutex_lock(&priv->mutex);
360 if (offset < TWL4030_GPIO_MAX) 360 if (offset < TWL4030_GPIO_MAX) {
361 ret = twl4030_set_gpio_direction(offset, 0); 361 ret = twl4030_set_gpio_direction(offset, 0);
362 if (ret) {
363 mutex_unlock(&priv->mutex);
364 return ret;
365 }
366 }
367
368 /*
369 * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
370 */
362 371
363 priv->direction |= BIT(offset); 372 priv->direction |= BIT(offset);
364 mutex_unlock(&priv->mutex); 373 mutex_unlock(&priv->mutex);
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec9a5ff..a72cae03b99b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs; 103extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
104 104
105int armada_fbdev_init(struct drm_device *); 105int armada_fbdev_init(struct drm_device *);
106void armada_fbdev_lastclose(struct drm_device *);
106void armada_fbdev_fini(struct drm_device *); 107void armada_fbdev_fini(struct drm_device *);
107 108
108int armada_overlay_plane_create(struct drm_device *, unsigned long); 109int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b28354915..62d0ff3efddf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
321 DRM_UNLOCKED), 321 DRM_UNLOCKED),
322}; 322};
323 323
324static void armada_drm_lastclose(struct drm_device *dev)
325{
326 armada_fbdev_lastclose(dev);
327}
328
324static const struct file_operations armada_drm_fops = { 329static const struct file_operations armada_drm_fops = {
325 .owner = THIS_MODULE, 330 .owner = THIS_MODULE,
326 .llseek = no_llseek, 331 .llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
337 .open = NULL, 342 .open = NULL,
338 .preclose = NULL, 343 .preclose = NULL,
339 .postclose = NULL, 344 .postclose = NULL,
340 .lastclose = NULL, 345 .lastclose = armada_drm_lastclose,
341 .unload = armada_drm_unload, 346 .unload = armada_drm_unload,
342 .get_vblank_counter = drm_vblank_count, 347 .get_vblank_counter = drm_vblank_count,
343 .enable_vblank = armada_drm_enable_vblank, 348 .enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77dac96..948cb14c561e 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); 105 drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); 106 drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
107 107
108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n", 108 DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
109 dfb->fb.width, dfb->fb.height, 109 dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
110 dfb->fb.bits_per_pixel, obj->phys_addr); 110 (unsigned long long)obj->phys_addr);
111 111
112 return 0; 112 return 0;
113 113
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
177 return ret; 177 return ret;
178} 178}
179 179
180void armada_fbdev_lastclose(struct drm_device *dev)
181{
182 struct armada_private *priv = dev->dev_private;
183
184 drm_modeset_lock_all(dev);
185 if (priv->fbdev)
186 drm_fb_helper_restore_fbdev_mode(priv->fbdev);
187 drm_modeset_unlock_all(dev);
188}
189
180void armada_fbdev_fini(struct drm_device *dev) 190void armada_fbdev_fini(struct drm_device *dev)
181{ 191{
182 struct armada_private *priv = dev->dev_private; 192 struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
192 framebuffer_release(info); 202 framebuffer_release(info);
193 } 203 }
194 204
205 drm_fb_helper_fini(fbh);
206
195 if (fbh->fb) 207 if (fbh->fb)
196 fbh->fb->funcs->destroy(fbh->fb); 208 fbh->fb->funcs->destroy(fbh->fb);
197 209
198 drm_fb_helper_fini(fbh);
199
200 priv->fbdev = NULL; 210 priv->fbdev = NULL;
201 } 211 }
202} 212}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356bae7fd..887816f43476 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
172 obj->dev_addr = obj->linear->start; 172 obj->dev_addr = obj->linear->start;
173 } 173 }
174 174
175 DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n", 175 DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
176 obj, obj->phys_addr, obj->dev_addr); 176 (unsigned long long)obj->phys_addr,
177 (unsigned long long)obj->dev_addr);
177 178
178 return 0; 179 return 0;
179} 180}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
557 * refcount on the gem object itself. 558 * refcount on the gem object itself.
558 */ 559 */
559 drm_gem_object_reference(obj); 560 drm_gem_object_reference(obj);
560 dma_buf_put(buf);
561 return obj; 561 return obj;
562 } 562 }
563 } 563 }
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
573 } 573 }
574 574
575 dobj->obj.import_attach = attach; 575 dobj->obj.import_attach = attach;
576 get_dma_buf(buf);
576 577
577 /* 578 /*
578 * Don't call dma_buf_map_attachment() here - it maps the 579 * Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5f4234..8835dcddfac3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) 68#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
69/* Force reduced-blanking timings for detailed modes */ 69/* Force reduced-blanking timings for detailed modes */
70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7) 70#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
71/* Force 8bpc */
72#define EDID_QUIRK_FORCE_8BPC (1 << 8)
71 73
72struct detailed_mode_closure { 74struct detailed_mode_closure {
73 struct drm_connector *connector; 75 struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
128 130
129 /* Medion MD 30217 PG */ 131 /* Medion MD 30217 PG */
130 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, 132 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
133
134 /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
135 { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
131}; 136};
132 137
133/* 138/*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
3435 3440
3436 drm_add_display_info(edid, &connector->display_info); 3441 drm_add_display_info(edid, &connector->display_info);
3437 3442
3443 if (quirks & EDID_QUIRK_FORCE_8BPC)
3444 connector->display_info.bpc = 8;
3445
3438 return num_modes; 3446 return num_modes;
3439} 3447}
3440EXPORT_SYMBOL(drm_add_edid_modes); 3448EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d5246979c..66dd3a001cf1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
566 if (dev->driver->unload) 566 if (dev->driver->unload)
567 dev->driver->unload(dev); 567 dev->driver->unload(dev);
568err_primary_node: 568err_primary_node:
569 drm_put_minor(dev->primary); 569 drm_unplug_minor(dev->primary);
570err_render_node: 570err_render_node:
571 drm_put_minor(dev->render); 571 drm_unplug_minor(dev->render);
572err_control_node: 572err_control_node:
573 drm_put_minor(dev->control); 573 drm_unplug_minor(dev->control);
574err_agp: 574err_agp:
575 if (dev->driver->bus->agp_destroy) 575 if (dev->driver->bus->agp_destroy)
576 dev->driver->bus->agp_destroy(dev); 576 dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
83 drm_i915_private_t *dev_priv = dev->dev_private; 83 drm_i915_private_t *dev_priv = dev->dev_private;
84 struct drm_i915_master_private *master_priv; 84 struct drm_i915_master_private *master_priv;
85 85
86 /*
87 * The dri breadcrumb update races against the drm master disappearing.
88 * Instead of trying to fix this (this is by far not the only ums issue)
89 * just don't do the update in kms mode.
90 */
91 if (drm_core_check_feature(dev, DRIVER_MODESET))
92 return;
93
86 if (dev->primary->master) { 94 if (dev->primary->master) {
87 master_priv = dev->primary->master->driver_priv; 95 master_priv = dev->primary->master->driver_priv;
88 if (master_priv->sarea_priv) 96 if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1490 spin_lock_init(&dev_priv->uncore.lock); 1498 spin_lock_init(&dev_priv->uncore.lock);
1491 spin_lock_init(&dev_priv->mm.object_stat_lock); 1499 spin_lock_init(&dev_priv->mm.object_stat_lock);
1492 mutex_init(&dev_priv->dpio_lock); 1500 mutex_init(&dev_priv->dpio_lock);
1493 mutex_init(&dev_priv->rps.hw_lock);
1494 mutex_init(&dev_priv->modeset_restore_lock); 1501 mutex_init(&dev_priv->modeset_restore_lock);
1495 1502
1496 mutex_init(&dev_priv->pc8.lock); 1503 intel_pm_setup(dev);
1497 dev_priv->pc8.requirements_met = false;
1498 dev_priv->pc8.gpu_idle = false;
1499 dev_priv->pc8.irqs_disabled = false;
1500 dev_priv->pc8.enabled = false;
1501 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
1502 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
1503 1504
1504 intel_display_crc_init(dev); 1505 intel_display_crc_init(dev);
1505 1506
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1603 } 1604 }
1604 1605
1605 intel_irq_init(dev); 1606 intel_irq_init(dev);
1606 intel_pm_init(dev);
1607 intel_uncore_sanitize(dev); 1607 intel_uncore_sanitize(dev);
1608 1608
1609 /* Try to make sure MCHBAR is enabled before poking at it */ 1609 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
1848 1848
1849void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1849void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1850{ 1850{
1851 mutex_lock(&dev->struct_mutex);
1851 i915_gem_context_close(dev, file_priv); 1852 i915_gem_context_close(dev, file_priv);
1852 i915_gem_release(dev, file_priv); 1853 i915_gem_release(dev, file_priv);
1854 mutex_unlock(&dev->struct_mutex);
1853} 1855}
1854 1856
1855void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1857void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2e367a1c6a64..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
651 intel_modeset_init_hw(dev); 651 intel_modeset_init_hw(dev);
652 652
653 drm_modeset_lock_all(dev); 653 drm_modeset_lock_all(dev);
654 drm_mode_config_reset(dev);
654 intel_modeset_setup_hw_state(dev, true); 655 intel_modeset_setup_hw_state(dev, true);
655 drm_modeset_unlock_all(dev); 656 drm_modeset_unlock_all(dev);
656 657
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
1755#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1755#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1756#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 1756#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1757 ((dev)->pdev->device & 0xFF00) == 0x0C00) 1757 ((dev)->pdev->device & 0xFF00) == 0x0C00)
1758#define IS_ULT(dev) (IS_HASWELL(dev) && \ 1758#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
1759 (((dev)->pdev->device & 0xf) == 0x2 || \
1760 ((dev)->pdev->device & 0xf) == 0x6 || \
1761 ((dev)->pdev->device & 0xf) == 0xe))
1762#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
1759 ((dev)->pdev->device & 0xFF00) == 0x0A00) 1763 ((dev)->pdev->device & 0xFF00) == 0x0A00)
1764#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1760#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1765#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1761 ((dev)->pdev->device & 0x00F0) == 0x0020) 1766 ((dev)->pdev->device & 0x00F0) == 0x0020)
1762#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1767#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
1901void i915_handle_error(struct drm_device *dev, bool wedged); 1906void i915_handle_error(struct drm_device *dev, bool wedged);
1902 1907
1903extern void intel_irq_init(struct drm_device *dev); 1908extern void intel_irq_init(struct drm_device *dev);
1904extern void intel_pm_init(struct drm_device *dev);
1905extern void intel_hpd_init(struct drm_device *dev); 1909extern void intel_hpd_init(struct drm_device *dev);
1906extern void intel_pm_init(struct drm_device *dev);
1907 1910
1908extern void intel_uncore_sanitize(struct drm_device *dev); 1911extern void intel_uncore_sanitize(struct drm_device *dev);
1909extern void intel_uncore_early_sanitize(struct drm_device *dev); 1912extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 621c7c67a643..76d3d1ab73c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
2343 kfree(request); 2343 kfree(request);
2344} 2344}
2345 2345
2346static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, 2346static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2347 struct intel_ring_buffer *ring) 2347 struct intel_ring_buffer *ring)
2348{ 2348{
2349 u32 completed_seqno; 2349 u32 completed_seqno = ring->get_seqno(ring, false);
2350 u32 acthd; 2350 u32 acthd = intel_ring_get_active_head(ring);
2351 struct drm_i915_gem_request *request;
2352
2353 list_for_each_entry(request, &ring->request_list, list) {
2354 if (i915_seqno_passed(completed_seqno, request->seqno))
2355 continue;
2351 2356
2352 acthd = intel_ring_get_active_head(ring); 2357 i915_set_reset_status(ring, request, acthd);
2353 completed_seqno = ring->get_seqno(ring, false); 2358 }
2359}
2354 2360
2361static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2362 struct intel_ring_buffer *ring)
2363{
2355 while (!list_empty(&ring->request_list)) { 2364 while (!list_empty(&ring->request_list)) {
2356 struct drm_i915_gem_request *request; 2365 struct drm_i915_gem_request *request;
2357 2366
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2359 struct drm_i915_gem_request, 2368 struct drm_i915_gem_request,
2360 list); 2369 list);
2361 2370
2362 if (request->seqno > completed_seqno)
2363 i915_set_reset_status(ring, request, acthd);
2364
2365 i915_gem_free_request(request); 2371 i915_gem_free_request(request);
2366 } 2372 }
2367 2373
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
2403 struct intel_ring_buffer *ring; 2409 struct intel_ring_buffer *ring;
2404 int i; 2410 int i;
2405 2411
2412 /*
2413 * Before we free the objects from the requests, we need to inspect
2414 * them for finding the guilty party. As the requests only borrow
2415 * their reference to the objects, the inspection must be done first.
2416 */
2417 for_each_ring(ring, dev_priv, i)
2418 i915_gem_reset_ring_status(dev_priv, ring);
2419
2406 for_each_ring(ring, dev_priv, i) 2420 for_each_ring(ring, dev_priv, i)
2407 i915_gem_reset_ring_lists(dev_priv, ring); 2421 i915_gem_reset_ring_cleanup(dev_priv, ring);
2408 2422
2409 i915_gem_cleanup_ringbuffer(dev); 2423 i915_gem_cleanup_ringbuffer(dev);
2410 2424
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
347{ 347{
348 struct drm_i915_file_private *file_priv = file->driver_priv; 348 struct drm_i915_file_private *file_priv = file->driver_priv;
349 349
350 mutex_lock(&dev->struct_mutex);
351 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 350 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
352 idr_destroy(&file_priv->context_idr); 351 idr_destroy(&file_priv->context_idr);
353 mutex_unlock(&dev->struct_mutex);
354} 352}
355 353
356static struct i915_hw_context * 354static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
423 if (ret) 421 if (ret)
424 return ret; 422 return ret;
425 423
426 /* Clear this page out of any CPU caches for coherent swap-in/out. Note 424 /*
425 * Pin can switch back to the default context if we end up calling into
426 * evict_everything - as a last ditch gtt defrag effort that also
427 * switches to the default context. Hence we need to reload from here.
428 */
429 from = ring->last_context;
430
431 /*
432 * Clear this page out of any CPU caches for coherent swap-in/out. Note
427 * that thanks to write = false in this call and us not setting any gpu 433 * that thanks to write = false in this call and us not setting any gpu
428 * write domains when putting a context object onto the active list 434 * write domains when putting a context object onto the active list
429 * (when switching away from it), this won't block. 435 * (when switching away from it), this won't block.
430 * XXX: We need a real interface to do this instead of trickery. */ 436 *
437 * XXX: We need a real interface to do this instead of trickery.
438 */
431 ret = i915_gem_object_set_to_gtt_domain(to->obj, false); 439 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
432 if (ret) { 440 if (ret) {
433 i915_gem_object_unpin(to->obj); 441 i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
88 } else 88 } else
89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 89 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
90 90
91search_again:
91 /* First see if there is a large enough contiguous idle region... */ 92 /* First see if there is a large enough contiguous idle region... */
92 list_for_each_entry(vma, &vm->inactive_list, mm_list) { 93 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
93 if (mark_free(vma, &unwind_list)) 94 if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
115 list_del_init(&vma->exec_list); 116 list_del_init(&vma->exec_list);
116 } 117 }
117 118
118 /* We expect the caller to unpin, evict all and try again, or give up. 119 /* Can we unpin some objects such as idle hw contents,
119 * So calling i915_gem_evict_vm() is unnecessary. 120 * or pending flips?
120 */ 121 */
121 return -ENOSPC; 122 ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
123 if (ret)
124 return ret;
125
126 /* Only idle the GPU and repeat the search once */
127 i915_gem_retire_requests(dev);
128 nonblocking = true;
129 goto search_again;
122 130
123found: 131found:
124 /* drm_mm doesn't allow any other other operations while 132 /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7e787fb4649..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
93{ 93{
94 struct drm_i915_gem_object *obj; 94 struct drm_i915_gem_object *obj;
95 struct list_head objects; 95 struct list_head objects;
96 int i, ret = 0; 96 int i, ret;
97 97
98 INIT_LIST_HEAD(&objects); 98 INIT_LIST_HEAD(&objects);
99 spin_lock(&file->table_lock); 99 spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
106 DRM_DEBUG("Invalid object handle %d at index %d\n", 106 DRM_DEBUG("Invalid object handle %d at index %d\n",
107 exec[i].handle, i); 107 exec[i].handle, i);
108 ret = -ENOENT; 108 ret = -ENOENT;
109 goto out; 109 goto err;
110 } 110 }
111 111
112 if (!list_empty(&obj->obj_exec_link)) { 112 if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
115 obj, exec[i].handle, i); 115 obj, exec[i].handle, i);
116 ret = -EINVAL; 116 ret = -EINVAL;
117 goto out; 117 goto err;
118 } 118 }
119 119
120 drm_gem_object_reference(&obj->base); 120 drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
123 spin_unlock(&file->table_lock); 123 spin_unlock(&file->table_lock);
124 124
125 i = 0; 125 i = 0;
126 list_for_each_entry(obj, &objects, obj_exec_link) { 126 while (!list_empty(&objects)) {
127 struct i915_vma *vma; 127 struct i915_vma *vma;
128 128
129 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object,
131 obj_exec_link);
132
129 /* 133 /*
130 * NOTE: We can leak any vmas created here when something fails 134 * NOTE: We can leak any vmas created here when something fails
131 * later on. But that's no issue since vma_unbind can deal with 135 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
138 if (IS_ERR(vma)) { 142 if (IS_ERR(vma)) {
139 DRM_DEBUG("Failed to lookup VMA\n"); 143 DRM_DEBUG("Failed to lookup VMA\n");
140 ret = PTR_ERR(vma); 144 ret = PTR_ERR(vma);
141 goto out; 145 goto err;
142 } 146 }
143 147
148 /* Transfer ownership from the objects list to the vmas list. */
144 list_add_tail(&vma->exec_list, &eb->vmas); 149 list_add_tail(&vma->exec_list, &eb->vmas);
150 list_del_init(&obj->obj_exec_link);
145 151
146 vma->exec_entry = &exec[i]; 152 vma->exec_entry = &exec[i];
147 if (eb->and < 0) { 153 if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
155 ++i; 161 ++i;
156 } 162 }
157 163
164 return 0;
165
158 166
159out: 167err:
160 while (!list_empty(&objects)) { 168 while (!list_empty(&objects)) {
161 obj = list_first_entry(&objects, 169 obj = list_first_entry(&objects,
162 struct drm_i915_gem_object, 170 struct drm_i915_gem_object,
163 obj_exec_link); 171 obj_exec_link);
164 list_del_init(&obj->obj_exec_link); 172 list_del_init(&obj->obj_exec_link);
165 if (ret) 173 drm_gem_object_unreference(&obj->base);
166 drm_gem_object_unreference(&obj->base);
167 } 174 }
175 /*
176 * Objects already transfered to the vmas list will be unreferenced by
177 * eb_destroy.
178 */
179
168 return ret; 180 return ret;
169} 181}
170 182
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38cb8d44a013..d3c3b5b15824 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
337 kfree(ppgtt->gen8_pt_dma_addr[i]); 337 kfree(ppgtt->gen8_pt_dma_addr[i]);
338 } 338 }
339 339
340 __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); 340 __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
341 __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); 341 __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
342} 342}
343 343
344/** 344/**
@@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
906 WARN_ON(readq(&gtt_entries[i-1]) 906 WARN_ON(readq(&gtt_entries[i-1])
907 != gen8_pte_encode(addr, level, true)); 907 != gen8_pte_encode(addr, level, true));
908 908
909#if 0 /* TODO: Still needed on GEN8? */
910 /* This next bit makes the above posting read even more important. We 909 /* This next bit makes the above posting read even more important. We
911 * want to flush the TLBs only after we're certain all the PTE updates 910 * want to flush the TLBs only after we're certain all the PTE updates
912 * have finished. 911 * have finished.
913 */ 912 */
914 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 913 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
915 POSTING_READ(GFX_FLSH_CNTL_GEN6); 914 POSTING_READ(GFX_FLSH_CNTL_GEN6);
916#endif
917} 915}
918 916
919/* 917/*
@@ -1241,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1241 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 1239 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1242 if (bdw_gmch_ctl) 1240 if (bdw_gmch_ctl)
1243 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1241 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1242 if (bdw_gmch_ctl > 4) {
1243 WARN_ON(!i915_preliminary_hw_support);
1244 return 4<<20;
1245 }
1246
1244 return bdw_gmch_ctl << 20; 1247 return bdw_gmch_ctl << 20;
1245} 1248}
1246 1249
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 080f6fd4e839..769b864465a9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6303 uint32_t val; 6303 uint32_t val;
6304 6304
6305 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) 6305 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6306 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", 6306 WARN(crtc->active, "CRTC for pipe %c enabled\n",
6307 pipe_name(crtc->pipe)); 6307 pipe_name(crtc->pipe));
6308 6308
6309 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 6309 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
9135 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9135 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
9136 PIPE_CONF_CHECK_I(pipe_bpp); 9136 PIPE_CONF_CHECK_I(pipe_bpp);
9137 9137
9138 if (!IS_HASWELL(dev)) { 9138 if (!HAS_DDI(dev)) {
9139 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9139 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
9140 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9140 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
9141 } 9141 }
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
10541 /* Sony Vaio Y cannot use SSC on LVDS */ 10541 /* Sony Vaio Y cannot use SSC on LVDS */
10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10543 10543
10544 /* 10544 /* Acer Aspire 5734Z must invert backlight brightness */
10545 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops 10545 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10546 * seem to use inverted backlight PWM. 10546
10547 */ 10547 /* Acer/eMachines G725 */
10548 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10548 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10549
10550 /* Acer/eMachines e725 */
10551 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10552
10553 /* Acer/Packard Bell NCL20 */
10554 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10555
10556 /* Acer Aspire 4736Z */
10557 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10549 10558
10550 /* Dell XPS13 HD Sandy Bridge */ 10559 /* Dell XPS13 HD Sandy Bridge */
10551 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10560 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11036,8 +11045,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
11036 } 11045 }
11037 11046
11038 intel_modeset_check_state(dev); 11047 intel_modeset_check_state(dev);
11039
11040 drm_mode_config_reset(dev);
11041} 11048}
11042 11049
11043void intel_modeset_gem_init(struct drm_device *dev) 11050void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11046 11053
11047 intel_setup_overlay(dev); 11054 intel_setup_overlay(dev);
11048 11055
11056 drm_modeset_lock_all(dev);
11057 drm_mode_config_reset(dev);
11049 intel_modeset_setup_hw_state(dev, false); 11058 intel_modeset_setup_hw_state(dev, false);
11059 drm_modeset_unlock_all(dev);
11050} 11060}
11051 11061
11052void intel_modeset_cleanup(struct drm_device *dev) 11062void intel_modeset_cleanup(struct drm_device *dev)
@@ -11125,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
11125int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 11135int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11126{ 11136{
11127 struct drm_i915_private *dev_priv = dev->dev_private; 11137 struct drm_i915_private *dev_priv = dev->dev_private;
11138 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11128 u16 gmch_ctrl; 11139 u16 gmch_ctrl;
11129 11140
11130 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); 11141 pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11131 if (state) 11142 if (state)
11132 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 11143 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11133 else 11144 else
11134 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 11145 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11135 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 11146 pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11136 return 0; 11147 return 0;
11137} 11148}
11138 11149
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a18e88b3e425..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
821 uint32_t sprite_width, int pixel_size, 821 uint32_t sprite_width, int pixel_size,
822 bool enabled, bool scaled); 822 bool enabled, bool scaled);
823void intel_init_pm(struct drm_device *dev); 823void intel_init_pm(struct drm_device *dev);
824void intel_pm_setup(struct drm_device *dev);
824bool intel_fbc_enabled(struct drm_device *dev); 825bool intel_fbc_enabled(struct drm_device *dev);
825void intel_update_fbc(struct drm_device *dev); 826void intel_update_fbc(struct drm_device *dev);
826void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 827void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
451 451
452 spin_lock_irqsave(&dev_priv->backlight.lock, flags); 452 spin_lock_irqsave(&dev_priv->backlight.lock, flags);
453 453
454 if (HAS_PCH_SPLIT(dev)) { 454 if (IS_BROADWELL(dev)) {
455 val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
456 } else if (HAS_PCH_SPLIT(dev)) {
455 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 457 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
456 } else { 458 } else {
457 if (IS_VALLEYVIEW(dev)) 459 if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
479 return val; 481 return val;
480} 482}
481 483
484static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
485{
486 struct drm_i915_private *dev_priv = dev->dev_private;
487 u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
488 I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
489}
490
482static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) 491static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
483{ 492{
484 struct drm_i915_private *dev_priv = dev->dev_private; 493 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
496 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 505 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
497 level = intel_panel_compute_brightness(dev, pipe, level); 506 level = intel_panel_compute_brightness(dev, pipe, level);
498 507
499 if (HAS_PCH_SPLIT(dev)) 508 if (IS_BROADWELL(dev))
509 return intel_bdw_panel_set_backlight(dev, level);
510 else if (HAS_PCH_SPLIT(dev))
500 return intel_pch_panel_set_backlight(dev, level); 511 return intel_pch_panel_set_backlight(dev, level);
501 512
502 if (is_backlight_combination_mode(dev)) { 513 if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
666 POSTING_READ(reg); 677 POSTING_READ(reg);
667 I915_WRITE(reg, tmp | BLM_PWM_ENABLE); 678 I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
668 679
669 if (HAS_PCH_SPLIT(dev) && 680 if (IS_BROADWELL(dev)) {
681 /*
682 * Broadwell requires PCH override to drive the PCH
683 * backlight pin. The above will configure the CPU
684 * backlight pin, which we don't plan to use.
685 */
686 tmp = I915_READ(BLC_PWM_PCH_CTL1);
687 tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
688 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
689 } else if (HAS_PCH_SPLIT(dev) &&
670 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { 690 !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
671 tmp = I915_READ(BLC_PWM_PCH_CTL1); 691 tmp = I915_READ(BLC_PWM_PCH_CTL1);
672 tmp |= BLM_PCH_PWM_ENABLE; 692 tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e0d5e075b15..26c29c173221 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5685,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5685{ 5685{
5686 struct drm_i915_private *dev_priv = dev->dev_private; 5686 struct drm_i915_private *dev_priv = dev->dev_private;
5687 bool is_enabled, enable_requested; 5687 bool is_enabled, enable_requested;
5688 unsigned long irqflags;
5688 uint32_t tmp; 5689 uint32_t tmp;
5689 5690
5691 WARN_ON(dev_priv->pc8.enabled);
5692
5690 tmp = I915_READ(HSW_PWR_WELL_DRIVER); 5693 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5691 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; 5694 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5692 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; 5695 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5702,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5702 HSW_PWR_WELL_STATE_ENABLED), 20)) 5705 HSW_PWR_WELL_STATE_ENABLED), 20))
5703 DRM_ERROR("Timeout enabling power well\n"); 5706 DRM_ERROR("Timeout enabling power well\n");
5704 } 5707 }
5708
5709 if (IS_BROADWELL(dev)) {
5710 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5711 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5712 dev_priv->de_irq_mask[PIPE_B]);
5713 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5714 ~dev_priv->de_irq_mask[PIPE_B] |
5715 GEN8_PIPE_VBLANK);
5716 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5717 dev_priv->de_irq_mask[PIPE_C]);
5718 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5719 ~dev_priv->de_irq_mask[PIPE_C] |
5720 GEN8_PIPE_VBLANK);
5721 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5722 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5723 }
5705 } else { 5724 } else {
5706 if (enable_requested) { 5725 if (enable_requested) {
5707 unsigned long irqflags;
5708 enum pipe p; 5726 enum pipe p;
5709 5727
5710 I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 5728 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5731,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
5731static void __intel_power_well_get(struct drm_device *dev, 5749static void __intel_power_well_get(struct drm_device *dev,
5732 struct i915_power_well *power_well) 5750 struct i915_power_well *power_well)
5733{ 5751{
5734 if (!power_well->count++) 5752 struct drm_i915_private *dev_priv = dev->dev_private;
5753
5754 if (!power_well->count++) {
5755 hsw_disable_package_c8(dev_priv);
5735 __intel_set_power_well(dev, true); 5756 __intel_set_power_well(dev, true);
5757 }
5736} 5758}
5737 5759
5738static void __intel_power_well_put(struct drm_device *dev, 5760static void __intel_power_well_put(struct drm_device *dev,
5739 struct i915_power_well *power_well) 5761 struct i915_power_well *power_well)
5740{ 5762{
5763 struct drm_i915_private *dev_priv = dev->dev_private;
5764
5741 WARN_ON(!power_well->count); 5765 WARN_ON(!power_well->count);
5742 if (!--power_well->count && i915_disable_power_well) 5766 if (!--power_well->count && i915_disable_power_well) {
5743 __intel_set_power_well(dev, false); 5767 __intel_set_power_well(dev, false);
5768 hsw_enable_package_c8(dev_priv);
5769 }
5744} 5770}
5745 5771
5746void intel_display_power_get(struct drm_device *dev, 5772void intel_display_power_get(struct drm_device *dev,
@@ -6130,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
6130 return val; 6156 return val;
6131} 6157}
6132 6158
6133void intel_pm_init(struct drm_device *dev) 6159void intel_pm_setup(struct drm_device *dev)
6134{ 6160{
6135 struct drm_i915_private *dev_priv = dev->dev_private; 6161 struct drm_i915_private *dev_priv = dev->dev_private;
6136 6162
6163 mutex_init(&dev_priv->rps.hw_lock);
6164
6165 mutex_init(&dev_priv->pc8.lock);
6166 dev_priv->pc8.requirements_met = false;
6167 dev_priv->pc8.gpu_idle = false;
6168 dev_priv->pc8.irqs_disabled = false;
6169 dev_priv->pc8.enabled = false;
6170 dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
6171 INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
6137 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6172 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6138 intel_gen6_powersave_work); 6173 intel_gen6_powersave_work);
6139} 6174}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
965 } else if (IS_GEN6(ring->dev)) { 965 } else if (IS_GEN6(ring->dev)) {
966 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 966 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
967 } else { 967 } else {
968 /* XXX: gen8 returns to sanity */
968 mmio = RING_HWS_PGA(ring->mmio_base); 969 mmio = RING_HWS_PGA(ring->mmio_base);
969 } 970 }
970 971
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
784int intel_gpu_reset(struct drm_device *dev) 784int intel_gpu_reset(struct drm_device *dev)
785{ 785{
786 switch (INTEL_INFO(dev)->gen) { 786 switch (INTEL_INFO(dev)->gen) {
787 case 8:
787 case 7: 788 case 7:
788 case 6: return gen6_do_reset(dev); 789 case 6: return gen6_do_reset(dev);
789 case 5: return ironlake_do_reset(dev); 790 case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
78 /* fbram uses this before device subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
260 if (!dhandle) 261 if (!dhandle)
261 return false; 262 return false;
262 263
263 if (!acpi_has_method(dhandle, "_DSM")) 264 if (!acpi_has_method(dhandle, "_DSM")) {
265 nouveau_dsm_priv.other_handle = dhandle;
264 return false; 266 return false;
265 267 }
266 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) 268 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
267 retval |= NOUVEAU_DSM_HAS_MUX; 269 retval |= NOUVEAU_DSM_HAS_MUX;
268 270
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
338 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 340 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
339 acpi_method_name); 341 acpi_method_name);
340 nouveau_dsm_priv.dsm_detected = true; 342 nouveau_dsm_priv.dsm_detected = true;
343 /*
344 * On some systems hotplug events are generated for the device
345 * being switched off when _DSM is executed. They cause ACPI
346 * hotplug to trigger and attempt to remove the device from
347 * the system, which causes it to break down. Prevent that from
348 * happening by setting the no_hotplug flag for the involved
349 * ACPI device objects.
350 */
351 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
352 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
341 ret = true; 353 ret = true;
342 } 354 }
343 355
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence); 611 nouveau_fence_unref(&fence);
612 if (ret) 612 if (ret)
613 return ret; 613 goto fail_free;
614 614
615 if (new_bo != old_bo) { 615 if (new_bo != old_bo) {
616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f1c41a..98a22e6e27a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
858 if (nouveau_runtime_pm == 0) 858 if (nouveau_runtime_pm == 0)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 /* are we optimus enabled? */
862 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
863 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
864 return -EINVAL;
865 }
866
861 nv_debug_level(SILENT); 867 nv_debug_level(SILENT);
862 drm_kms_helper_poll_disable(drm_dev); 868 drm_kms_helper_poll_disable(drm_dev);
863 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); 869 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324bf58f..66ac0ff95f5a 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
8 select DRM_KMS_HELPER 8 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER 9 select DRM_KMS_FB_HELPER
10 select DRM_TTM 10 select DRM_TTM
11 select CRC32
11 help 12 help
12 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. 13 QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c29d194..d70aafb83307 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26 26
27#include "linux/crc32.h" 27#include <linux/crc32.h>
28 28
29#include "qxl_drv.h" 29#include "qxl_drv.h"
30#include "qxl_object.h" 30#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a20120e625..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_BONAIRE) 1146 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1149 tmp = rdev->config.si.tile_config;
1150 else if (rdev->family >= CHIP_CAYMAN)
1151 tmp = rdev->config.cayman.tile_config;
1152 else
1153 tmp = rdev->config.evergreen.tile_config;
1154 1147
1155 switch ((tmp & 0xf0) >> 4) { 1148 /* Set NUM_BANKS. */
1156 case 0: /* 4 banks */ 1149 if (rdev->family >= CHIP_BONAIRE) {
1157 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1150 unsigned tileb, index, num_banks, tile_split_bytes;
1158 break; 1151
1159 case 1: /* 8 banks */ 1152 /* Calculate the macrotile mode index. */
1160 default: 1153 tile_split_bytes = 64 << tile_split;
1161 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1154 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1162 break; 1155 tileb = min(tile_split_bytes, tileb);
1163 case 2: /* 16 banks */ 1156
1164 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1157 for (index = 0; tileb > 64; index++) {
1165 break; 1158 tileb >>= 1;
1159 }
1160
1161 if (index >= 16) {
1162 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1163 target_fb->bits_per_pixel, tile_split);
1164 return -EINVAL;
1165 }
1166
1167 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1168 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1169 } else {
1170 /* SI and older. */
1171 if (rdev->family >= CHIP_TAHITI)
1172 tmp = rdev->config.si.tile_config;
1173 else if (rdev->family >= CHIP_CAYMAN)
1174 tmp = rdev->config.cayman.tile_config;
1175 else
1176 tmp = rdev->config.evergreen.tile_config;
1177
1178 switch ((tmp & 0xf0) >> 4) {
1179 case 0: /* 4 banks */
1180 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1181 break;
1182 case 1: /* 8 banks */
1183 default:
1184 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1185 break;
1186 case 2: /* 16 banks */
1187 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1188 break;
1189 }
1166 } 1190 }
1167 1191
1168 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1192 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1169
1170 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1171 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1193 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1194 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1195 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1202 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1181 1203
1182 if (rdev->family >= CHIP_BONAIRE) { 1204 if (rdev->family >= CHIP_BONAIRE) {
1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1205 /* Read the pipe config from the 2D TILED SCANOUT mode.
1184 u32 num_rb = rdev->config.cik.max_backends_per_se; 1206 * It should be the same for the other modes too, but not all
1185 if (num_pipe_configs > 8) 1207 * modes set the pipe config field. */
1186 num_pipe_configs = 8; 1208 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1187 if (num_pipe_configs == 8) 1209
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); 1210 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) || 1211 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN)) 1212 (rdev->family == CHIP_PITCAIRN))
1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1213 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1199 else if (rdev->family == CHIP_VERDE) 1214 else if ((rdev->family == CHIP_VERDE) ||
1215 (rdev->family == CHIP_OLAND) ||
1216 (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1200 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16); 1217 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1201 1218
1202 switch (radeon_crtc->crtc_id) { 1219 switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e950fabd7f5e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
3057 * Returns the disabled RB bitmask. 3057 * Returns the disabled RB bitmask.
3058 */ 3058 */
3059static u32 cik_get_rb_disabled(struct radeon_device *rdev, 3059static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3060 u32 max_rb_num, u32 se_num, 3060 u32 max_rb_num_per_se,
3061 u32 sh_per_se) 3061 u32 sh_per_se)
3062{ 3062{
3063 u32 data, mask; 3063 u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3071 3071
3072 data >>= BACKEND_DISABLE_SHIFT; 3072 data >>= BACKEND_DISABLE_SHIFT;
3073 3073
3074 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); 3074 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3075 3075
3076 return data & mask; 3076 return data & mask;
3077} 3077}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3088 */ 3088 */
3089static void cik_setup_rb(struct radeon_device *rdev, 3089static void cik_setup_rb(struct radeon_device *rdev,
3090 u32 se_num, u32 sh_per_se, 3090 u32 se_num, u32 sh_per_se,
3091 u32 max_rb_num) 3091 u32 max_rb_num_per_se)
3092{ 3092{
3093 int i, j; 3093 int i, j;
3094 u32 data, mask; 3094 u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3098 for (i = 0; i < se_num; i++) { 3098 for (i = 0; i < se_num; i++) {
3099 for (j = 0; j < sh_per_se; j++) { 3099 for (j = 0; j < sh_per_se; j++) {
3100 cik_select_se_sh(rdev, i, j); 3100 cik_select_se_sh(rdev, i, j);
3101 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3101 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3102 if (rdev->family == CHIP_HAWAII) 3102 if (rdev->family == CHIP_HAWAII)
3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3104 else 3104 else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3109 3109
3110 mask = 1; 3110 mask = 1;
3111 for (i = 0; i < max_rb_num; i++) { 3111 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3112 if (!(disabled_rbs & mask)) 3112 if (!(disabled_rbs & mask))
3113 enabled_rbs |= mask; 3113 enabled_rbs |= mask;
3114 mask <<= 1; 3114 mask <<= 1;
3115 } 3115 }
3116 3116
3117 rdev->config.cik.backend_enable_mask = enabled_rbs;
3118
3117 for (i = 0; i < se_num; i++) { 3119 for (i = 0; i < se_num; i++) {
3118 cik_select_se_sh(rdev, i, 0xffffffff); 3120 cik_select_se_sh(rdev, i, 0xffffffff);
3119 data = 0; 3121 data = 0;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727a4f70..d08b83c6267b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
458 radeon_ring_write(ring, 0); /* src/dst endian swap */ 458 radeon_ring_write(ring, 0); /* src/dst endian swap */
459 radeon_ring_write(ring, src_offset & 0xffffffff); 459 radeon_ring_write(ring, src_offset & 0xffffffff);
460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff); 460 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
461 radeon_ring_write(ring, dst_offset & 0xfffffffc); 461 radeon_ring_write(ring, dst_offset & 0xffffffff);
462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff); 462 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
463 src_offset += cur_size_in_bytes; 463 src_offset += cur_size_in_bytes;
464 dst_offset += cur_size_in_bytes; 464 dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493cbc44..713a5d359901 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
174 } 174 }
175 175
176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 176 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
177 if (sad_count < 0) { 177 if (sad_count <= 0) {
178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 178 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
179 return; 179 return;
180 } 180 }
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
235 } 235 }
236 236
237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 237 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
238 if (sad_count < 0) { 238 if (sad_count <= 0) {
239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 239 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
240 return; 240 return;
241 } 241 }
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
308 rdev->audio.enabled = true; 308 rdev->audio.enabled = true;
309 309
310 if (ASIC_IS_DCE8(rdev)) 310 if (ASIC_IS_DCE8(rdev))
311 rdev->audio.num_pins = 7; 311 rdev->audio.num_pins = 6;
312 else if (ASIC_IS_DCE61(rdev))
313 rdev->audio.num_pins = 4;
312 else 314 else
313 rdev->audio.num_pins = 6; 315 rdev->audio.num_pins = 6;
314 316
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4feb3d..0c6d5cef4cf1 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
118 } 118 }
119 119
120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 120 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
121 if (sad_count < 0) { 121 if (sad_count <= 0) {
122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 122 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
123 return; 123 return;
124 } 124 }
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
173 } 173 }
174 174
175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 175 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
176 if (sad_count < 0) { 176 if (sad_count <= 0) {
177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 177 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
178 return; 178 return;
179 } 179 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2ab54ce..f59a9e9fccf8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
895 (rdev->pdev->device == 0x999C)) { 895 (rdev->pdev->device == 0x999C)) {
896 rdev->config.cayman.max_simds_per_se = 6; 896 rdev->config.cayman.max_simds_per_se = 6;
897 rdev->config.cayman.max_backends_per_se = 2; 897 rdev->config.cayman.max_backends_per_se = 2;
898 rdev->config.cayman.max_hw_contexts = 8;
899 rdev->config.cayman.sx_max_export_size = 256;
900 rdev->config.cayman.sx_max_export_pos_size = 64;
901 rdev->config.cayman.sx_max_export_smx_size = 192;
898 } else if ((rdev->pdev->device == 0x9903) || 902 } else if ((rdev->pdev->device == 0x9903) ||
899 (rdev->pdev->device == 0x9904) || 903 (rdev->pdev->device == 0x9904) ||
900 (rdev->pdev->device == 0x990A) || 904 (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
905 (rdev->pdev->device == 0x999D)) { 909 (rdev->pdev->device == 0x999D)) {
906 rdev->config.cayman.max_simds_per_se = 4; 910 rdev->config.cayman.max_simds_per_se = 4;
907 rdev->config.cayman.max_backends_per_se = 2; 911 rdev->config.cayman.max_backends_per_se = 2;
912 rdev->config.cayman.max_hw_contexts = 8;
913 rdev->config.cayman.sx_max_export_size = 256;
914 rdev->config.cayman.sx_max_export_pos_size = 64;
915 rdev->config.cayman.sx_max_export_smx_size = 192;
908 } else if ((rdev->pdev->device == 0x9919) || 916 } else if ((rdev->pdev->device == 0x9919) ||
909 (rdev->pdev->device == 0x9990) || 917 (rdev->pdev->device == 0x9990) ||
910 (rdev->pdev->device == 0x9991) || 918 (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
915 (rdev->pdev->device == 0x99A0)) { 923 (rdev->pdev->device == 0x99A0)) {
916 rdev->config.cayman.max_simds_per_se = 3; 924 rdev->config.cayman.max_simds_per_se = 3;
917 rdev->config.cayman.max_backends_per_se = 1; 925 rdev->config.cayman.max_backends_per_se = 1;
926 rdev->config.cayman.max_hw_contexts = 4;
927 rdev->config.cayman.sx_max_export_size = 128;
928 rdev->config.cayman.sx_max_export_pos_size = 32;
929 rdev->config.cayman.sx_max_export_smx_size = 96;
918 } else { 930 } else {
919 rdev->config.cayman.max_simds_per_se = 2; 931 rdev->config.cayman.max_simds_per_se = 2;
920 rdev->config.cayman.max_backends_per_se = 1; 932 rdev->config.cayman.max_backends_per_se = 1;
933 rdev->config.cayman.max_hw_contexts = 4;
934 rdev->config.cayman.sx_max_export_size = 128;
935 rdev->config.cayman.sx_max_export_pos_size = 32;
936 rdev->config.cayman.sx_max_export_smx_size = 96;
921 } 937 }
922 rdev->config.cayman.max_texture_channel_caches = 2; 938 rdev->config.cayman.max_texture_channel_caches = 2;
923 rdev->config.cayman.max_gprs = 256; 939 rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
925 rdev->config.cayman.max_gs_threads = 32; 941 rdev->config.cayman.max_gs_threads = 32;
926 rdev->config.cayman.max_stack_entries = 512; 942 rdev->config.cayman.max_stack_entries = 512;
927 rdev->config.cayman.sx_num_of_sets = 8; 943 rdev->config.cayman.sx_num_of_sets = 8;
928 rdev->config.cayman.sx_max_export_size = 256;
929 rdev->config.cayman.sx_max_export_pos_size = 64;
930 rdev->config.cayman.sx_max_export_smx_size = 192;
931 rdev->config.cayman.max_hw_contexts = 8;
932 rdev->config.cayman.sq_num_cf_insts = 2; 944 rdev->config.cayman.sq_num_cf_insts = 2;
933 945
934 rdev->config.cayman.sc_prim_fifo_size = 0x40; 946 rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1f990d0eaa1..45e1f447bc79 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
1940 unsigned sc_earlyz_tile_fifo_size; 1940 unsigned sc_earlyz_tile_fifo_size;
1941 1941
1942 unsigned num_tile_pipes; 1942 unsigned num_tile_pipes;
1943 unsigned num_backends_per_se; 1943 unsigned backend_enable_mask;
1944 unsigned backend_disable_mask_per_asic; 1944 unsigned backend_disable_mask_per_asic;
1945 unsigned backend_map; 1945 unsigned backend_map;
1946 unsigned num_texture_channel_caches; 1946 unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
1970 unsigned sc_earlyz_tile_fifo_size; 1970 unsigned sc_earlyz_tile_fifo_size;
1971 1971
1972 unsigned num_tile_pipes; 1972 unsigned num_tile_pipes;
1973 unsigned num_backends_per_se; 1973 unsigned backend_enable_mask;
1974 unsigned backend_disable_mask_per_asic; 1974 unsigned backend_disable_mask_per_asic;
1975 unsigned backend_map; 1975 unsigned backend_map;
1976 unsigned num_texture_channel_caches; 1976 unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce94cdd1..c0425bb6223a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
2021 .hdmi_setmode = &evergreen_hdmi_setmode, 2021 .hdmi_setmode = &evergreen_hdmi_setmode,
2022 }, 2022 },
2023 .copy = { 2023 .copy = {
2024 .blit = NULL, 2024 .blit = &cik_copy_cpdma,
2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2025 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2026 .dma = &cik_copy_dma, 2026 .dma = &cik_copy_dma,
2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2027 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
2122 .hdmi_setmode = &evergreen_hdmi_setmode, 2122 .hdmi_setmode = &evergreen_hdmi_setmode,
2123 }, 2123 },
2124 .copy = { 2124 .copy = {
2125 .blit = NULL, 2125 .blit = &cik_copy_cpdma,
2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 2126 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
2127 .dma = &cik_copy_dma, 2127 .dma = &cik_copy_dma,
2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX, 2128 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
33 bool atpx_detected; 33 bool atpx_detected;
34 /* handle for device - and atpx */ 34 /* handle for device - and atpx */
35 acpi_handle dhandle; 35 acpi_handle dhandle;
36 acpi_handle other_handle;
36 struct radeon_atpx atpx; 37 struct radeon_atpx atpx;
37} radeon_atpx_priv; 38} radeon_atpx_priv;
38 39
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
451 return false; 452 return false;
452 453
453 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 454 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
454 if (ACPI_FAILURE(status)) 455 if (ACPI_FAILURE(status)) {
456 radeon_atpx_priv.other_handle = dhandle;
455 return false; 457 return false;
456 458 }
457 radeon_atpx_priv.dhandle = dhandle; 459 radeon_atpx_priv.dhandle = dhandle;
458 radeon_atpx_priv.atpx.handle = atpx_handle; 460 radeon_atpx_priv.atpx.handle = atpx_handle;
459 return true; 461 return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
530 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 532 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
531 acpi_method_name); 533 acpi_method_name);
532 radeon_atpx_priv.atpx_detected = true; 534 radeon_atpx_priv.atpx_detected = true;
535 /*
536 * On some systems hotplug events are generated for the device
537 * being switched off when ATPX is executed. They cause ACPI
538 * hotplug to trigger and attempt to remove the device from
539 * the system, which causes it to break down. Prevent that from
540 * happening by setting the no_hotplug flag for the involved
541 * ACPI device objects.
542 */
543 acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
544 acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
533 return true; 545 return true;
534 } 546 }
535 return false; 547 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28864f6..db39ea36bf22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
77 * 2.33.0 - Add SI tiling mode array query 77 * 2.33.0 - Add SI tiling mode array query
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query 79 * 2.35.0 - Add CIK macrotile mode array query
80 * 2.36.0 - Fix CIK DCE tiling setup
80 */ 81 */
81#define KMS_DRIVER_MAJOR 2 82#define KMS_DRIVER_MAJOR 2
82#define KMS_DRIVER_MINOR 35 83#define KMS_DRIVER_MINOR 36
83#define KMS_DRIVER_PATCHLEVEL 0 84#define KMS_DRIVER_PATCHLEVEL 0
84int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 85int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
85int radeon_driver_unload_kms(struct drm_device *dev); 86int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
508#endif 509#endif
509}; 510};
510 511
511
512static void
513radeon_pci_shutdown(struct pci_dev *pdev)
514{
515 struct drm_device *dev = pci_get_drvdata(pdev);
516
517 radeon_driver_unload_kms(dev);
518}
519
520static struct drm_driver kms_driver = { 512static struct drm_driver kms_driver = {
521 .driver_features = 513 .driver_features =
522 DRIVER_USE_AGP | 514 DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
586 .probe = radeon_pci_probe, 578 .probe = radeon_pci_probe,
587 .remove = radeon_pci_remove, 579 .remove = radeon_pci_remove,
588 .driver.pm = &radeon_pm_ops, 580 .driver.pm = &radeon_pm_ops,
589 .shutdown = radeon_pci_shutdown,
590}; 581};
591 582
592static int __init radeon_init(void) 583static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..21d593c0ecaf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
461 case RADEON_INFO_SI_CP_DMA_COMPUTE: 461 case RADEON_INFO_SI_CP_DMA_COMPUTE:
462 *value = 1; 462 *value = 1;
463 break; 463 break;
464 case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
465 if (rdev->family >= CHIP_BONAIRE) {
466 *value = rdev->config.cik.backend_enable_mask;
467 } else if (rdev->family >= CHIP_TAHITI) {
468 *value = rdev->config.si.backend_enable_mask;
469 } else {
470 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
471 }
472 break;
464 default: 473 default:
465 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 474 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
466 return -EINVAL; 475 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
473 return -EINVAL; 473 return -EINVAL;
474 } 474 }
475 475
476 if ((start >> 28) != (end >> 28)) { 476 if ((start >> 28) != ((end - 1) >> 28)) {
477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", 477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
478 start, end); 478 start, end);
479 return -EINVAL; 479 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c560629575a..e7dab069cccf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 162 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
163 base = G_000100_MC_FB_START(base) << 16; 163 base = G_000100_MC_FB_START(base) << 16;
164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 164 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
165 /* Some boards seem to be configured for 128MB of sideport memory,
166 * but really only have 64MB. Just skip the sideport and use
167 * UMA memory.
168 */
169 if (rdev->mc.igp_sideport_enabled &&
170 (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
171 base += 128 * 1024 * 1024;
172 rdev->mc.real_vram_size -= 128 * 1024 * 1024;
173 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
174 }
165 175
166 /* Use K8 direct mapping for fast fb access. */ 176 /* Use K8 direct mapping for fast fb access. */
167 rdev->fastfb_working = false; 177 rdev->fastfb_working = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025ae9b3..374499db20c7 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2328 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2329 ASIC_INTERNAL_MEMORY_SS, 0); 2329 ASIC_INTERNAL_MEMORY_SS, 0);
2330 2330
2331 /* disable ss, causes hangs on some cayman boards */
2332 if (rdev->family == CHIP_CAYMAN) {
2333 pi->sclk_ss = false;
2334 pi->mclk_ss = false;
2335 }
2336
2331 if (pi->sclk_ss || pi->mclk_ss) 2337 if (pi->sclk_ss || pi->mclk_ss)
2332 pi->dynamic_ss = true; 2338 pi->dynamic_ss = true;
2333 else 2339 else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a36736dab5e0..85e1edfaa3be 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
2811} 2811}
2812 2812
2813static u32 si_get_rb_disabled(struct radeon_device *rdev, 2813static u32 si_get_rb_disabled(struct radeon_device *rdev,
2814 u32 max_rb_num, u32 se_num, 2814 u32 max_rb_num_per_se,
2815 u32 sh_per_se) 2815 u32 sh_per_se)
2816{ 2816{
2817 u32 data, mask; 2817 u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
2825 2825
2826 data >>= BACKEND_DISABLE_SHIFT; 2826 data >>= BACKEND_DISABLE_SHIFT;
2827 2827
2828 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); 2828 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
2829 2829
2830 return data & mask; 2830 return data & mask;
2831} 2831}
2832 2832
2833static void si_setup_rb(struct radeon_device *rdev, 2833static void si_setup_rb(struct radeon_device *rdev,
2834 u32 se_num, u32 sh_per_se, 2834 u32 se_num, u32 sh_per_se,
2835 u32 max_rb_num) 2835 u32 max_rb_num_per_se)
2836{ 2836{
2837 int i, j; 2837 int i, j;
2838 u32 data, mask; 2838 u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
2842 for (i = 0; i < se_num; i++) { 2842 for (i = 0; i < se_num; i++) {
2843 for (j = 0; j < sh_per_se; j++) { 2843 for (j = 0; j < sh_per_se; j++) {
2844 si_select_se_sh(rdev, i, j); 2844 si_select_se_sh(rdev, i, j);
2845 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 2845 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); 2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
2847 } 2847 }
2848 } 2848 }
2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2850 2850
2851 mask = 1; 2851 mask = 1;
2852 for (i = 0; i < max_rb_num; i++) { 2852 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
2853 if (!(disabled_rbs & mask)) 2853 if (!(disabled_rbs & mask))
2854 enabled_rbs |= mask; 2854 enabled_rbs |= mask;
2855 mask <<= 1; 2855 mask <<= 1;
2856 } 2856 }
2857 2857
2858 rdev->config.si.backend_enable_mask = enabled_rbs;
2859
2858 for (i = 0; i < se_num; i++) { 2860 for (i = 0; i < se_num; i++) {
2859 si_select_se_sh(rdev, i, 0xffffffff); 2861 si_select_se_sh(rdev, i, 0xffffffff);
2860 data = 0; 2862 data = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a94949d..406152152315 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
353 * Don't move nonexistent data. Clear destination instead. 353 * Don't move nonexistent data. Clear destination instead.
354 */ 354 */
355 if (old_iomap == NULL && 355 if (old_iomap == NULL &&
356 (ttm == NULL || ttm->state == tt_unpopulated)) { 356 (ttm == NULL || (ttm->state == tt_unpopulated &&
357 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
357 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); 358 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
358 goto out2; 359 goto out2;
359 } 360 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9b1eb2..6440eeac22d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
169 } 169 }
170 170
171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 171 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
172 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 172 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
173 page_last = vma_pages(vma) + 173 page_last = vma_pages(vma) + vma->vm_pgoff -
174 drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff; 174 drm_vma_node_start(&bo->vma_node);
175 175
176 if (unlikely(page_offset >= bo->num_pages)) { 176 if (unlikely(page_offset >= bo->num_pages)) {
177 retval = VM_FAULT_SIGBUS; 177 retval = VM_FAULT_SIGBUS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e3e917..45d5b5ab6ca9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
68 SVGA_FIFO_3D_HWVERSION)); 68 SVGA_FIFO_3D_HWVERSION));
69 break; 69 break;
70 } 70 }
71 case DRM_VMW_PARAM_MAX_SURF_MEMORY:
72 param->value = dev_priv->memory_size;
73 break;
71 default: 74 default:
72 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 75 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
73 param->param); 76 param->param);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206482a6..797ed29a36ea 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
123 * which is also the index into the MWAIT hint array. 123 * which is also the index into the MWAIT hint array.
124 * Thus C0 is a dummy. 124 * Thus C0 is a dummy.
125 */ 125 */
126static struct cpuidle_state nehalem_cstates[] __initdata = { 126static struct cpuidle_state nehalem_cstates[] = {
127 { 127 {
128 .name = "C1-NHM", 128 .name = "C1-NHM",
129 .desc = "MWAIT 0x00", 129 .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
156 .enter = NULL } 156 .enter = NULL }
157}; 157};
158 158
159static struct cpuidle_state snb_cstates[] __initdata = { 159static struct cpuidle_state snb_cstates[] = {
160 { 160 {
161 .name = "C1-SNB", 161 .name = "C1-SNB",
162 .desc = "MWAIT 0x00", 162 .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
196 .enter = NULL } 196 .enter = NULL }
197}; 197};
198 198
199static struct cpuidle_state ivb_cstates[] __initdata = { 199static struct cpuidle_state ivb_cstates[] = {
200 { 200 {
201 .name = "C1-IVB", 201 .name = "C1-IVB",
202 .desc = "MWAIT 0x00", 202 .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
236 .enter = NULL } 236 .enter = NULL }
237}; 237};
238 238
239static struct cpuidle_state hsw_cstates[] __initdata = { 239static struct cpuidle_state hsw_cstates[] = {
240 { 240 {
241 .name = "C1-HSW", 241 .name = "C1-HSW",
242 .desc = "MWAIT 0x00", 242 .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
297 .enter = NULL } 297 .enter = NULL }
298}; 298};
299 299
300static struct cpuidle_state atom_cstates[] __initdata = { 300static struct cpuidle_state atom_cstates[] = {
301 { 301 {
302 .name = "C1E-ATM", 302 .name = "C1E-ATM",
303 .desc = "MWAIT 0x00", 303 .desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
329 { 329 {
330 .enter = NULL } 330 .enter = NULL }
331}; 331};
332static struct cpuidle_state avn_cstates[] __initdata = { 332static struct cpuidle_state avn_cstates[] = {
333 { 333 {
334 .name = "C1-AVN", 334 .name = "C1-AVN",
335 .desc = "MWAIT 0x00", 335 .desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
344 .exit_latency = 15, 344 .exit_latency = 15,
345 .target_residency = 45, 345 .target_residency = 45,
346 .enter = &intel_idle }, 346 .enter = &intel_idle },
347 {
348 .enter = NULL }
347}; 349};
348 350
349/** 351/**
@@ -377,6 +379,9 @@ static int intel_idle(struct cpuidle_device *dev,
377 379
378 if (!current_set_polling_and_test()) { 380 if (!current_set_polling_and_test()) {
379 381
382 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
383 clflush((void *)&current_thread_info()->flags);
384
380 __monitor((void *)&current_thread_info()->flags, 0, 0); 385 __monitor((void *)&current_thread_info()->flags, 0, 0);
381 smp_mb(); 386 smp_mb();
382 if (!need_resched()) 387 if (!need_resched())
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90359a3..749a6cadab8b 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 200 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
201 .address = 1, 201 .address = 1,
202 .scan_index = 1, 202 .scan_index = 1,
203 .scan_type = IIO_ST('u', 12, 16, 0), 203 .scan_type = {
204 .sign = 'u',
205 .realbits = 12,
206 .storagebits = 16,
207 .shift = 0,
208 .endianness = IIO_BE,
209 },
204 }, 210 },
205 .channel[1] = { 211 .channel[1] = {
206 .type = IIO_VOLTAGE, 212 .type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
210 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 216 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
211 .address = 0, 217 .address = 0,
212 .scan_index = 0, 218 .scan_index = 0,
213 .scan_type = IIO_ST('u', 12, 16, 0), 219 .scan_type = {
220 .sign = 'u',
221 .realbits = 12,
222 .storagebits = 16,
223 .shift = 0,
224 .endianness = IIO_BE,
225 },
214 }, 226 },
215 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2), 227 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
216 .int_vref_mv = 2500, 228 .int_vref_mv = 2500,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757a1028..368660dfe135 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), 651 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
652 .address = ADIS16448_BARO_OUT, 652 .address = ADIS16448_BARO_OUT,
653 .scan_index = ADIS16400_SCAN_BARO, 653 .scan_index = ADIS16400_SCAN_BARO,
654 .scan_type = IIO_ST('s', 16, 16, 0), 654 .scan_type = {
655 .sign = 's',
656 .realbits = 16,
657 .storagebits = 16,
658 .endianness = IIO_BE,
659 },
655 }, 660 },
656 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), 661 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
657 IIO_CHAN_SOFT_TIMESTAMP(11) 662 IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df57130018..0922e39b0ea9 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
387 return -EINVAL; 387 return -EINVAL;
388 } 388 }
389 389
390 return IIO_VAL_INT_PLUS_MICRO; 390 return IIO_VAL_INT;
391} 391}
392 392
393static int cm36651_write_int_time(struct cm36651_data *cm36651, 393static int cm36651_write_int_time(struct cm36651_data *cm36651,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c2034ca71..0717940ec3b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
181static void rem_ref(struct iw_cm_id *cm_id) 181static void rem_ref(struct iw_cm_id *cm_id)
182{ 182{
183 struct iwcm_id_private *cm_id_priv; 183 struct iwcm_id_private *cm_id_priv;
184 int cb_destroy;
185
184 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); 186 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
185 if (iwcm_deref_id(cm_id_priv) && 187
186 test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) { 188 /*
189 * Test bit before deref in case the cm_id gets freed on another
190 * thread.
191 */
192 cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
193 if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
187 BUG_ON(!list_empty(&cm_id_priv->work_list)); 194 BUG_ON(!list_empty(&cm_id_priv->work_list));
188 free_cm_id(cm_id_priv); 195 free_cm_id(cm_id_priv);
189 } 196 }
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e9faef..a283274a5a09 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
49 49
50#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 50#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
51 do { \ 51 do { \
52 (udata)->inbuf = (void __user *) (ibuf); \ 52 (udata)->inbuf = (const void __user *) (ibuf); \
53 (udata)->outbuf = (void __user *) (obuf); \ 53 (udata)->outbuf = (void __user *) (obuf); \
54 (udata)->inlen = (ilen); \ 54 (udata)->inlen = (ilen); \
55 (udata)->outlen = (olen); \ 55 (udata)->outlen = (olen); \
56 } while (0) 56 } while (0)
57 57
58#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
59 do { \
60 (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
61 (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
62 (udata)->inlen = (ilen); \
63 (udata)->outlen = (olen); \
64 } while (0)
65
58/* 66/*
59 * Our lifetime rules for these structs are the following: 67 * Our lifetime rules for these structs are the following:
60 * 68 *
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7dc380c..f1cc83855af6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
2593static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, 2593static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2594 union ib_flow_spec *ib_spec) 2594 union ib_flow_spec *ib_spec)
2595{ 2595{
2596 if (kern_spec->reserved)
2597 return -EINVAL;
2598
2596 ib_spec->type = kern_spec->type; 2599 ib_spec->type = kern_spec->type;
2597 2600
2598 switch (ib_spec->type) { 2601 switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2646 void *ib_spec; 2649 void *ib_spec;
2647 int i; 2650 int i;
2648 2651
2652 if (ucore->inlen < sizeof(cmd))
2653 return -EINVAL;
2654
2649 if (ucore->outlen < sizeof(resp)) 2655 if (ucore->outlen < sizeof(resp))
2650 return -ENOSPC; 2656 return -ENOSPC;
2651 2657
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2671 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) 2677 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2672 return -EINVAL; 2678 return -EINVAL;
2673 2679
2680 if (cmd.flow_attr.reserved[0] ||
2681 cmd.flow_attr.reserved[1])
2682 return -EINVAL;
2683
2674 if (cmd.flow_attr.num_of_specs) { 2684 if (cmd.flow_attr.num_of_specs) {
2675 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size, 2685 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2676 GFP_KERNEL); 2686 GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2731 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 2741 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2732 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n", 2742 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2733 i, cmd.flow_attr.size); 2743 i, cmd.flow_attr.size);
2744 err = -EINVAL;
2734 goto err_free; 2745 goto err_free;
2735 } 2746 }
2736 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); 2747 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2791 struct ib_uobject *uobj; 2802 struct ib_uobject *uobj;
2792 int ret; 2803 int ret;
2793 2804
2805 if (ucore->inlen < sizeof(cmd))
2806 return -EINVAL;
2807
2794 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 2808 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2795 if (ret) 2809 if (ret)
2796 return ret; 2810 return ret;
2797 2811
2812 if (cmd.comp_mask)
2813 return -EINVAL;
2814
2798 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 2815 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2799 file->ucontext); 2816 file->ucontext);
2800 if (!uobj) 2817 if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 34386943ebcf..08219fb3338b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
668 if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) 668 if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
669 return -EINVAL; 669 return -EINVAL;
670 670
671 if (ex_hdr.cmd_hdr_reserved)
672 return -EINVAL;
673
671 if (ex_hdr.response) { 674 if (ex_hdr.response) {
672 if (!hdr.out_words && !ex_hdr.provider_out_words) 675 if (!hdr.out_words && !ex_hdr.provider_out_words)
673 return -EINVAL; 676 return -EINVAL;
677
678 if (!access_ok(VERIFY_WRITE,
679 (void __user *) (unsigned long) ex_hdr.response,
680 (hdr.out_words + ex_hdr.provider_out_words) * 8))
681 return -EFAULT;
674 } else { 682 } else {
675 if (hdr.out_words || ex_hdr.provider_out_words) 683 if (hdr.out_words || ex_hdr.provider_out_words)
676 return -EINVAL; 684 return -EINVAL;
677 } 685 }
678 686
679 INIT_UDATA(&ucore, 687 INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
680 (hdr.in_words) ? buf : 0, 688 hdr.in_words * 8, hdr.out_words * 8);
681 (unsigned long)ex_hdr.response, 689
682 hdr.in_words * 8, 690 INIT_UDATA_BUF_OR_NULL(&uhw,
683 hdr.out_words * 8); 691 buf + ucore.inlen,
684 692 (unsigned long) ex_hdr.response + ucore.outlen,
685 INIT_UDATA(&uhw, 693 ex_hdr.provider_in_words * 8,
686 (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0, 694 ex_hdr.provider_out_words * 8);
687 (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
688 ex_hdr.provider_in_words * 8,
689 ex_hdr.provider_out_words * 8);
690 695
691 err = uverbs_ex_cmd_table[command](file, 696 err = uverbs_ex_cmd_table[command](file,
692 &ucore, 697 &ucore,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
525} 525}
526 526
527#define VLAN_NONE 0xfff
528#define FILTER_SEL_VLAN_NONE 0xffff
529#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
530#define FILTER_SEL_WIDTH_VIN_P_FC \
531 (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
532#define FILTER_SEL_WIDTH_TAG_P_FC \
533 (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
534#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
535
536static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
537 struct l2t_entry *l2t)
538{
539 unsigned int ntuple = 0;
540 u32 viid;
541
542 switch (dev->rdev.lldi.filt_mode) {
543
544 /* default filter mode */
545 case HW_TPL_FR_MT_PR_IV_P_FC:
546 if (l2t->vlan == VLAN_NONE)
547 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
548 else {
549 ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
550 ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
551 }
552 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
553 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
554 break;
555 case HW_TPL_FR_MT_PR_OV_P_FC: {
556 viid = cxgb4_port_viid(l2t->neigh->dev);
557
558 ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
559 ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
560 ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
561 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
562 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
563 break;
564 }
565 default:
566 break;
567 }
568 return ntuple;
569}
570
571static int send_connect(struct c4iw_ep *ep) 527static int send_connect(struct c4iw_ep *ep)
572{ 528{
573 struct cpl_act_open_req *req; 529 struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
641 req->local_ip = la->sin_addr.s_addr; 597 req->local_ip = la->sin_addr.s_addr;
642 req->peer_ip = ra->sin_addr.s_addr; 598 req->peer_ip = ra->sin_addr.s_addr;
643 req->opt0 = cpu_to_be64(opt0); 599 req->opt0 = cpu_to_be64(opt0);
644 req->params = cpu_to_be32(select_ntuple(ep->com.dev, 600 req->params = cpu_to_be32(cxgb4_select_ntuple(
645 ep->dst, ep->l2t)); 601 ep->com.dev->rdev.lldi.ports[0],
602 ep->l2t));
646 req->opt2 = cpu_to_be32(opt2); 603 req->opt2 = cpu_to_be32(opt2);
647 } else { 604 } else {
648 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 605 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
662 req6->peer_ip_lo = *((__be64 *) 619 req6->peer_ip_lo = *((__be64 *)
663 (ra6->sin6_addr.s6_addr + 8)); 620 (ra6->sin6_addr.s6_addr + 8));
664 req6->opt0 = cpu_to_be64(opt0); 621 req6->opt0 = cpu_to_be64(opt0);
665 req6->params = cpu_to_be32( 622 req6->params = cpu_to_be32(cxgb4_select_ntuple(
666 select_ntuple(ep->com.dev, ep->dst, 623 ep->com.dev->rdev.lldi.ports[0],
667 ep->l2t)); 624 ep->l2t));
668 req6->opt2 = cpu_to_be32(opt2); 625 req6->opt2 = cpu_to_be32(opt2);
669 } 626 }
670 } else { 627 } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
681 t5_req->peer_ip = ra->sin_addr.s_addr; 638 t5_req->peer_ip = ra->sin_addr.s_addr;
682 t5_req->opt0 = cpu_to_be64(opt0); 639 t5_req->opt0 = cpu_to_be64(opt0);
683 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 640 t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
684 select_ntuple(ep->com.dev, 641 cxgb4_select_ntuple(
685 ep->dst, ep->l2t))); 642 ep->com.dev->rdev.lldi.ports[0],
643 ep->l2t)));
686 t5_req->opt2 = cpu_to_be32(opt2); 644 t5_req->opt2 = cpu_to_be32(opt2);
687 } else { 645 } else {
688 t5_req6 = (struct cpl_t5_act_open_req6 *) 646 t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
703 (ra6->sin6_addr.s6_addr + 8)); 661 (ra6->sin6_addr.s6_addr + 8));
704 t5_req6->opt0 = cpu_to_be64(opt0); 662 t5_req6->opt0 = cpu_to_be64(opt0);
705 t5_req6->params = (__force __be64)cpu_to_be32( 663 t5_req6->params = (__force __be64)cpu_to_be32(
706 select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 664 cxgb4_select_ntuple(
665 ep->com.dev->rdev.lldi.ports[0],
666 ep->l2t));
707 t5_req6->opt2 = cpu_to_be32(opt2); 667 t5_req6->opt2 = cpu_to_be32(opt2);
708 } 668 }
709 } 669 }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1630 memset(req, 0, sizeof(*req)); 1590 memset(req, 0, sizeof(*req));
1631 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1591 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1632 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1592 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1633 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 1593 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1594 ep->com.dev->rdev.lldi.ports[0],
1634 ep->l2t)); 1595 ep->l2t));
1635 sin = (struct sockaddr_in *)&ep->com.local_addr; 1596 sin = (struct sockaddr_in *)&ep->com.local_addr;
1636 req->le.lport = sin->sin_port; 1597 req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2938 /* 2899 /*
2939 * Allocate a server TID. 2900 * Allocate a server TID.
2940 */ 2901 */
2941 if (dev->rdev.lldi.enable_fw_ofld_conn) 2902 if (dev->rdev.lldi.enable_fw_ofld_conn &&
2903 ep->com.local_addr.ss_family == AF_INET)
2942 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 2904 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
2943 cm_id->local_addr.ss_family, ep); 2905 cm_id->local_addr.ss_family, ep);
2944 else 2906 else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3323 /* 3285 /*
3324 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3286 * Calculate the server tid from filter hit index from cpl_rx_pkt.
3325 */ 3287 */
3326 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) 3288 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3327 - dev->rdev.lldi.tids->sftid_base
3328 + dev->rdev.lldi.tids->nstids;
3329 3289
3330 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3290 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3331 if (!lep) { 3291 if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3397 window = (__force u16) htons((__force u16)tcph->window); 3357 window = (__force u16) htons((__force u16)tcph->window);
3398 3358
3399 /* Calcuate filter portion for LE region. */ 3359 /* Calcuate filter portion for LE region. */
3400 filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); 3360 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3361 dev->rdev.lldi.ports[0],
3362 e));
3401 3363
3402 /* 3364 /*
3403 * Synthesize the cpl_pass_accept_req. We have everything except the 3365 * Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb24497c..84e45006451c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
173 return ret; 173 return ret;
174} 174}
175 175
176int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) 176static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
177{ 177{
178 u32 remain = len; 178 u32 remain = len;
179 u32 dmalen; 179 u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/if_arp.h> /* For ARPHRD_xxx */
34#include <linux/module.h> 35#include <linux/module.h>
35#include <net/rtnetlink.h> 36#include <net/rtnetlink.h>
36#include "ipoib.h" 37#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
103 return -EINVAL; 104 return -EINVAL;
104 105
105 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 106 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
106 if (!pdev) 107 if (!pdev || pdev->type != ARPHRD_INFINIBAND)
107 return -ENODEV; 108 return -ENODEV;
108 109
109 ppriv = netdev_priv(pdev); 110 ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c38638d..9804fca6bf06 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
207 isert_conn->conn_rx_descs = NULL; 207 isert_conn->conn_rx_descs = NULL;
208} 208}
209 209
210static void isert_cq_tx_work(struct work_struct *);
210static void isert_cq_tx_callback(struct ib_cq *, void *); 211static void isert_cq_tx_callback(struct ib_cq *, void *);
212static void isert_cq_rx_work(struct work_struct *);
211static void isert_cq_rx_callback(struct ib_cq *, void *); 213static void isert_cq_rx_callback(struct ib_cq *, void *);
212 214
213static int 215static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
259 cq_desc[i].device = device; 261 cq_desc[i].device = device;
260 cq_desc[i].cq_index = i; 262 cq_desc[i].cq_index = i;
261 263
264 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
262 device->dev_rx_cq[i] = ib_create_cq(device->ib_device, 265 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
263 isert_cq_rx_callback, 266 isert_cq_rx_callback,
264 isert_cq_event_callback, 267 isert_cq_event_callback,
265 (void *)&cq_desc[i], 268 (void *)&cq_desc[i],
266 ISER_MAX_RX_CQ_LEN, i); 269 ISER_MAX_RX_CQ_LEN, i);
267 if (IS_ERR(device->dev_rx_cq[i])) 270 if (IS_ERR(device->dev_rx_cq[i])) {
271 ret = PTR_ERR(device->dev_rx_cq[i]);
272 device->dev_rx_cq[i] = NULL;
268 goto out_cq; 273 goto out_cq;
274 }
269 275
276 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
270 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 277 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
271 isert_cq_tx_callback, 278 isert_cq_tx_callback,
272 isert_cq_event_callback, 279 isert_cq_event_callback,
273 (void *)&cq_desc[i], 280 (void *)&cq_desc[i],
274 ISER_MAX_TX_CQ_LEN, i); 281 ISER_MAX_TX_CQ_LEN, i);
275 if (IS_ERR(device->dev_tx_cq[i])) 282 if (IS_ERR(device->dev_tx_cq[i])) {
283 ret = PTR_ERR(device->dev_tx_cq[i]);
284 device->dev_tx_cq[i] = NULL;
276 goto out_cq; 285 goto out_cq;
286 }
277 287
278 if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP)) 288 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
289 if (ret)
279 goto out_cq; 290 goto out_cq;
280 291
281 if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP)) 292 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
293 if (ret)
282 goto out_cq; 294 goto out_cq;
283 } 295 }
284 296
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
1724{ 1736{
1725 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1737 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1726 1738
1727 INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1728 queue_work(isert_comp_wq, &cq_desc->cq_tx_work); 1739 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1729} 1740}
1730 1741
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
1768{ 1779{
1769 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 1780 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1770 1781
1771 INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1772 queue_work(isert_rx_wq, &cq_desc->cq_rx_work); 1782 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1773} 1783}
1774 1784
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd905b1..d2965e4b3224 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
1871 break; 1871 break;
1872 1872
1873 case EV_ABS: 1873 case EV_ABS:
1874 input_alloc_absinfo(dev);
1875 if (!dev->absinfo)
1876 return;
1877
1874 __set_bit(code, dev->absbit); 1878 __set_bit(code, dev->absbit);
1875 break; 1879 break;
1876 1880
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6ff3ba..aa127ba392a4 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
455 } 455 }
456} 456}
457 457
458static irqreturn_t zforce_interrupt(int irq, void *dev_id) 458static irqreturn_t zforce_irq(int irq, void *dev_id)
459{
460 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client;
462
463 if (ts->suspended && device_may_wakeup(&client->dev))
464 pm_wakeup_event(&client->dev, 500);
465
466 return IRQ_WAKE_THREAD;
467}
468
469static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
459{ 470{
460 struct zforce_ts *ts = dev_id; 471 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client; 472 struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
465 u8 *payload; 476 u8 *payload;
466 477
467 /* 478 /*
468 * When suspended, emit a wakeup signal if necessary and return. 479 * When still suspended, return.
469 * Due to the level-interrupt we will get re-triggered later. 480 * Due to the level-interrupt we will get re-triggered later.
470 */ 481 */
471 if (ts->suspended) { 482 if (ts->suspended) {
472 if (device_may_wakeup(&client->dev))
473 pm_wakeup_event(&client->dev, 500);
474 msleep(20); 483 msleep(20);
475 return IRQ_HANDLED; 484 return IRQ_HANDLED;
476 } 485 }
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
763 * Therefore we can trigger the interrupt anytime it is low and do 772 * Therefore we can trigger the interrupt anytime it is low and do
764 * not need to limit it to the interrupt edge. 773 * not need to limit it to the interrupt edge.
765 */ 774 */
766 ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, 775 ret = devm_request_threaded_irq(&client->dev, client->irq,
767 zforce_interrupt, 776 zforce_irq, zforce_irq_thread,
768 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 777 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
769 input_dev->name, ts); 778 input_dev->name, ts);
770 if (ret) { 779 if (ret) {
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63a9011..3ee78f02e5d7 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p, 149static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
150 int irq, int do_mask) 150 int irq, int do_mask)
151{ 151{
152 int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */ 152 /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
153 int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */ 153 int bitfield_width = 4;
154 int shift = 32 - (irq + 1) * bitfield_width;
154 155
155 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO, 156 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
156 shift, bitfield_width, 157 shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
159 160
160static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value) 161static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
161{ 162{
163 /* The SENSE register is assumed to be 32-bit. */
162 int bitfield_width = p->config.sense_bitfield_width; 164 int bitfield_width = p->config.sense_bitfield_width;
163 int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */ 165 int shift = 32 - (irq + 1) * bitfield_width;
164 166
165 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value); 167 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
166 168
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
1643 int i; 1643 int i;
1644 struct pci_dev *tmp_hfcpci = NULL; 1644 struct pci_dev *tmp_hfcpci = NULL;
1645 1645
1646#ifdef __BIG_ENDIAN
1647#error "not running on big endian machines now"
1648#endif
1649
1650 strcpy(tmp, hfcpci_revision); 1646 strcpy(tmp, hfcpci_revision);
1651 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); 1647 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1652 1648
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
290 struct IsdnCardState *cs = card->cs; 290 struct IsdnCardState *cs = card->cs;
291 char tmp[64]; 291 char tmp[64];
292 292
293#ifdef __BIG_ENDIAN
294#error "not running on big endian machines now"
295#endif
296
297 strcpy(tmp, telespci_revision); 293 strcpy(tmp, telespci_revision);
298 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); 294 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
299 if (cs->typ != ISDN_CTYPE_TELESPCI) 295 if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 05188351711d..a97263e902ff 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
244 if (i % 2) 244 if (i % 2)
245 goto err; 245 goto err;
246 246
247 mutex_lock(&chip->lock);
248
249 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { 247 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
250 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); 248 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
251 if (ret) { 249 if (ret)
252 mutex_unlock(&chip->lock);
253 return -EINVAL; 250 return -EINVAL;
254 }
255 } 251 }
256 252
257 mutex_unlock(&chip->lock);
258
259 return size; 253 return size;
260 254
261err: 255err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
427{ 421{
428 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 422 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
429 struct lp55xx_chip *chip = led->chip; 423 struct lp55xx_chip *chip = led->chip;
424 int ret;
430 425
431 mutex_lock(&chip->lock); 426 mutex_lock(&chip->lock);
432 427
433 chip->engine_idx = nr; 428 chip->engine_idx = nr;
434 lp5521_load_engine(chip); 429 lp5521_load_engine(chip);
430 ret = lp5521_update_program_memory(chip, buf, len);
435 431
436 mutex_unlock(&chip->lock); 432 mutex_unlock(&chip->lock);
437 433
438 return lp5521_update_program_memory(chip, buf, len); 434 return ret;
439} 435}
440store_load(1) 436store_load(1)
441store_load(2) 437store_load(2)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 6b553d9f4266..fd9ab5f61441 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
337 if (i % 2) 337 if (i % 2)
338 goto err; 338 goto err;
339 339
340 mutex_lock(&chip->lock);
341
342 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { 340 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
343 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); 341 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
344 if (ret) { 342 if (ret)
345 mutex_unlock(&chip->lock);
346 return -EINVAL; 343 return -EINVAL;
347 }
348 } 344 }
349 345
350 mutex_unlock(&chip->lock);
351
352 return size; 346 return size;
353 347
354err: 348err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
548{ 542{
549 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 543 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
550 struct lp55xx_chip *chip = led->chip; 544 struct lp55xx_chip *chip = led->chip;
545 int ret;
551 546
552 mutex_lock(&chip->lock); 547 mutex_lock(&chip->lock);
553 548
554 chip->engine_idx = nr; 549 chip->engine_idx = nr;
555 lp5523_load_engine_and_select_page(chip); 550 lp5523_load_engine_and_select_page(chip);
551 ret = lp5523_update_program_memory(chip, buf, len);
556 552
557 mutex_unlock(&chip->lock); 553 mutex_unlock(&chip->lock);
558 554
559 return lp5523_update_program_memory(chip, buf, len); 555 return ret;
560} 556}
561store_load(1) 557store_load(1)
562store_load(2) 558store_load(2)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1d7e40..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
421 421
422 if (watermark <= WATERMARK_METADATA) { 422 if (watermark <= WATERMARK_METADATA) {
423 SET_GC_MARK(b, GC_MARK_METADATA); 423 SET_GC_MARK(b, GC_MARK_METADATA);
424 SET_GC_MOVE(b, 0);
424 b->prio = BTREE_PRIO; 425 b->prio = BTREE_PRIO;
425 } else { 426 } else {
426 SET_GC_MARK(b, GC_MARK_RECLAIMABLE); 427 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
428 SET_GC_MOVE(b, 0);
427 b->prio = INITIAL_PRIO; 429 b->prio = INITIAL_PRIO;
428 } 430 }
429 431
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a0ff30..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
197 uint8_t disk_gen; 197 uint8_t disk_gen;
198 uint8_t last_gc; /* Most out of date gen in the btree */ 198 uint8_t last_gc; /* Most out of date gen in the btree */
199 uint8_t gc_gen; 199 uint8_t gc_gen;
200 uint16_t gc_mark; 200 uint16_t gc_mark; /* Bitfield used by GC. See below for field */
201}; 201};
202 202
203/* 203/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
209#define GC_MARK_RECLAIMABLE 0 209#define GC_MARK_RECLAIMABLE 0
210#define GC_MARK_DIRTY 1 210#define GC_MARK_DIRTY 1
211#define GC_MARK_METADATA 2 211#define GC_MARK_METADATA 2
212BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14); 212BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
213BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
213 214
214#include "journal.h" 215#include "journal.h"
215#include "stats.h" 216#include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
372 unsigned char writeback_percent; 373 unsigned char writeback_percent;
373 unsigned writeback_delay; 374 unsigned writeback_delay;
374 375
375 int writeback_rate_change;
376 int64_t writeback_rate_derivative;
377 uint64_t writeback_rate_target; 376 uint64_t writeback_rate_target;
377 int64_t writeback_rate_proportional;
378 int64_t writeback_rate_derivative;
379 int64_t writeback_rate_change;
378 380
379 unsigned writeback_rate_update_seconds; 381 unsigned writeback_rate_update_seconds;
380 unsigned writeback_rate_d_term; 382 unsigned writeback_rate_d_term;
381 unsigned writeback_rate_p_term_inverse; 383 unsigned writeback_rate_p_term_inverse;
382 unsigned writeback_rate_d_smooth;
383}; 384};
384 385
385enum alloc_watermarks { 386enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
445 * call prio_write() to keep gens from wrapping. 446 * call prio_write() to keep gens from wrapping.
446 */ 447 */
447 uint8_t need_save_prio; 448 uint8_t need_save_prio;
448 unsigned gc_move_threshold;
449 449
450 /* 450 /*
451 * If nonzero, we know we aren't going to find any buckets to invalidate 451 * If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
1561 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), 1561 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1562 GC_MARK_METADATA); 1562 GC_MARK_METADATA);
1563 1563
1564 /* don't reclaim buckets to which writeback keys point */
1565 rcu_read_lock();
1566 for (i = 0; i < c->nr_uuids; i++) {
1567 struct bcache_device *d = c->devices[i];
1568 struct cached_dev *dc;
1569 struct keybuf_key *w, *n;
1570 unsigned j;
1571
1572 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1573 continue;
1574 dc = container_of(d, struct cached_dev, disk);
1575
1576 spin_lock(&dc->writeback_keys.lock);
1577 rbtree_postorder_for_each_entry_safe(w, n,
1578 &dc->writeback_keys.keys, node)
1579 for (j = 0; j < KEY_PTRS(&w->key); j++)
1580 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1581 GC_MARK_DIRTY);
1582 spin_unlock(&dc->writeback_keys.lock);
1583 }
1584 rcu_read_unlock();
1585
1564 for_each_cache(ca, c, i) { 1586 for_each_cache(ca, c, i) {
1565 uint64_t *i; 1587 uint64_t *i;
1566 1588
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
1817 if (KEY_START(k) > KEY_START(insert) + sectors_found) 1839 if (KEY_START(k) > KEY_START(insert) + sectors_found)
1818 goto check_failed; 1840 goto check_failed;
1819 1841
1820 if (KEY_PTRS(replace_key) != KEY_PTRS(k)) 1842 if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
1843 KEY_DIRTY(k) != KEY_DIRTY(replace_key))
1821 goto check_failed; 1844 goto check_failed;
1822 1845
1823 /* skip past gen */ 1846 /* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
2217 struct bkey *replace_key; 2240 struct bkey *replace_key;
2218}; 2241};
2219 2242
2220int btree_insert_fn(struct btree_op *b_op, struct btree *b) 2243static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2221{ 2244{
2222 struct btree_insert_op *op = container_of(b_op, 2245 struct btree_insert_op *op = container_of(b_op,
2223 struct btree_insert_op, op); 2246 struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
25 unsigned i; 25 unsigned i;
26 26
27 for (i = 0; i < KEY_PTRS(k); i++) { 27 for (i = 0; i < KEY_PTRS(k); i++) {
28 struct cache *ca = PTR_CACHE(c, k, i);
29 struct bucket *g = PTR_BUCKET(c, k, i); 28 struct bucket *g = PTR_BUCKET(c, k, i);
30 29
31 if (GC_SECTORS_USED(g) < ca->gc_move_threshold) 30 if (GC_MOVE(g))
32 return true; 31 return true;
33 } 32 }
34 33
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
65 64
66static void read_moving_endio(struct bio *bio, int error) 65static void read_moving_endio(struct bio *bio, int error)
67{ 66{
67 struct bbio *b = container_of(bio, struct bbio, bio);
68 struct moving_io *io = container_of(bio->bi_private, 68 struct moving_io *io = container_of(bio->bi_private,
69 struct moving_io, cl); 69 struct moving_io, cl);
70 70
71 if (error) 71 if (error)
72 io->op.error = error; 72 io->op.error = error;
73 else if (!KEY_DIRTY(&b->key) &&
74 ptr_stale(io->op.c, &b->key, 0)) {
75 io->op.error = -EINTR;
76 }
73 77
74 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); 78 bch_bbio_endio(io->op.c, bio, error, "reading data to move");
75} 79}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
141 if (!w) 145 if (!w)
142 break; 146 break;
143 147
148 if (ptr_stale(c, &w->key, 0)) {
149 bch_keybuf_del(&c->moving_gc_keys, w);
150 continue;
151 }
152
144 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) 153 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
145 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 154 * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
146 GFP_KERNEL); 155 GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
184 193
185static unsigned bucket_heap_top(struct cache *ca) 194static unsigned bucket_heap_top(struct cache *ca)
186{ 195{
187 return GC_SECTORS_USED(heap_peek(&ca->heap)); 196 struct bucket *b;
197 return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
188} 198}
189 199
190void bch_moving_gc(struct cache_set *c) 200void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
226 sectors_to_move -= GC_SECTORS_USED(b); 236 sectors_to_move -= GC_SECTORS_USED(b);
227 } 237 }
228 238
229 ca->gc_move_threshold = bucket_heap_top(ca); 239 while (heap_pop(&ca->heap, b, bucket_cmp))
230 240 SET_GC_MOVE(b, 1);
231 pr_debug("threshold %u", ca->gc_move_threshold);
232 } 241 }
233 242
234 mutex_unlock(&c->bucket_lock); 243 mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd2d797..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
1676static bool can_attach_cache(struct cache *ca, struct cache_set *c) 1676static bool can_attach_cache(struct cache *ca, struct cache_set *c)
1677{ 1677{
1678 return ca->sb.block_size == c->sb.block_size && 1678 return ca->sb.block_size == c->sb.block_size &&
1679 ca->sb.bucket_size == c->sb.block_size && 1679 ca->sb.bucket_size == c->sb.bucket_size &&
1680 ca->sb.nr_in_set == c->sb.nr_in_set; 1680 ca->sb.nr_in_set == c->sb.nr_in_set;
1681} 1681}
1682 1682
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2bee18a..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
83rw_attribute(writeback_rate_update_seconds); 83rw_attribute(writeback_rate_update_seconds);
84rw_attribute(writeback_rate_d_term); 84rw_attribute(writeback_rate_d_term);
85rw_attribute(writeback_rate_p_term_inverse); 85rw_attribute(writeback_rate_p_term_inverse);
86rw_attribute(writeback_rate_d_smooth);
87read_attribute(writeback_rate_debug); 86read_attribute(writeback_rate_debug);
88 87
89read_attribute(stripe_size); 88read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
129 var_printf(writeback_running, "%i"); 128 var_printf(writeback_running, "%i");
130 var_print(writeback_delay); 129 var_print(writeback_delay);
131 var_print(writeback_percent); 130 var_print(writeback_percent);
132 sysfs_print(writeback_rate, dc->writeback_rate.rate); 131 sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
133 132
134 var_print(writeback_rate_update_seconds); 133 var_print(writeback_rate_update_seconds);
135 var_print(writeback_rate_d_term); 134 var_print(writeback_rate_d_term);
136 var_print(writeback_rate_p_term_inverse); 135 var_print(writeback_rate_p_term_inverse);
137 var_print(writeback_rate_d_smooth);
138 136
139 if (attr == &sysfs_writeback_rate_debug) { 137 if (attr == &sysfs_writeback_rate_debug) {
138 char rate[20];
140 char dirty[20]; 139 char dirty[20];
141 char derivative[20];
142 char target[20]; 140 char target[20];
143 bch_hprint(dirty, 141 char proportional[20];
144 bcache_dev_sectors_dirty(&dc->disk) << 9); 142 char derivative[20];
145 bch_hprint(derivative, dc->writeback_rate_derivative << 9); 143 char change[20];
144 s64 next_io;
145
146 bch_hprint(rate, dc->writeback_rate.rate << 9);
147 bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
146 bch_hprint(target, dc->writeback_rate_target << 9); 148 bch_hprint(target, dc->writeback_rate_target << 9);
149 bch_hprint(proportional,dc->writeback_rate_proportional << 9);
150 bch_hprint(derivative, dc->writeback_rate_derivative << 9);
151 bch_hprint(change, dc->writeback_rate_change << 9);
152
153 next_io = div64_s64(dc->writeback_rate.next - local_clock(),
154 NSEC_PER_MSEC);
147 155
148 return sprintf(buf, 156 return sprintf(buf,
149 "rate:\t\t%u\n" 157 "rate:\t\t%s/sec\n"
150 "change:\t\t%i\n"
151 "dirty:\t\t%s\n" 158 "dirty:\t\t%s\n"
159 "target:\t\t%s\n"
160 "proportional:\t%s\n"
152 "derivative:\t%s\n" 161 "derivative:\t%s\n"
153 "target:\t\t%s\n", 162 "change:\t\t%s/sec\n"
154 dc->writeback_rate.rate, 163 "next io:\t%llims\n",
155 dc->writeback_rate_change, 164 rate, dirty, target, proportional,
156 dirty, derivative, target); 165 derivative, change, next_io);
157 } 166 }
158 167
159 sysfs_hprint(dirty_data, 168 sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
189 struct kobj_uevent_env *env; 198 struct kobj_uevent_env *env;
190 199
191#define d_strtoul(var) sysfs_strtoul(var, dc->var) 200#define d_strtoul(var) sysfs_strtoul(var, dc->var)
201#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
192#define d_strtoi_h(var) sysfs_hatoi(var, dc->var) 202#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
193 203
194 sysfs_strtoul(data_csum, dc->disk.data_csum); 204 sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
197 d_strtoul(writeback_metadata); 207 d_strtoul(writeback_metadata);
198 d_strtoul(writeback_running); 208 d_strtoul(writeback_running);
199 d_strtoul(writeback_delay); 209 d_strtoul(writeback_delay);
200 sysfs_strtoul_clamp(writeback_rate, 210
201 dc->writeback_rate.rate, 1, 1000000);
202 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); 211 sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
203 212
204 d_strtoul(writeback_rate_update_seconds); 213 sysfs_strtoul_clamp(writeback_rate,
214 dc->writeback_rate.rate, 1, INT_MAX);
215
216 d_strtoul_nonzero(writeback_rate_update_seconds);
205 d_strtoul(writeback_rate_d_term); 217 d_strtoul(writeback_rate_d_term);
206 d_strtoul(writeback_rate_p_term_inverse); 218 d_strtoul_nonzero(writeback_rate_p_term_inverse);
207 sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
208 dc->writeback_rate_p_term_inverse, 1, INT_MAX);
209 d_strtoul(writeback_rate_d_smooth);
210 219
211 d_strtoi_h(sequential_cutoff); 220 d_strtoi_h(sequential_cutoff);
212 d_strtoi_h(readahead); 221 d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
313 &sysfs_writeback_rate_update_seconds, 322 &sysfs_writeback_rate_update_seconds,
314 &sysfs_writeback_rate_d_term, 323 &sysfs_writeback_rate_d_term,
315 &sysfs_writeback_rate_p_term_inverse, 324 &sysfs_writeback_rate_p_term_inverse,
316 &sysfs_writeback_rate_d_smooth,
317 &sysfs_writeback_rate_debug, 325 &sysfs_writeback_rate_debug,
318 &sysfs_dirty_data, 326 &sysfs_dirty_data,
319 &sysfs_stripe_size, 327 &sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
209{ 209{
210 uint64_t now = local_clock(); 210 uint64_t now = local_clock();
211 211
212 d->next += div_u64(done, d->rate); 212 d->next += div_u64(done * NSEC_PER_SEC, d->rate);
213
214 if (time_before64(now + NSEC_PER_SEC, d->next))
215 d->next = now + NSEC_PER_SEC;
216
217 if (time_after64(now - NSEC_PER_SEC * 2, d->next))
218 d->next = now - NSEC_PER_SEC * 2;
213 219
214 return time_after64(d->next, now) 220 return time_after64(d->next, now)
215 ? div_u64(d->next - now, NSEC_PER_SEC / HZ) 221 ? div_u64(d->next - now, NSEC_PER_SEC / HZ)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3f8b4a..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
110 _r; \ 110 _r; \
111}) 111})
112 112
113#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) 113#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
114 114
115#define heap_full(h) ((h)->used == (h)->size) 115#define heap_full(h) ((h)->used == (h)->size)
116 116
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
30 30
31 /* PD controller */ 31 /* PD controller */
32 32
33 int change = 0;
34 int64_t error;
35 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk); 33 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
36 int64_t derivative = dirty - dc->disk.sectors_dirty_last; 34 int64_t derivative = dirty - dc->disk.sectors_dirty_last;
35 int64_t proportional = dirty - target;
36 int64_t change;
37 37
38 dc->disk.sectors_dirty_last = dirty; 38 dc->disk.sectors_dirty_last = dirty;
39 39
40 derivative *= dc->writeback_rate_d_term; 40 /* Scale to sectors per second */
41 derivative = clamp(derivative, -dirty, dirty);
42 41
43 derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative, 42 proportional *= dc->writeback_rate_update_seconds;
44 dc->writeback_rate_d_smooth, 0); 43 proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
45 44
46 /* Avoid divide by zero */ 45 derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
47 if (!target)
48 goto out;
49 46
50 error = div64_s64((dirty + derivative - target) << 8, target); 47 derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
48 (dc->writeback_rate_d_term /
49 dc->writeback_rate_update_seconds) ?: 1, 0);
50
51 derivative *= dc->writeback_rate_d_term;
52 derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
51 53
52 change = div_s64((dc->writeback_rate.rate * error) >> 8, 54 change = proportional + derivative;
53 dc->writeback_rate_p_term_inverse);
54 55
55 /* Don't increase writeback rate if the device isn't keeping up */ 56 /* Don't increase writeback rate if the device isn't keeping up */
56 if (change > 0 && 57 if (change > 0 &&
57 time_after64(local_clock(), 58 time_after64(local_clock(),
58 dc->writeback_rate.next + 10 * NSEC_PER_MSEC)) 59 dc->writeback_rate.next + NSEC_PER_MSEC))
59 change = 0; 60 change = 0;
60 61
61 dc->writeback_rate.rate = 62 dc->writeback_rate.rate =
62 clamp_t(int64_t, dc->writeback_rate.rate + change, 63 clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
63 1, NSEC_PER_MSEC); 64 1, NSEC_PER_MSEC);
64out: 65
66 dc->writeback_rate_proportional = proportional;
65 dc->writeback_rate_derivative = derivative; 67 dc->writeback_rate_derivative = derivative;
66 dc->writeback_rate_change = change; 68 dc->writeback_rate_change = change;
67 dc->writeback_rate_target = target; 69 dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
87 89
88static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) 90static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
89{ 91{
90 uint64_t ret;
91
92 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || 92 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
93 !dc->writeback_percent) 93 !dc->writeback_percent)
94 return 0; 94 return 0;
95 95
96 ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); 96 return bch_next_delay(&dc->writeback_rate, sectors);
97
98 return min_t(uint64_t, ret, HZ);
99} 97}
100 98
101struct dirty_io { 99struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
241 if (KEY_START(&w->key) != dc->last_read || 239 if (KEY_START(&w->key) != dc->last_read ||
242 jiffies_to_msecs(delay) > 50) 240 jiffies_to_msecs(delay) > 50)
243 while (!kthread_should_stop() && delay) 241 while (!kthread_should_stop() && delay)
244 delay = schedule_timeout_interruptible(delay); 242 delay = schedule_timeout_uninterruptible(delay);
245 243
246 dc->last_read = KEY_OFFSET(&w->key); 244 dc->last_read = KEY_OFFSET(&w->key);
247 245
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
438 while (delay && 436 while (delay &&
439 !kthread_should_stop() && 437 !kthread_should_stop() &&
440 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) 438 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
441 delay = schedule_timeout_interruptible(delay); 439 delay = schedule_timeout_uninterruptible(delay);
442 } 440 }
443 } 441 }
444 442
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
476 474
477 bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0), 475 bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
478 sectors_dirty_init_fn, 0); 476 sectors_dirty_init_fn, 0);
477
478 dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
479} 479}
480 480
481int bch_cached_dev_writeback_init(struct cached_dev *dc) 481int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
490 dc->writeback_delay = 30; 490 dc->writeback_delay = 30;
491 dc->writeback_rate.rate = 1024; 491 dc->writeback_rate.rate = 1024;
492 492
493 dc->writeback_rate_update_seconds = 30; 493 dc->writeback_rate_update_seconds = 5;
494 dc->writeback_rate_d_term = 16; 494 dc->writeback_rate_d_term = 30;
495 dc->writeback_rate_p_term_inverse = 64; 495 dc->writeback_rate_p_term_inverse = 6000;
496 dc->writeback_rate_d_smooth = 8;
497 496
498 dc->writeback_thread = kthread_create(bch_writeback_thread, dc, 497 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
499 "bcache_writeback"); 498 "bcache_writeback");
500 if (IS_ERR(dc->writeback_thread)) 499 if (IS_ERR(dc->writeback_thread))
501 return PTR_ERR(dc->writeback_thread); 500 return PTR_ERR(dc->writeback_thread);
502 501
503 set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
504
505 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); 502 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
506 schedule_delayed_work(&dc->writeback_rate_update, 503 schedule_delayed_work(&dc->writeback_rate_update,
507 dc->writeback_rate_update_seconds * HZ); 504 dc->writeback_rate_update_seconds * HZ);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6841d6805fd6..41ab5e34d2ac 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -245,7 +245,7 @@ static int pcf50633_probe(struct i2c_client *client,
245 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { 245 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
246 struct platform_device *pdev; 246 struct platform_device *pdev;
247 247
248 pdev = platform_device_alloc("pcf50633-regltr", i); 248 pdev = platform_device_alloc("pcf50633-regulator", i);
249 if (!pdev) { 249 if (!pdev) {
250 dev_err(pcf->dev, "Cannot create regulator %d\n", i); 250 dev_err(pcf->dev, "Cannot create regulator %d\n", i);
251 continue; 251 continue;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20afbdcac..705698fd2c7e 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
1228 1228
1229 pcr->remove_pci = true; 1229 pcr->remove_pci = true;
1230 1230
1231 cancel_delayed_work(&pcr->carddet_work); 1231 /* Disable interrupts at the pcr level */
1232 cancel_delayed_work(&pcr->idle_work); 1232 spin_lock_irq(&pcr->lock);
1233 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1234 pcr->bier = 0;
1235 spin_unlock_irq(&pcr->lock);
1236
1237 cancel_delayed_work_sync(&pcr->carddet_work);
1238 cancel_delayed_work_sync(&pcr->idle_work);
1233 1239
1234 mfd_remove_devices(&pcidev->dev); 1240 mfd_remove_devices(&pcidev->dev);
1235 1241
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d131fef2..0f55589a56b8 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
73 return -ENOMEM; 73 return -ENOMEM;
74 } 74 }
75 info->map.cached = 75 info->map.cached =
76 ioremap_cached(info->map.phys, info->map.size); 76 ioremap_cache(info->map.phys, info->map.size);
77 if (!info->map.cached) 77 if (!info->map.cached)
78 printk(KERN_WARNING "Failed to ioremap cached %s\n", 78 printk(KERN_WARNING "Failed to ioremap cached %s\n",
79 info->map.name); 79 info->map.name);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..4ced59436558 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2201 2201
2202 port = &(SLAVE_AD_INFO(slave).port); 2202 port = &(SLAVE_AD_INFO(slave).port);
2203 2203
2204 // if slave is null, the whole port is not initialized 2204 /* if slave is null, the whole port is not initialized */
2205 if (!port->slave) { 2205 if (!port->slave) {
2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", 2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
2207 slave->bond->dev->name, slave->dev->name); 2207 slave->bond->dev->name, slave->dev->name);
2208 return; 2208 return;
2209 } 2209 }
2210 2210
2211 __get_state_machine_lock(port);
2212
2211 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2213 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2212 port->actor_oper_port_key = port->actor_admin_port_key |= 2214 port->actor_oper_port_key = port->actor_admin_port_key |=
2213 (__get_link_speed(port) << 1); 2215 (__get_link_speed(port) << 1);
2214 pr_debug("Port %d changed speed\n", port->actor_port_number); 2216 pr_debug("Port %d changed speed\n", port->actor_port_number);
2215 // there is no need to reselect a new aggregator, just signal the 2217 /* there is no need to reselect a new aggregator, just signal the
2216 // state machines to reinitialize 2218 * state machines to reinitialize
2219 */
2217 port->sm_vars |= AD_PORT_BEGIN; 2220 port->sm_vars |= AD_PORT_BEGIN;
2221
2222 __release_state_machine_lock(port);
2218} 2223}
2219 2224
2220/** 2225/**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2229 2234
2230 port = &(SLAVE_AD_INFO(slave).port); 2235 port = &(SLAVE_AD_INFO(slave).port);
2231 2236
2232 // if slave is null, the whole port is not initialized 2237 /* if slave is null, the whole port is not initialized */
2233 if (!port->slave) { 2238 if (!port->slave) {
2234 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", 2239 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
2235 slave->bond->dev->name, slave->dev->name); 2240 slave->bond->dev->name, slave->dev->name);
2236 return; 2241 return;
2237 } 2242 }
2238 2243
2244 __get_state_machine_lock(port);
2245
2239 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2246 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2240 port->actor_oper_port_key = port->actor_admin_port_key |= 2247 port->actor_oper_port_key = port->actor_admin_port_key |=
2241 __get_duplex(port); 2248 __get_duplex(port);
2242 pr_debug("Port %d changed duplex\n", port->actor_port_number); 2249 pr_debug("Port %d changed duplex\n", port->actor_port_number);
2243 // there is no need to reselect a new aggregator, just signal the 2250 /* there is no need to reselect a new aggregator, just signal the
2244 // state machines to reinitialize 2251 * state machines to reinitialize
2252 */
2245 port->sm_vars |= AD_PORT_BEGIN; 2253 port->sm_vars |= AD_PORT_BEGIN;
2254
2255 __release_state_machine_lock(port);
2246} 2256}
2247 2257
2248/** 2258/**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2258 2268
2259 port = &(SLAVE_AD_INFO(slave).port); 2269 port = &(SLAVE_AD_INFO(slave).port);
2260 2270
2261 // if slave is null, the whole port is not initialized 2271 /* if slave is null, the whole port is not initialized */
2262 if (!port->slave) { 2272 if (!port->slave) {
2263 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", 2273 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
2264 slave->bond->dev->name, slave->dev->name); 2274 slave->bond->dev->name, slave->dev->name);
2265 return; 2275 return;
2266 } 2276 }
2267 2277
2268 // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) 2278 __get_state_machine_lock(port);
2269 // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report 2279 /* on link down we are zeroing duplex and speed since
2280 * some of the adaptors(ce1000.lan) report full duplex/speed
2281 * instead of N/A(duplex) / 0(speed).
2282 *
2283 * on link up we are forcing recheck on the duplex and speed since
2284 * some of he adaptors(ce1000.lan) report.
2285 */
2270 if (link == BOND_LINK_UP) { 2286 if (link == BOND_LINK_UP) {
2271 port->is_enabled = true; 2287 port->is_enabled = true;
2272 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2288 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2282 port->actor_oper_port_key = (port->actor_admin_port_key &= 2298 port->actor_oper_port_key = (port->actor_admin_port_key &=
2283 ~AD_SPEED_KEY_BITS); 2299 ~AD_SPEED_KEY_BITS);
2284 } 2300 }
2285 //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); 2301 pr_debug("Port %d changed link status to %s",
2286 // there is no need to reselect a new aggregator, just signal the 2302 port->actor_port_number,
2287 // state machines to reinitialize 2303 (link == BOND_LINK_UP) ? "UP" : "DOWN");
2304 /* there is no need to reselect a new aggregator, just signal the
2305 * state machines to reinitialize
2306 */
2288 port->sm_vars |= AD_PORT_BEGIN; 2307 port->sm_vars |= AD_PORT_BEGIN;
2308
2309 __release_state_machine_lock(port);
2289} 2310}
2290 2311
2291/* 2312/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..4b8c58b0ec24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
3732} 3732}
3733 3733
3734 3734
3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) 3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3736 void *accel_priv)
3736{ 3737{
3737 /* 3738 /*
3738 * This helper function exists to help dev_pick_tx get the correct 3739 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad9b964..8aeec0b4601a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
625 usb_unanchor_urb(urb); 625 usb_unanchor_urb(urb);
626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, 626 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
627 urb->transfer_dma); 627 urb->transfer_dma);
628 usb_free_urb(urb);
628 break; 629 break;
629 } 630 }
630 631
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
798 * allowed (MAX_TX_URBS). 799 * allowed (MAX_TX_URBS).
799 */ 800 */
800 if (!context) { 801 if (!context) {
801 usb_unanchor_urb(urb);
802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); 802 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
803 usb_free_urb(urb);
803 804
804 netdev_warn(netdev, "couldn't find free context\n"); 805 netdev_warn(netdev, "couldn't find free context\n");
805 806
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d1556e6e..263dd921edc4 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
927 /* set LED in default state (end of init phase) */ 927 /* set LED in default state (end of init phase) */
928 pcan_usb_pro_set_led(dev, 0, 1); 928 pcan_usb_pro_set_led(dev, 0, 1);
929 929
930 kfree(bi);
931 kfree(fi);
932
930 return 0; 933 return 0;
931 934
932 err_out: 935 err_out:
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..248baf6273fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
565 /* Make sure pointer to data buffer is set */ 565 /* Make sure pointer to data buffer is set */
566 wmb(); 566 wmb();
567 567
568 skb_tx_timestamp(skb);
569
568 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 570 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
569 571
570 /* Increment index to point to the next BD */ 572 /* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
579 581
580 arc_reg_set(priv, R_STATUS, TXPL_MASK); 582 arc_reg_set(priv, R_STATUS, TXPL_MASK);
581 583
582 skb_tx_timestamp(skb);
583
584 return NETDEV_TX_OK; 584 return NETDEV_TX_OK;
585} 585}
586 586
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
145 * Mask some pcie error bits 145 * Mask some pcie error bits
146 */ 146 */
147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); 148 if (pos) {
149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 149 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 150 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
151 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
152 }
151 /* clear error status */ 153 /* clear error status */
152 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, 154 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED | 155 PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
520#define BNX2X_FP_STATE_IDLE 0 520#define BNX2X_FP_STATE_IDLE 0
521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ 521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ 522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
523#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ 523#define BNX2X_FP_STATE_DISABLED (1 << 2)
524#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ 524#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
525#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
526#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
525#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) 527#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
526#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) 528#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
527#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) 529#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
528 /* protect state */ 530 /* protect state */
529 spinlock_t lock; 531 spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
613{ 615{
614 bool rc = true; 616 bool rc = true;
615 617
616 spin_lock(&fp->lock); 618 spin_lock_bh(&fp->lock);
617 if (fp->state & BNX2X_FP_LOCKED) { 619 if (fp->state & BNX2X_FP_LOCKED) {
618 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); 620 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
619 fp->state |= BNX2X_FP_STATE_NAPI_YIELD; 621 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
622 /* we don't care if someone yielded */ 624 /* we don't care if someone yielded */
623 fp->state = BNX2X_FP_STATE_NAPI; 625 fp->state = BNX2X_FP_STATE_NAPI;
624 } 626 }
625 spin_unlock(&fp->lock); 627 spin_unlock_bh(&fp->lock);
626 return rc; 628 return rc;
627} 629}
628 630
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
631{ 633{
632 bool rc = false; 634 bool rc = false;
633 635
634 spin_lock(&fp->lock); 636 spin_lock_bh(&fp->lock);
635 WARN_ON(fp->state & 637 WARN_ON(fp->state &
636 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); 638 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
637 639
638 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 640 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
639 rc = true; 641 rc = true;
640 fp->state = BNX2X_FP_STATE_IDLE; 642
641 spin_unlock(&fp->lock); 643 /* state ==> idle, unless currently disabled */
644 fp->state &= BNX2X_FP_STATE_DISABLED;
645 spin_unlock_bh(&fp->lock);
642 return rc; 646 return rc;
643} 647}
644 648
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
669 673
670 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 674 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
671 rc = true; 675 rc = true;
672 fp->state = BNX2X_FP_STATE_IDLE; 676
677 /* state ==> idle, unless currently disabled */
678 fp->state &= BNX2X_FP_STATE_DISABLED;
673 spin_unlock_bh(&fp->lock); 679 spin_unlock_bh(&fp->lock);
674 return rc; 680 return rc;
675} 681}
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
677/* true if a socket is polling, even if it did not get the lock */ 683/* true if a socket is polling, even if it did not get the lock */
678static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) 684static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
679{ 685{
680 WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); 686 WARN_ON(!(fp->state & BNX2X_FP_OWNED));
681 return fp->state & BNX2X_FP_USER_PEND; 687 return fp->state & BNX2X_FP_USER_PEND;
682} 688}
689
690/* false if fp is currently owned */
691static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
692{
693 int rc = true;
694
695 spin_lock_bh(&fp->lock);
696 if (fp->state & BNX2X_FP_OWNED)
697 rc = false;
698 fp->state |= BNX2X_FP_STATE_DISABLED;
699 spin_unlock_bh(&fp->lock);
700
701 return rc;
702}
683#else 703#else
684static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 704static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
685{ 705{
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
709{ 729{
710 return false; 730 return false;
711} 731}
732static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
733{
734 return true;
735}
712#endif /* CONFIG_NET_RX_BUSY_POLL */ 736#endif /* CONFIG_NET_RX_BUSY_POLL */
713 737
714/* Use 2500 as a mini-jumbo MTU for FCoE */ 738/* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
1250 * Therefore, if they would have been defined in the same union, 1274 * Therefore, if they would have been defined in the same union,
1251 * data can get corrupted. 1275 * data can get corrupted.
1252 */ 1276 */
1253 struct afex_vif_list_ramrod_data func_afex_rdata; 1277 union {
1278 struct afex_vif_list_ramrod_data viflist_data;
1279 struct function_update_data func_update;
1280 } func_afex_rdata;
1254 1281
1255 /* used by dmae command executer */ 1282 /* used by dmae command executer */
1256 struct dmae_command dmae[MAX_DMAE_C]; 1283 struct dmae_command dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
2499#define MCPR_SCRATCH_BASE(bp) \ 2526#define MCPR_SCRATCH_BASE(bp) \
2500 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2527 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2501 2528
2529#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
2530
2502#endif /* bnx2x.h */ 2531#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
160 struct sk_buff *skb = tx_buf->skb; 160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
162 int nbd; 162 int nbd;
163 u16 split_bd_len = 0;
163 164
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */ 165 /* prefetch skb end pointer to speedup dev_kfree_skb() */
165 prefetch(&skb->end); 166 prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", 168 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb); 169 txdata->txq_index, idx, tx_buf, skb);
169 170
170 /* unmap first bd */
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; 171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
174 172
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 173 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176#ifdef BNX2X_STOP_ON_ERROR 174#ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
188 --nbd; 186 --nbd;
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 187 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
190 188
191 /* ...and the TSO split header bd since they have no mapping */ 189 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 190 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
191 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
192 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
193 --nbd; 193 --nbd;
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
195 } 195 }
196 196
197 /* unmap first bd */
198 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
199 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
200 DMA_TO_DEVICE);
201
197 /* now free frags */ 202 /* now free frags */
198 while (nbd > 0) { 203 while (nbd > 0) {
199 204
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1790{ 1795{
1791 int i; 1796 int i;
1792 1797
1793 local_bh_disable();
1794 for_each_rx_queue_cnic(bp, i) { 1798 for_each_rx_queue_cnic(bp, i) {
1795 napi_disable(&bnx2x_fp(bp, i, napi)); 1799 napi_disable(&bnx2x_fp(bp, i, napi));
1796 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1800 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1797 mdelay(1); 1801 usleep_range(1000, 2000);
1798 } 1802 }
1799 local_bh_enable();
1800} 1803}
1801 1804
1802static void bnx2x_napi_disable(struct bnx2x *bp) 1805static void bnx2x_napi_disable(struct bnx2x *bp)
1803{ 1806{
1804 int i; 1807 int i;
1805 1808
1806 local_bh_disable();
1807 for_each_eth_queue(bp, i) { 1809 for_each_eth_queue(bp, i) {
1808 napi_disable(&bnx2x_fp(bp, i, napi)); 1810 napi_disable(&bnx2x_fp(bp, i, napi));
1809 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1811 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1810 mdelay(1); 1812 usleep_range(1000, 2000);
1811 } 1813 }
1812 local_bh_enable();
1813} 1814}
1814 1815
1815void bnx2x_netif_start(struct bnx2x *bp) 1816void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1832 bnx2x_napi_disable_cnic(bp); 1833 bnx2x_napi_disable_cnic(bp);
1833} 1834}
1834 1835
1835u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1836u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1837 void *accel_priv)
1836{ 1838{
1837 struct bnx2x *bp = netdev_priv(dev); 1839 struct bnx2x *bp = netdev_priv(dev);
1838 1840
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); 524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
525 525
526/* select_queue callback */ 526/* select_queue callback */
527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
528 void *accel_priv);
528 529
529static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 530static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
530 struct bnx2x_fastpath *fp, 531 struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3865 3865
3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3867 } else { 3867 } else {
3868 /* Enable Auto-Detect to support 1G over CL37 as well */
3869 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3870 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3871
3872 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3873 * parallel-detect loop when CL73 and CL37 are enabled.
3874 */
3875 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3876 MDIO_AER_BLOCK_AER_REG, 0);
3877 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3878 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
3879 bnx2x_set_aer_mmd(params, phy);
3880
3868 bnx2x_disable_kr2(params, vars, phy); 3881 bnx2x_disable_kr2(params, vars, phy);
3869 } 3882 }
3870 3883
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8120 *edc_mode = EDC_MODE_ACTIVE_DAC; 8133 *edc_mode = EDC_MODE_ACTIVE_DAC;
8121 else 8134 else
8122 check_limiting_mode = 1; 8135 check_limiting_mode = 1;
8123 } else if (copper_module_type & 8136 } else {
8124 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8137 *edc_mode = EDC_MODE_PASSIVE_DAC;
8138 /* Even in case PASSIVE_DAC indication is not set,
8139 * treat it as a passive DAC cable, since some cables
8140 * don't have this indication.
8141 */
8142 if (copper_module_type &
8143 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8125 DP(NETIF_MSG_LINK, 8144 DP(NETIF_MSG_LINK,
8126 "Passive Copper cable detected\n"); 8145 "Passive Copper cable detected\n");
8127 *edc_mode = 8146 } else {
8128 EDC_MODE_PASSIVE_DAC; 8147 DP(NETIF_MSG_LINK,
8129 } else { 8148 "Unknown copper-cable-type\n");
8130 DP(NETIF_MSG_LINK, 8149 }
8131 "Unknown copper-cable-type 0x%x !!!\n",
8132 copper_module_type);
8133 return -EINVAL;
8134 } 8150 }
8135 break; 8151 break;
8136 } 8152 }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10825 (1<<11)); 10841 (1<<11));
10826 10842
10827 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10843 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10828 (phy->speed_cap_mask & 10844 (phy->speed_cap_mask &
10829 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 10845 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
10830 (phy->req_line_speed == SPEED_1000)) { 10846 (phy->req_line_speed == SPEED_1000)) {
10831 an_1000_val |= (1<<8); 10847 an_1000_val |= (1<<8);
10832 autoneg_val |= (1<<9 | 1<<12); 10848 autoneg_val |= (1<<9 | 1<<12);
10833 if (phy->req_duplex == DUPLEX_FULL) 10849 if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10843 0x09, 10859 0x09,
10844 &an_1000_val); 10860 &an_1000_val);
10845 10861
10846 /* Set 100 speed advertisement */ 10862 /* Advertise 10/100 link speed */
10847 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10863 if (phy->req_line_speed == SPEED_AUTO_NEG) {
10848 (phy->speed_cap_mask & 10864 if (phy->speed_cap_mask &
10849 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
10850 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { 10866 an_10_100_val |= (1<<5);
10851 an_10_100_val |= (1<<7); 10867 autoneg_val |= (1<<9 | 1<<12);
10852 /* Enable autoneg and restart autoneg for legacy speeds */ 10868 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
10853 autoneg_val |= (1<<9 | 1<<12); 10869 }
10854 10870 if (phy->speed_cap_mask &
10855 if (phy->req_duplex == DUPLEX_FULL) 10871 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
10856 an_10_100_val |= (1<<8);
10857 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10858 }
10859
10860 /* Set 10 speed advertisement */
10861 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10862 (phy->speed_cap_mask &
10863 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
10864 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
10865 an_10_100_val |= (1<<5);
10866 autoneg_val |= (1<<9 | 1<<12);
10867 if (phy->req_duplex == DUPLEX_FULL)
10868 an_10_100_val |= (1<<6); 10872 an_10_100_val |= (1<<6);
10869 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 10873 autoneg_val |= (1<<9 | 1<<12);
10874 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
10875 }
10876 if (phy->speed_cap_mask &
10877 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
10878 an_10_100_val |= (1<<7);
10879 autoneg_val |= (1<<9 | 1<<12);
10880 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
10881 }
10882 if (phy->speed_cap_mask &
10883 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
10884 an_10_100_val |= (1<<8);
10885 autoneg_val |= (1<<9 | 1<<12);
10886 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
10887 }
10870 } 10888 }
10871 10889
10872 /* Only 10/100 are allowed to work in FORCE mode */ 10890 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
13342 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, 13360 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
13343 old_status, status); 13361 old_status, status);
13344 13362
13363 /* Do not touch the link in case physical link down */
13364 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
13365 return 1;
13366
13345 /* a. Update shmem->link_status accordingly 13367 /* a. Update shmem->link_status accordingly
13346 * b. Update link_vars->link_up 13368 * b. Update link_vars->link_up
13347 */ 13369 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13550 */ 13572 */
13551 not_kr2_device = (((base_page & 0x8000) == 0) || 13573 not_kr2_device = (((base_page & 0x8000) == 0) ||
13552 (((base_page & 0x8000) && 13574 (((base_page & 0x8000) &&
13553 ((next_page & 0xe0) == 0x2)))); 13575 ((next_page & 0xe0) == 0x20))));
13554 13576
13555 /* In case KR2 is already disabled, check if we need to re-enable it */ 13577 /* In case KR2 is already disabled, check if we need to re-enable it */
13556 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13578 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..8b3107b2fcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11447 } 11447 }
11448 } 11448 }
11449 11449
11450 /* adjust igu_sb_cnt to MF for E1x */ 11450 /* adjust igu_sb_cnt to MF for E1H */
11451 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 11451 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11452 bp->igu_sb_cnt /= E1HVN_MAX; 11452 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11453 11453
11454 /* port info */ 11454 /* port info */
11455 bnx2x_get_port_hwinfo(bp); 11455 bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca 7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da 7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea 7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
7182#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
7182#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 7183#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
7183#define MDIO_WC_REG_XGXS_STATUS3 0x8129 7184#define MDIO_WC_REG_XGXS_STATUS3 0x8129
7184#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 7185#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2038 struct bnx2x_vlan_mac_ramrod_params p; 2038 struct bnx2x_vlan_mac_ramrod_params p;
2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2041 unsigned long flags;
2041 int read_lock; 2042 int read_lock;
2042 int rc = 0; 2043 int rc = 0;
2043 2044
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2046 spin_lock_bh(&exeq->lock); 2047 spin_lock_bh(&exeq->lock);
2047 2048
2048 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 2049 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2049 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 2050 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2050 *vlan_mac_flags) { 2051 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2052 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2051 rc = exeq->remove(bp, exeq->owner, exeq_pos); 2053 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2052 if (rc) { 2054 if (rc) {
2053 BNX2X_ERR("Failed to remove command\n"); 2055 BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2080 return read_lock; 2082 return read_lock;
2081 2083
2082 list_for_each_entry(pos, &o->head, link) { 2084 list_for_each_entry(pos, &o->head, link) {
2083 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2085 flags = pos->vlan_mac_flags;
2086 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2087 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2084 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2088 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2085 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 2089 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2086 rc = bnx2x_config_vlan_mac(bp, &p); 2090 rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
4382 struct bnx2x_raw_obj *r = &o->raw; 4386 struct bnx2x_raw_obj *r = &o->raw;
4383 4387
4384 /* Do nothing if only driver cleanup was requested */ 4388 /* Do nothing if only driver cleanup was requested */
4385 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4389 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4390 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4391 p->ramrod_flags);
4386 return 0; 4392 return 0;
4393 }
4387 4394
4388 r->set_pending(r); 4395 r->set_pending(r);
4389 4396
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
266 BNX2X_DONT_CONSUME_CAM_CREDIT, 266 BNX2X_DONT_CONSUME_CAM_CREDIT,
267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
268}; 268};
269/* When looking for matching filters, some flags are not interesting */
270#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
271 1 << BNX2X_ETH_MAC | \
272 1 << BNX2X_ISCSI_ETH_MAC | \
273 1 << BNX2X_NETQ_ETH_MAC)
274#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
275 ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
269 276
270struct bnx2x_vlan_mac_ramrod_params { 277struct bnx2x_vlan_mac_ramrod_params {
271 /* Object to run the command from */ 278 /* Object to run the command from */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2e46c28fc601..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1209 /* next state */ 1209 /* next state */
1210 vfop->state = BNX2X_VFOP_RXMODE_DONE; 1210 vfop->state = BNX2X_VFOP_RXMODE_DONE;
1211 1211
1212 /* record the accept flags in vfdb so hypervisor can modify them
1213 * if necessary
1214 */
1215 bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1216 ramrod->rx_accept_flags;
1212 vfop->rc = bnx2x_config_rx_mode(bp, ramrod); 1217 vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1213 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1218 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1214op_err: 1219op_err:
@@ -1224,39 +1229,43 @@ op_pending:
1224 return; 1229 return;
1225} 1230}
1226 1231
1232static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1233 struct bnx2x_rx_mode_ramrod_params *ramrod,
1234 struct bnx2x_virtf *vf,
1235 unsigned long accept_flags)
1236{
1237 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1238
1239 memset(ramrod, 0, sizeof(*ramrod));
1240 ramrod->cid = vfq->cid;
1241 ramrod->cl_id = vfq_cl_id(vf, vfq);
1242 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1243 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1244 ramrod->rx_accept_flags = accept_flags;
1245 ramrod->tx_accept_flags = accept_flags;
1246 ramrod->pstate = &vf->filter_state;
1247 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1248
1249 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1250 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1251 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1252
1253 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1254 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1255}
1256
1227int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 1257int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1228 struct bnx2x_virtf *vf, 1258 struct bnx2x_virtf *vf,
1229 struct bnx2x_vfop_cmd *cmd, 1259 struct bnx2x_vfop_cmd *cmd,
1230 int qid, unsigned long accept_flags) 1260 int qid, unsigned long accept_flags)
1231{ 1261{
1232 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1233 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1262 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1234 1263
1235 if (vfop) { 1264 if (vfop) {
1236 struct bnx2x_rx_mode_ramrod_params *ramrod = 1265 struct bnx2x_rx_mode_ramrod_params *ramrod =
1237 &vf->op_params.rx_mode; 1266 &vf->op_params.rx_mode;
1238 1267
1239 memset(ramrod, 0, sizeof(*ramrod)); 1268 bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1240
1241 /* Prepare ramrod parameters */
1242 ramrod->cid = vfq->cid;
1243 ramrod->cl_id = vfq_cl_id(vf, vfq);
1244 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1245 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1246
1247 ramrod->rx_accept_flags = accept_flags;
1248 ramrod->tx_accept_flags = accept_flags;
1249 ramrod->pstate = &vf->filter_state;
1250 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1251
1252 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1253 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1254 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1255
1256 ramrod->rdata =
1257 bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1258 ramrod->rdata_mapping =
1259 bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1260 1269
1261 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, 1270 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1262 bnx2x_vfop_rxmode, cmd->done); 1271 bnx2x_vfop_rxmode, cmd->done);
@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3202 bnx2x_iov_static_resc(bp, vf); 3211 bnx2x_iov_static_resc(bp, vf);
3203 } 3212 }
3204 3213
3205 /* prepare msix vectors in VF configuration space */ 3214 /* prepare msix vectors in VF configuration space - the value in the
3215 * PCI configuration space should be the index of the last entry,
3216 * namely one less than the actual size of the table
3217 */
3206 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3218 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3207 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3219 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3208 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3220 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3209 num_vf_queues); 3221 num_vf_queues - 1);
3210 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3222 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3211 vf_idx, num_vf_queues); 3223 vf_idx, num_vf_queues - 1);
3212 } 3224 }
3213 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3225 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3214 3226
@@ -3436,10 +3448,18 @@ out:
3436 3448
3437int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3449int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3438{ 3450{
3451 struct bnx2x_queue_state_params q_params = {NULL};
3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3453 struct bnx2x_queue_update_params *update_params;
3454 struct pf_vf_bulletin_content *bulletin = NULL;
3455 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3439 struct bnx2x *bp = netdev_priv(dev); 3456 struct bnx2x *bp = netdev_priv(dev);
3440 int rc, q_logical_state; 3457 struct bnx2x_vlan_mac_obj *vlan_obj;
3458 unsigned long vlan_mac_flags = 0;
3459 unsigned long ramrod_flags = 0;
3441 struct bnx2x_virtf *vf = NULL; 3460 struct bnx2x_virtf *vf = NULL;
3442 struct pf_vf_bulletin_content *bulletin = NULL; 3461 unsigned long accept_flags;
3462 int rc;
3443 3463
3444 /* sanity and init */ 3464 /* sanity and init */
3445 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3465 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3457 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3477 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3458 * to the VF since it doesn't have anything to do with it. But it useful 3478 * to the VF since it doesn't have anything to do with it. But it useful
3459 * to store it here in case the VF is not up yet and we can only 3479 * to store it here in case the VF is not up yet and we can only
3460 * configure the vlan later when it does. 3480 * configure the vlan later when it does. Treat vlan id 0 as remove the
3481 * Host tag.
3461 */ 3482 */
3462 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3483 if (vlan > 0)
3484 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3485 else
3486 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3463 bulletin->vlan = vlan; 3487 bulletin->vlan = vlan;
3464 3488
3465 /* is vf initialized and queue set up? */ 3489 /* is vf initialized and queue set up? */
3466 q_logical_state = 3490 if (vf->state != VF_ENABLED ||
3467 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3491 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3468 if (vf->state == VF_ENABLED && 3492 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3469 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3493 return rc;
3470 /* configure the vlan in device on this vf's queue */
3471 unsigned long ramrod_flags = 0;
3472 unsigned long vlan_mac_flags = 0;
3473 struct bnx2x_vlan_mac_obj *vlan_obj =
3474 &bnx2x_leading_vfq(vf, vlan_obj);
3475 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3476 struct bnx2x_queue_state_params q_params = {NULL};
3477 struct bnx2x_queue_update_params *update_params;
3478 3494
3479 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3495 /* configure the vlan in device on this vf's queue */
3480 if (rc) 3496 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3481 return rc; 3497 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3482 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3498 if (rc)
3499 return rc;
3483 3500
3484 /* must lock vfpf channel to protect against vf flows */ 3501 /* must lock vfpf channel to protect against vf flows */
3485 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3502 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3486 3503
3487 /* remove existing vlans */ 3504 /* remove existing vlans */
3488 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3505 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3489 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3506 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3490 &ramrod_flags); 3507 &ramrod_flags);
3491 if (rc) { 3508 if (rc) {
3492 BNX2X_ERR("failed to delete vlans\n"); 3509 BNX2X_ERR("failed to delete vlans\n");
3493 rc = -EINVAL; 3510 rc = -EINVAL;
3494 goto out; 3511 goto out;
3495 } 3512 }
3513
3514 /* need to remove/add the VF's accept_any_vlan bit */
3515 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3516 if (vlan)
3517 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3518 else
3519 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3520
3521 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3522 accept_flags);
3523 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3524 bnx2x_config_rx_mode(bp, &rx_ramrod);
3525
3526 /* configure the new vlan to device */
3527 memset(&ramrod_param, 0, sizeof(ramrod_param));
3528 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3529 ramrod_param.vlan_mac_obj = vlan_obj;
3530 ramrod_param.ramrod_flags = ramrod_flags;
3531 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3532 &ramrod_param.user_req.vlan_mac_flags);
3533 ramrod_param.user_req.u.vlan.vlan = vlan;
3534 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3535 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3536 if (rc) {
3537 BNX2X_ERR("failed to configure vlan\n");
3538 rc = -EINVAL;
3539 goto out;
3540 }
3496 3541
3497 /* send queue update ramrod to configure default vlan and silent 3542 /* send queue update ramrod to configure default vlan and silent
3498 * vlan removal 3543 * vlan removal
3544 */
3545 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3546 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3547 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3548 update_params = &q_params.params.update;
3549 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3550 &update_params->update_flags);
3551 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3552 &update_params->update_flags);
3553 if (vlan == 0) {
3554 /* if vlan is 0 then we want to leave the VF traffic
3555 * untagged, and leave the incoming traffic untouched
3556 * (i.e. do not remove any vlan tags).
3499 */ 3557 */
3500 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3558 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3501 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3559 &update_params->update_flags);
3502 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 3560 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3503 update_params = &q_params.params.update; 3561 &update_params->update_flags);
3504 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3562 } else {
3563 /* configure default vlan to vf queue and set silent
3564 * vlan removal (the vf remains unaware of this vlan).
3565 */
3566 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3505 &update_params->update_flags); 3567 &update_params->update_flags);
3506 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3568 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3507 &update_params->update_flags); 3569 &update_params->update_flags);
3570 update_params->def_vlan = vlan;
3571 update_params->silent_removal_value =
3572 vlan & VLAN_VID_MASK;
3573 update_params->silent_removal_mask = VLAN_VID_MASK;
3574 }
3508 3575
3509 if (vlan == 0) { 3576 /* Update the Queue state */
3510 /* if vlan is 0 then we want to leave the VF traffic 3577 rc = bnx2x_queue_state_change(bp, &q_params);
3511 * untagged, and leave the incoming traffic untouched 3578 if (rc) {
3512 * (i.e. do not remove any vlan tags). 3579 BNX2X_ERR("Failed to configure default VLAN\n");
3513 */ 3580 goto out;
3514 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3581 }
3515 &update_params->update_flags);
3516 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3517 &update_params->update_flags);
3518 } else {
3519 /* configure the new vlan to device */
3520 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3521 ramrod_param.vlan_mac_obj = vlan_obj;
3522 ramrod_param.ramrod_flags = ramrod_flags;
3523 ramrod_param.user_req.u.vlan.vlan = vlan;
3524 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3525 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3526 if (rc) {
3527 BNX2X_ERR("failed to configure vlan\n");
3528 rc = -EINVAL;
3529 goto out;
3530 }
3531
3532 /* configure default vlan to vf queue and set silent
3533 * vlan removal (the vf remains unaware of this vlan).
3534 */
3535 update_params = &q_params.params.update;
3536 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3537 &update_params->update_flags);
3538 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3539 &update_params->update_flags);
3540 update_params->def_vlan = vlan;
3541 }
3542 3582
3543 /* Update the Queue state */
3544 rc = bnx2x_queue_state_change(bp, &q_params);
3545 if (rc) {
3546 BNX2X_ERR("Failed to configure default VLAN\n");
3547 goto out;
3548 }
3549 3583
3550 /* clear the flag indicating that this VF needs its vlan 3584 /* clear the flag indicating that this VF needs its vlan
3551 * (will only be set if the HV configured the Vlan before vf was 3585 * (will only be set if the HV configured the Vlan before vf was
3552 * up and we were called because the VF came up later 3586 * up and we were called because the VF came up later
3553 */ 3587 */
3554out: 3588out:
3555 vf->cfg_flags &= ~VF_CFG_VLAN; 3589 vf->cfg_flags &= ~VF_CFG_VLAN;
3556 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3590 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3557 } 3591
3558 return rc; 3592 return rc;
3559} 3593}
3560 3594
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
74 /* VLANs object */ 74 /* VLANs object */
75 struct bnx2x_vlan_mac_obj vlan_obj; 75 struct bnx2x_vlan_mac_obj vlan_obj;
76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
77 unsigned long accept_flags; /* last accept flags configured */
77 78
78 /* Queue Slow-path State object */ 79 /* Queue Slow-path State object */
79 struct bnx2x_queue_sp_obj sp_obj; 80 struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
208 return -EINVAL; 208 return -EINVAL;
209 } 209 }
210 210
211 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); 211 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
212 212
213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; 213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
214 214
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1598 1598
1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1600 unsigned long accept = 0; 1600 unsigned long accept = 0;
1601 struct pf_vf_bulletin_content *bulletin =
1602 BP_VF_BULLETIN(bp, vf->index);
1601 1603
1602 /* covert VF-PF if mask to bnx2x accept flags */ 1604 /* covert VF-PF if mask to bnx2x accept flags */
1603 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) 1605 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1617 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1619 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1618 1620
1619 /* A packet arriving the vf's mac should be accepted 1621 /* A packet arriving the vf's mac should be accepted
1620 * with any vlan 1622 * with any vlan, unless a vlan has already been
1623 * configured.
1621 */ 1624 */
1622 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); 1625 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1626 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1623 1627
1624 /* set rx-mode */ 1628 /* set rx-mode */
1625 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, 1629 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1710 goto response; 1714 goto response;
1711 } 1715 }
1712 } 1716 }
1717 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1718 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1719 int i;
1720
1721 /* search for vlan filters */
1722 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1723 if (filters->filters[i].flags &
1724 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1725 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1726 vf->abs_vfid);
1727 vf->op_rc = -EPERM;
1728 goto response;
1729 }
1730 }
1731 }
1713 1732
1714 /* verify vf_qid */ 1733 /* verify vf_qid */
1715 if (filters->vf_qid > vf_rxq_count(vf)) 1734 if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1805 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; 1824 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1806 1825
1807 /* flags handled individually for backward/forward compatability */ 1826 /* flags handled individually for backward/forward compatability */
1827 vf_op_params->rss_flags = 0;
1828 vf_op_params->ramrod_flags = 0;
1829
1808 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1830 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1809 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1831 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1810 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1832 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f3dd93b4aeaa..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622{ 7622{
7623 u32 base = (u32) mapping & 0xffffffff; 7623 u32 base = (u32) mapping & 0xffffffff;
7624 7624
7625 return (base > 0xffffdcc0) && (base + len + 8 < base); 7625 return base + len + 8 < base;
7626} 7626}
7627 7627
7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6c9308850453..56e0415f8cdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -228,6 +228,25 @@ struct tp_params {
228 228
229 uint32_t dack_re; /* DACK timer resolution */ 229 uint32_t dack_re; /* DACK timer resolution */
230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
231
232 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
233 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
234
235 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
236 * subset of the set of fields which may be present in the Compressed
237 * Filter Tuple portion of filters and TCP TCB connections. The
238 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
239 * Since a variable number of fields may or may not be present, their
240 * shifted field positions within the Compressed Filter Tuple may
241 * vary, or not even be present if the field isn't selected in
242 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
243 * places we store their offsets here, or a -1 if the field isn't
244 * present.
245 */
246 int vlan_shift;
247 int vnic_shift;
248 int port_shift;
249 int protocol_shift;
231}; 250};
232 251
233struct vpd_params { 252struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
926 const u8 *fw_data, unsigned int fw_size, 945 const u8 *fw_data, unsigned int fw_size,
927 struct fw_hdr *card_fw, enum dev_state state, int *reset); 946 struct fw_hdr *card_fw, enum dev_state state, int *reset);
928int t4_prep_adapter(struct adapter *adapter); 947int t4_prep_adapter(struct adapter *adapter);
948int t4_init_tp_params(struct adapter *adap);
949int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
929int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 950int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
930void t4_fatal_err(struct adapter *adapter); 951void t4_fatal_err(struct adapter *adapter);
931int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 952int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e035a7d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2986 if (stid >= 0) { 2986 if (stid >= 0) {
2987 t->stid_tab[stid].data = data; 2987 t->stid_tab[stid].data = data;
2988 stid += t->stid_base; 2988 stid += t->stid_base;
2989 t->stids_in_use++; 2989 /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2991 * needs 2 TIDs.
2992 */
2993 if (family == PF_INET)
2994 t->stids_in_use++;
2995 else
2996 t->stids_in_use += 4;
2990 } 2997 }
2991 spin_unlock_bh(&t->stid_lock); 2998 spin_unlock_bh(&t->stid_lock);
2992 return stid; 2999 return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3012 } 3019 }
3013 if (stid >= 0) { 3020 if (stid >= 0) {
3014 t->stid_tab[stid].data = data; 3021 t->stid_tab[stid].data = data;
3015 stid += t->stid_base; 3022 stid -= t->nstids;
3023 stid += t->sftid_base;
3016 t->stids_in_use++; 3024 t->stids_in_use++;
3017 } 3025 }
3018 spin_unlock_bh(&t->stid_lock); 3026 spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
3024 */ 3032 */
3025void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3026{ 3034{
3027 stid -= t->stid_base; 3035 /* Is it a server filter TID? */
3036 if (t->nsftids && (stid >= t->sftid_base)) {
3037 stid -= t->sftid_base;
3038 stid += t->nstids;
3039 } else {
3040 stid -= t->stid_base;
3041 }
3042
3028 spin_lock_bh(&t->stid_lock); 3043 spin_lock_bh(&t->stid_lock);
3029 if (family == PF_INET) 3044 if (family == PF_INET)
3030 __clear_bit(stid, t->stid_bmap); 3045 __clear_bit(stid, t->stid_bmap);
3031 else 3046 else
3032 bitmap_release_region(t->stid_bmap, stid, 2); 3047 bitmap_release_region(t->stid_bmap, stid, 2);
3033 t->stid_tab[stid].data = NULL; 3048 t->stid_tab[stid].data = NULL;
3034 t->stids_in_use--; 3049 if (family == PF_INET)
3050 t->stids_in_use--;
3051 else
3052 t->stids_in_use -= 4;
3035 spin_unlock_bh(&t->stid_lock); 3053 spin_unlock_bh(&t->stid_lock);
3036} 3054}
3037EXPORT_SYMBOL(cxgb4_free_stid); 3055EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
3134 size_t size; 3152 size_t size;
3135 unsigned int stid_bmap_size; 3153 unsigned int stid_bmap_size;
3136 unsigned int natids = t->natids; 3154 unsigned int natids = t->natids;
3155 struct adapter *adap = container_of(t, struct adapter, tids);
3137 3156
3138 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3157 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3139 size = t->ntids * sizeof(*t->tid_tab) + 3158 size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
3167 t->afree = t->atid_tab; 3186 t->afree = t->atid_tab;
3168 } 3187 }
3169 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3188 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t->stid_base &&
3191 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 __set_bit(0, t->stid_bmap);
3193
3170 return 0; 3194 return 0;
3171} 3195}
3172 3196
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3731 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3755 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3732 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3756 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3733 (adap->fn * 4)); 3757 (adap->fn * 4));
3734 lli.filt_mode = adap->filter_mode; 3758 lli.filt_mode = adap->params.tp.vlan_pri_map;
3735 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3736 for (i = 0; i < NCHAN; i++) 3760 for (i = 0; i < NCHAN; i++)
3737 lli.tx_modq[i] = i; 3761 lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4179 adap = netdev2adap(dev); 4203 adap = netdev2adap(dev);
4180 4204
4181 /* Adjust stid to correct filter index */ 4205 /* Adjust stid to correct filter index */
4182 stid -= adap->tids.nstids; 4206 stid -= adap->tids.sftid_base;
4183 stid += adap->tids.nftids; 4207 stid += adap->tids.nftids;
4184 4208
4185 /* Check to make sure the filter requested is writable ... 4209 /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4205 f->fs.val.lip[i] = val[i]; 4229 f->fs.val.lip[i] = val[i];
4206 f->fs.mask.lip[i] = ~0; 4230 f->fs.mask.lip[i] = ~0;
4207 } 4231 }
4208 if (adap->filter_mode & F_PORT) { 4232 if (adap->params.tp.vlan_pri_map & F_PORT) {
4209 f->fs.val.iport = port; 4233 f->fs.val.iport = port;
4210 f->fs.mask.iport = mask; 4234 f->fs.mask.iport = mask;
4211 } 4235 }
4212 } 4236 }
4213 4237
4238 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 f->fs.val.proto = IPPROTO_TCP;
4240 f->fs.mask.proto = ~0;
4241 }
4242
4214 f->fs.dirsteer = 1; 4243 f->fs.dirsteer = 1;
4215 f->fs.iq = queue; 4244 f->fs.iq = queue;
4216 /* Mark filter as locked */ 4245 /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4237 adap = netdev2adap(dev); 4266 adap = netdev2adap(dev);
4238 4267
4239 /* Adjust stid to correct filter index */ 4268 /* Adjust stid to correct filter index */
4240 stid -= adap->tids.nstids; 4269 stid -= adap->tids.sftid_base;
4241 stid += adap->tids.nftids; 4270 stid += adap->tids.nftids;
4242 4271
4243 f = &adap->tids.ftid_tab[stid]; 4272 f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
5092 enum dev_state state; 5121 enum dev_state state;
5093 u32 params[7], val[7]; 5122 u32 params[7], val[7];
5094 struct fw_caps_config_cmd caps_cmd; 5123 struct fw_caps_config_cmd caps_cmd;
5095 int reset = 1, j; 5124 int reset = 1;
5096 5125
5097 /* 5126 /*
5098 * Contact FW, advertising Master capability (and potentially forcing 5127 * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
5434 /* 5463 /*
5435 * These are finalized by FW initialization, load their values now. 5464 * These are finalized by FW initialization, load their values now.
5436 */ 5465 */
5437 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5438 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5439 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5440 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5466 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5441 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5467 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5442 adap->params.b_wnd); 5468 adap->params.b_wnd);
5443 5469
5444 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5470 t4_init_tp_params(adap);
5445 for (j = 0; j < NCHAN; j++)
5446 adap->params.tp.tx_modq[j] = j;
5447
5448 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5449 &adap->filter_mode, 1,
5450 TP_VLAN_PRI_MAP);
5451
5452 adap->flags |= FW_OK; 5471 adap->flags |= FW_OK;
5453 return 0; 5472 return 0;
5454 5473
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
131 131
132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
133{ 133{
134 stid -= t->stid_base; 134 /* Is it a server filter TID? */
135 if (t->nsftids && (stid >= t->sftid_base)) {
136 stid -= t->sftid_base;
137 stid += t->nstids;
138 } else {
139 stid -= t->stid_base;
140 }
141
135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 142 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
136} 143}
137 144
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..cb05be905def 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
45#include "l2t.h" 45#include "l2t.h"
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h"
48 49
49#define VLAN_NONE 0xfff 50#define VLAN_NONE 0xfff
50 51
@@ -411,6 +412,40 @@ done:
411} 412}
412EXPORT_SYMBOL(cxgb4_l2t_get); 413EXPORT_SYMBOL(cxgb4_l2t_get);
413 414
415u64 cxgb4_select_ntuple(struct net_device *dev,
416 const struct l2t_entry *l2t)
417{
418 struct adapter *adap = netdev2adap(dev);
419 struct tp_params *tp = &adap->params.tp;
420 u64 ntuple = 0;
421
422 /* Initialize each of the fields which we care about which are present
423 * in the Compressed Filter Tuple.
424 */
425 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
426 ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
427
428 if (tp->port_shift >= 0)
429 ntuple |= (u64)l2t->lport << tp->port_shift;
430
431 if (tp->protocol_shift >= 0)
432 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
433
434 if (tp->vnic_shift >= 0) {
435 u32 viid = cxgb4_port_viid(dev);
436 u32 vf = FW_VIID_VIN_GET(viid);
437 u32 pf = FW_VIID_PFN_GET(viid);
438 u32 vld = FW_VIID_VIVLD_GET(viid);
439
440 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
441 V_FT_VNID_ID_PF(pf) |
442 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
443 }
444
445 return ntuple;
446}
447EXPORT_SYMBOL(cxgb4_select_ntuple);
448
414/* 449/*
415 * Called when address resolution fails for an L2T entry to handle packets 450 * Called when address resolution fails for an L2T entry to handle packets
416 * on the arpq head. If a packet specifies a failure handler it is invoked, 451 * on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, 98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev, 99 const struct net_device *physdev,
100 unsigned int priority); 100 unsigned int priority);
101 101u64 cxgb4_select_ntuple(struct net_device *dev,
102 const struct l2t_entry *l2t);
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc380c36e1a8..cc3511a5cd0c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2581 #undef READ_FL_BUF 2581 #undef READ_FL_BUF
2582 2582
2583 if (fl_small_pg != PAGE_SIZE || 2583 if (fl_small_pg != PAGE_SIZE ||
2584 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || 2584 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2585 (fl_large_pg & (fl_large_pg-1)) != 0))) { 2585 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2587 fl_small_pg, fl_large_pg); 2587 fl_small_pg, fl_large_pg);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 74a6fce5a15a..e1413eacdbd2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
3808 return 0; 3808 return 0;
3809} 3809}
3810 3810
3811/**
3812 * t4_init_tp_params - initialize adap->params.tp
3813 * @adap: the adapter
3814 *
3815 * Initialize various fields of the adapter's TP Parameters structure.
3816 */
3817int t4_init_tp_params(struct adapter *adap)
3818{
3819 int chan;
3820 u32 v;
3821
3822 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3823 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3824 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3825
3826 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3827 for (chan = 0; chan < NCHAN; chan++)
3828 adap->params.tp.tx_modq[chan] = chan;
3829
 3830 /* Cache the adapter's Compressed Filter Mode and global Ingress
3831 * Configuration.
3832 */
3833 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3834 &adap->params.tp.vlan_pri_map, 1,
3835 TP_VLAN_PRI_MAP);
3836 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3837 &adap->params.tp.ingress_config, 1,
3838 TP_INGRESS_CONFIG);
3839
3840 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3841 * shift positions of several elements of the Compressed Filter Tuple
3842 * for this adapter which we need frequently ...
3843 */
3844 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3845 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3846 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3847 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3848 F_PROTOCOL);
3849
3850 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
 3851 * represents the presence of an Outer VLAN instead of a VNIC ID.
3852 */
3853 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3854 adap->params.tp.vnic_shift = -1;
3855
3856 return 0;
3857}
3858
3859/**
3860 * t4_filter_field_shift - calculate filter field shift
3861 * @adap: the adapter
3862 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3863 *
3864 * Return the shift position of a filter field within the Compressed
3865 * Filter Tuple. The filter field is specified via its selection bit
 3866 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3867 */
3868int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3869{
3870 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3871 unsigned int sel;
3872 int field_shift;
3873
3874 if ((filter_mode & filter_sel) == 0)
3875 return -1;
3876
3877 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3878 switch (filter_mode & sel) {
3879 case F_FCOE:
3880 field_shift += W_FT_FCOE;
3881 break;
3882 case F_PORT:
3883 field_shift += W_FT_PORT;
3884 break;
3885 case F_VNIC_ID:
3886 field_shift += W_FT_VNIC_ID;
3887 break;
3888 case F_VLAN:
3889 field_shift += W_FT_VLAN;
3890 break;
3891 case F_TOS:
3892 field_shift += W_FT_TOS;
3893 break;
3894 case F_PROTOCOL:
3895 field_shift += W_FT_PROTOCOL;
3896 break;
3897 case F_ETHERTYPE:
3898 field_shift += W_FT_ETHERTYPE;
3899 break;
3900 case F_MACMATCH:
3901 field_shift += W_FT_MACMATCH;
3902 break;
3903 case F_MPSHITTYPE:
3904 field_shift += W_FT_MPSHITTYPE;
3905 break;
3906 case F_FRAGMENTATION:
3907 field_shift += W_FT_FRAGMENTATION;
3908 break;
3909 }
3910 }
3911 return field_shift;
3912}
3913
3811int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3914int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3812{ 3915{
3813 u8 addr[6]; 3916 u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d69d2c..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
1171 1171
1172#define A_TP_TX_SCHED_PCMD 0x25 1172#define A_TP_TX_SCHED_PCMD 0x25
1173 1173
1174#define S_VNIC 11
1175#define V_VNIC(x) ((x) << S_VNIC)
1176#define F_VNIC V_VNIC(1U)
1177
1178#define S_FRAGMENTATION 9
1179#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
1180#define F_FRAGMENTATION V_FRAGMENTATION(1U)
1181
1182#define S_MPSHITTYPE 8
1183#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
1184#define F_MPSHITTYPE V_MPSHITTYPE(1U)
1185
1186#define S_MACMATCH 7
1187#define V_MACMATCH(x) ((x) << S_MACMATCH)
1188#define F_MACMATCH V_MACMATCH(1U)
1189
1190#define S_ETHERTYPE 6
1191#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
1192#define F_ETHERTYPE V_ETHERTYPE(1U)
1193
1194#define S_PROTOCOL 5
1195#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
1196#define F_PROTOCOL V_PROTOCOL(1U)
1197
1198#define S_TOS 4
1199#define V_TOS(x) ((x) << S_TOS)
1200#define F_TOS V_TOS(1U)
1201
1202#define S_VLAN 3
1203#define V_VLAN(x) ((x) << S_VLAN)
1204#define F_VLAN V_VLAN(1U)
1205
1206#define S_VNIC_ID 2
1207#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
1208#define F_VNIC_ID V_VNIC_ID(1U)
1209
1174#define S_PORT 1 1210#define S_PORT 1
1175#define V_PORT(x) ((x) << S_PORT) 1211#define V_PORT(x) ((x) << S_PORT)
1176#define F_PORT V_PORT(1U) 1212#define F_PORT V_PORT(1U)
1177 1213
1214#define S_FCOE 0
1215#define V_FCOE(x) ((x) << S_FCOE)
1216#define F_FCOE V_FCOE(1U)
1217
1178#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 1218#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1179#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 1219#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1180 1220
@@ -1213,4 +1253,37 @@
1213#define V_CHIPID(x) ((x) << S_CHIPID) 1253#define V_CHIPID(x) ((x) << S_CHIPID)
1214#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 1254#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1215 1255
1256/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
1257 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
1258 * selects for a particular field being present. These fields, when present
1259 * in the Compressed Filter Tuple, have the following widths in bits.
1260 */
1261#define W_FT_FCOE 1
1262#define W_FT_PORT 3
1263#define W_FT_VNIC_ID 17
1264#define W_FT_VLAN 17
1265#define W_FT_TOS 8
1266#define W_FT_PROTOCOL 8
1267#define W_FT_ETHERTYPE 16
1268#define W_FT_MACMATCH 9
1269#define W_FT_MPSHITTYPE 3
1270#define W_FT_FRAGMENTATION 1
1271
1272/* Some of the Compressed Filter Tuple fields have internal structure. These
1273 * bit shifts/masks describe those structures. All shifts are relative to the
1274 * base position of the fields within the Compressed Filter Tuple
1275 */
1276#define S_FT_VLAN_VLD 16
1277#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
1278#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
1279
1280#define S_FT_VNID_ID_VF 0
1281#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
1282
1283#define S_FT_VNID_ID_PF 7
1284#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
1285
1286#define S_FT_VNID_ID_VLD 16
1287#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
1288
1216#endif /* __T4_REGS_H */ 1289#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df619b53..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
104#define BE3_MAX_RSS_QS 16 104#define BE3_MAX_RSS_QS 16
105#define BE3_MAX_TX_QS 16 105#define BE3_MAX_TX_QS 16
106#define BE3_MAX_EVT_QS 16 106#define BE3_MAX_EVT_QS 16
107#define BE3_SRIOV_MAX_EVT_QS 8
107 108
108#define MAX_RX_QS 32 109#define MAX_RX_QS 32
109#define MAX_EVT_QS 32 110#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
480 struct list_head entry; 481 struct list_head entry;
481 482
482 u32 flash_status; 483 u32 flash_status;
483 struct completion flash_compl; 484 struct completion et_cmd_compl;
484 485
485 struct be_resources res; /* resources available for the func */ 486 struct be_resources res; /* resources available for the func */
486 u16 num_vfs; /* Number of VFs provisioned by PF */ 487 u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1ef14c..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
141 subsystem = resp_hdr->subsystem; 141 subsystem = resp_hdr->subsystem;
142 } 142 }
143 143
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl);
147 return 0;
148 }
149
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) { 152 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status; 153 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl); 154 complete(&adapter->et_cmd_compl);
149 } 155 }
150 156
151 if (compl_status == MCC_STATUS_SUCCESS) { 157 if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2017 0x3ea83c02, 0x4a110304}; 2023 0x3ea83c02, 0x4a110304};
2018 int status; 2024 int status;
2019 2025
2026 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 return 0;
2028
2020 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2029 if (mutex_lock_interruptible(&adapter->mbox_lock))
2021 return -1; 2030 return -1;
2022 2031
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2160 be_mcc_notify(adapter); 2169 be_mcc_notify(adapter);
2161 spin_unlock_bh(&adapter->mcc_lock); 2170 spin_unlock_bh(&adapter->mcc_lock);
2162 2171
2163 if (!wait_for_completion_timeout(&adapter->flash_compl, 2172 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2164 msecs_to_jiffies(60000))) 2173 msecs_to_jiffies(60000)))
2165 status = -1; 2174 status = -1;
2166 else 2175 else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2255 be_mcc_notify(adapter); 2264 be_mcc_notify(adapter);
2256 spin_unlock_bh(&adapter->mcc_lock); 2265 spin_unlock_bh(&adapter->mcc_lock);
2257 2266
2258 if (!wait_for_completion_timeout(&adapter->flash_compl, 2267 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2259 msecs_to_jiffies(40000))) 2268 msecs_to_jiffies(40000)))
2260 status = -1; 2269 status = -1;
2261 else 2270 else
2262 status = adapter->flash_status; 2271 status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2367{ 2376{
2368 struct be_mcc_wrb *wrb; 2377 struct be_mcc_wrb *wrb;
2369 struct be_cmd_req_loopback_test *req; 2378 struct be_cmd_req_loopback_test *req;
2379 struct be_cmd_resp_loopback_test *resp;
2370 int status; 2380 int status;
2371 2381
2372 spin_lock_bh(&adapter->mcc_lock); 2382 spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2381 2391
2382 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2392 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2383 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2393 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2384 req->hdr.timeout = cpu_to_le32(4);
2385 2394
2395 req->hdr.timeout = cpu_to_le32(15);
2386 req->pattern = cpu_to_le64(pattern); 2396 req->pattern = cpu_to_le64(pattern);
2387 req->src_port = cpu_to_le32(port_num); 2397 req->src_port = cpu_to_le32(port_num);
2388 req->dest_port = cpu_to_le32(port_num); 2398 req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2390 req->num_pkts = cpu_to_le32(num_pkts); 2400 req->num_pkts = cpu_to_le32(num_pkts);
2391 req->loopback_type = cpu_to_le32(loopback_type); 2401 req->loopback_type = cpu_to_le32(loopback_type);
2392 2402
2393 status = be_mcc_notify_wait(adapter); 2403 be_mcc_notify(adapter);
2394 if (!status) { 2404
2395 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2405 spin_unlock_bh(&adapter->mcc_lock);
2396 status = le32_to_cpu(resp->status);
2397 }
2398 2406
2407 wait_for_completion(&adapter->et_cmd_compl);
2408 resp = embedded_payload(wrb);
2409 status = le32_to_cpu(resp->status);
2410
2411 return status;
2399err: 2412err:
2400 spin_unlock_bh(&adapter->mcc_lock); 2413 spin_unlock_bh(&adapter->mcc_lock);
2401 return status; 2414 return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0fde69d5cb6a..bf40fdaecfa3 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2744 if (!BEx_chip(adapter)) 2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6; 2746 RSS_ENABLE_UDP_IPV6;
2747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
2747 2751
2748 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749 128); 2753 128);
2750 if (rc) { 2754 if (rc) {
2751 adapter->rss_flags = 0; 2755 adapter->rss_flags = RSS_ENABLE_NONE;
2752 return rc; 2756 return rc;
2753 }
2754 } 2757 }
2755 2758
2756 /* First time posting */ 2759 /* First time posting */
@@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3124{ 3127{
3125 struct pci_dev *pdev = adapter->pdev; 3128 struct pci_dev *pdev = adapter->pdev;
3126 bool use_sriov = false; 3129 bool use_sriov = false;
3130 int max_vfs;
3127 3131
3128 if (BE3_chip(adapter) && sriov_want(adapter)) { 3132 max_vfs = pci_sriov_get_totalvfs(pdev);
3129 int max_vfs;
3130 3133
3131 max_vfs = pci_sriov_get_totalvfs(pdev); 3134 if (BE3_chip(adapter) && sriov_want(adapter)) {
3132 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3135 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3133 use_sriov = res->max_vfs; 3136 use_sriov = res->max_vfs;
3134 } 3137 }
@@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3159 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3162 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3160 res->max_rx_qs = res->max_rss_qs + 1; 3163 res->max_rx_qs = res->max_rss_qs + 1;
3161 3164
3162 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; 3165 if (be_physfn(adapter))
3166 res->max_evt_qs = (max_vfs > 0) ?
3167 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168 else
3169 res->max_evt_qs = 1;
3163 3170
3164 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; 3171 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) 3172 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
4205 spin_lock_init(&adapter->mcc_lock); 4212 spin_lock_init(&adapter->mcc_lock);
4206 spin_lock_init(&adapter->mcc_cq_lock); 4213 spin_lock_init(&adapter->mcc_cq_lock);
4207 4214
4208 init_completion(&adapter->flash_compl); 4215 init_completion(&adapter->et_cmd_compl);
4209 pci_save_state(adapter->pdev); 4216 pci_save_state(adapter->pdev);
4210 return 0; 4217 return 0;
4211 4218
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e7c8b749c5a5..50bb71c663e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
428 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
430 430
431 skb_tx_timestamp(skb);
432
431 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
432 434
433 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
436 /* Trigger transmission start */ 438 /* Trigger transmission start */
437 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
438 440
439 skb_tx_timestamp(skb);
440
441 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
442} 442}
443 443
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
718 e1000_release_phy_80003es2lan(hw); 718 e1000_release_phy_80003es2lan(hw);
719 719
720 /* Disable IBIST slave mode (far-end loopback) */ 720 /* Disable IBIST slave mode (far-end loopback) */
721 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 721 ret_val =
722 &kum_reg_data); 722 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
723 &kum_reg_data);
724 if (ret_val)
725 return ret_val;
723 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; 726 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
724 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 727 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
725 kum_reg_data); 728 kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..c30d41d6e426 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6174 return 0; 6174 return 0;
6175} 6175}
6176 6176
6177#ifdef CONFIG_PM_SLEEP 6177#ifdef CONFIG_PM
6178static int e1000_suspend(struct device *dev) 6178static int e1000_suspend(struct device *dev)
6179{ 6179{
6180 struct pci_dev *pdev = to_pci_dev(dev); 6180 struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
6193 6193
6194 return __e1000_resume(pdev); 6194 return __e1000_resume(pdev);
6195} 6195}
6196#endif /* CONFIG_PM_SLEEP */ 6196#endif /* CONFIG_PM */
6197 6197
6198#ifdef CONFIG_PM_RUNTIME 6198#ifdef CONFIG_PM_RUNTIME
6199static int e1000_runtime_suspend(struct device *dev) 6199static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1757 * it across the board. 1757 * it across the board.
1758 */ 1758 */
1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1760 if (ret_val) 1760 if (ret_val) {
1761 /* If the first read fails, another entity may have 1761 /* If the first read fails, another entity may have
1762 * ownership of the resources, wait and try again to 1762 * ownership of the resources, wait and try again to
1763 * see if they have relinquished the resources yet. 1763 * see if they have relinquished the resources yet.
1764 */ 1764 */
1765 udelay(usec_interval); 1765 if (usec_interval >= 1000)
1766 msleep(usec_interval / 1000);
1767 else
1768 udelay(usec_interval);
1769 }
1766 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1770 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1767 if (ret_val) 1771 if (ret_val)
1768 break; 1772 break;
1769 if (phy_status & BMSR_LSTATUS) 1773 if (phy_status & BMSR_LSTATUS)
1770 break; 1774 break;
1771 if (usec_interval >= 1000) 1775 if (usec_interval >= 1000)
1772 mdelay(usec_interval / 1000); 1776 msleep(usec_interval / 1000);
1773 else 1777 else
1774 udelay(usec_interval); 1778 udelay(usec_interval);
1775 } 1779 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6827 return __ixgbe_maybe_stop_tx(tx_ring, size); 6827 return __ixgbe_maybe_stop_tx(tx_ring, size);
6828} 6828}
6829 6829
6830#ifdef IXGBE_FCOE 6830static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6831static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6831 void *accel_priv)
6832{ 6832{
6833 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6834#ifdef IXGBE_FCOE
6833 struct ixgbe_adapter *adapter; 6835 struct ixgbe_adapter *adapter;
6834 struct ixgbe_ring_feature *f; 6836 struct ixgbe_ring_feature *f;
6835 int txq; 6837 int txq;
6838#endif
6839
6840 if (fwd_adapter)
6841 return skb->queue_mapping + fwd_adapter->tx_base_queue;
6842
6843#ifdef IXGBE_FCOE
6836 6844
6837 /* 6845 /*
6838 * only execute the code below if protocol is FCoE 6846 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6858 txq -= f->indices; 6866 txq -= f->indices;
6859 6867
6860 return txq + f->offset; 6868 return txq + f->offset;
6869#else
6870 return __netdev_pick_tx(dev, skb);
6871#endif
6861} 6872}
6862 6873
6863#endif
6864netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 6874netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6865 struct ixgbe_adapter *adapter, 6875 struct ixgbe_adapter *adapter,
6866 struct ixgbe_ring *tx_ring) 6876 struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7629 kfree(fwd_adapter); 7639 kfree(fwd_adapter);
7630} 7640}
7631 7641
7632static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7633 struct net_device *dev,
7634 void *priv)
7635{
7636 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7637 unsigned int queue;
7638 struct ixgbe_ring *tx_ring;
7639
7640 queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7641 tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7642
7643 return __ixgbe_xmit_frame(skb, dev, tx_ring);
7644}
7645
7646static const struct net_device_ops ixgbe_netdev_ops = { 7642static const struct net_device_ops ixgbe_netdev_ops = {
7647 .ndo_open = ixgbe_open, 7643 .ndo_open = ixgbe_open,
7648 .ndo_stop = ixgbe_close, 7644 .ndo_stop = ixgbe_close,
7649 .ndo_start_xmit = ixgbe_xmit_frame, 7645 .ndo_start_xmit = ixgbe_xmit_frame,
7650#ifdef IXGBE_FCOE
7651 .ndo_select_queue = ixgbe_select_queue, 7646 .ndo_select_queue = ixgbe_select_queue,
7652#endif
7653 .ndo_set_rx_mode = ixgbe_set_rx_mode, 7647 .ndo_set_rx_mode = ixgbe_set_rx_mode,
7654 .ndo_validate_addr = eth_validate_addr, 7648 .ndo_validate_addr = eth_validate_addr,
7655 .ndo_set_mac_address = ixgbe_set_mac, 7649 .ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7689 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7683 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7690 .ndo_dfwd_add_station = ixgbe_fwd_add, 7684 .ndo_dfwd_add_station = ixgbe_fwd_add,
7691 .ndo_dfwd_del_station = ixgbe_fwd_del, 7685 .ndo_dfwd_del_station = ixgbe_fwd_del,
7692 .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
7693}; 7686};
7694 7687
7695/** 7688/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..72084f70adbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
291{ 291{
292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
293 int err; 293 int err;
294#ifdef CONFIG_PCI_IOV
294 u32 current_flags = adapter->flags; 295 u32 current_flags = adapter->flags;
296#endif
295 297
296 err = ixgbe_disable_sriov(adapter); 298 err = ixgbe_disable_sriov(adapter);
297 299
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619} 619}
620 620
621static u16 621static u16
622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) 622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
623 void *accel_priv)
623{ 624{
624 /* we are currently only using the first queue */ 625 /* we are currently only using the first queue */
625 return 0; 626 return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
92 if (time_is_before_jiffies(end)) 92 if (time_is_before_jiffies(end))
93 ++timedout; 93 ++timedout;
94 } else { 94 } else {
95 /* wait_event_timeout does not guarantee a delay of at
96 * least one whole jiffie, so timeout must be no less
97 * than two.
98 */
99 if (timeout < 2)
100 timeout = 2;
95 wait_event_timeout(dev->smi_busy_wait, 101 wait_event_timeout(dev->smi_busy_wait,
96 orion_mdio_smi_is_done(dev), 102 orion_mdio_smi_is_done(dev),
97 timeout); 103 timeout);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
592 } 592 }
593} 593}
594 594
595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
596 void *accel_priv)
596{ 597{
597 struct mlx4_en_priv *priv = netdev_priv(dev); 598 struct mlx4_en_priv *priv = netdev_priv(dev);
598 u16 rings_p_up = priv->num_tx_rings_p_up; 599 u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
715 715
716void mlx4_en_tx_irq(struct mlx4_cq *mcq); 716void mlx4_en_tx_irq(struct mlx4_cq *mcq);
717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
718 void *accel_priv);
718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 719netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 720
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 721int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..cc68657f0536 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
1604 u32 seq_number; 1604 u32 seq_number;
1605 u8 vhdr_len = 0; 1605 u8 vhdr_len = 0;
1606 1606
1607 if (unlikely(ring > adapter->max_rds_rings)) 1607 if (unlikely(ring >= adapter->max_rds_rings))
1608 return NULL; 1608 return NULL;
1609 1609
1610 rds_ring = &recv_ctx->rds_rings[ring]; 1610 rds_ring = &recv_ctx->rds_rings[ring];
1611 1611
1612 index = netxen_get_lro_sts_refhandle(sts_data0); 1612 index = netxen_get_lro_sts_refhandle(sts_data0);
1613 if (unlikely(index > rds_ring->num_desc)) 1613 if (unlikely(index >= rds_ring->num_desc))
1614 return NULL; 1614 return NULL;
1615 1615
1616 buffer = &rds_ring->rx_buf_arr[index]; 1616 buffer = &rds_ring->rx_buf_arr[index];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..f2a7c7166e24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
487 struct qlcnic_mailbox *mailbox; 487 struct qlcnic_mailbox *mailbox;
488 u8 extend_lb_time; 488 u8 extend_lb_time;
489 u8 phys_port_id[ETH_ALEN]; 489 u8 phys_port_id[ETH_ALEN];
490 u8 lb_mode;
490}; 491};
491 492
492struct qlcnic_adapter_stats { 493struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
578 dma_addr_t phys_addr; 579 dma_addr_t phys_addr;
579 dma_addr_t hw_cons_phys_addr; 580 dma_addr_t hw_cons_phys_addr;
580 struct netdev_queue *txq; 581 struct netdev_queue *txq;
582 /* Lock to protect Tx descriptors cleanup */
583 spinlock_t tx_clean_lock;
581} ____cacheline_internodealigned_in_smp; 584} ____cacheline_internodealigned_in_smp;
582 585
583/* 586/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
808 811
809#define QLCNIC_ILB_MODE 0x1 812#define QLCNIC_ILB_MODE 0x1
810#define QLCNIC_ELB_MODE 0x2 813#define QLCNIC_ELB_MODE 0x2
814#define QLCNIC_LB_MODE_MASK 0x3
811 815
812#define QLCNIC_LINKEVENT 0x1 816#define QLCNIC_LINKEVENT 0x1
813#define QLCNIC_LB_RESPONSE 0x2 817#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
1093 struct qlcnic_filter_hash rx_fhash; 1097 struct qlcnic_filter_hash rx_fhash;
1094 struct list_head vf_mc_list; 1098 struct list_head vf_mc_list;
1095 1099
1096 spinlock_t tx_clean_lock;
1097 spinlock_t mac_learn_lock; 1100 spinlock_t mac_learn_lock;
1098 /* spinlock for catching rcv filters for eswitch traffic */ 1101 /* spinlock for catching rcv filters for eswitch traffic */
1099 spinlock_t rx_mac_learn_lock; 1102 spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1708void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); 1711void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1709void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); 1712void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1710void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); 1713void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1714void qlcnic_update_stats(struct qlcnic_adapter *);
1711 1715
1712/* Adapter hardware abstraction */ 1716/* Adapter hardware abstraction */
1713struct qlcnic_hardware_ops { 1717struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ffb15c7..f776f99f7915 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
447 447
448 qlcnic_83xx_poll_process_aen(adapter); 448 qlcnic_83xx_poll_process_aen(adapter);
449 449
450 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 450 if (ahw->diag_test) {
451 ahw->diag_cnt++; 451 if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
452 ahw->diag_cnt++;
452 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); 453 qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
453 return IRQ_HANDLED; 454 return IRQ_HANDLED;
454 } 455 }
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1345 } 1346 }
1346 1347
1347 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { 1348 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1348 /* disable and free mailbox interrupt */
1349 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1350 qlcnic_83xx_enable_mbx_poll(adapter);
1351 qlcnic_83xx_free_mbx_intr(adapter);
1352 }
1353 adapter->ahw->loopback_state = 0; 1349 adapter->ahw->loopback_state = 0;
1354 adapter->ahw->hw_ops->setup_link_event(adapter, 1); 1350 adapter->ahw->hw_ops->setup_link_event(adapter, 1);
1355 } 1351 }
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1363{ 1359{
1364 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1360 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1365 struct qlcnic_host_sds_ring *sds_ring; 1361 struct qlcnic_host_sds_ring *sds_ring;
1366 int ring, err; 1362 int ring;
1367 1363
1368 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1364 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1369 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { 1365 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1370 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { 1366 for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
1371 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1367 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1372 qlcnic_83xx_disable_intr(adapter, sds_ring); 1368 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1373 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 1369 qlcnic_83xx_disable_intr(adapter, sds_ring);
1374 qlcnic_83xx_enable_mbx_poll(adapter);
1375 } 1370 }
1376 } 1371 }
1377 1372
1378 qlcnic_fw_destroy_ctx(adapter); 1373 qlcnic_fw_destroy_ctx(adapter);
1379 qlcnic_detach(adapter); 1374 qlcnic_detach(adapter);
1380 1375
1381 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1382 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
1383 err = qlcnic_83xx_setup_mbx_intr(adapter);
1384 qlcnic_83xx_disable_mbx_poll(adapter);
1385 if (err) {
1386 dev_err(&adapter->pdev->dev,
1387 "%s: failed to setup mbx interrupt\n",
1388 __func__);
1389 goto out;
1390 }
1391 }
1392 }
1393 adapter->ahw->diag_test = 0; 1376 adapter->ahw->diag_test = 0;
1394 adapter->drv_sds_rings = drv_sds_rings; 1377 adapter->drv_sds_rings = drv_sds_rings;
1395 1378
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
1399 if (netif_running(netdev)) 1382 if (netif_running(netdev))
1400 __qlcnic_up(adapter, netdev); 1383 __qlcnic_up(adapter, netdev);
1401 1384
1402 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
1403 !(adapter->flags & QLCNIC_MSIX_ENABLED))
1404 qlcnic_83xx_disable_mbx_poll(adapter);
1405out: 1385out:
1406 netif_device_attach(netdev); 1386 netif_device_attach(netdev);
1407} 1387}
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1704 } 1684 }
1705 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); 1685 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
1706 1686
1707 /* Make sure carrier is off and queue is stopped during loopback */
1708 if (netif_running(netdev)) {
1709 netif_carrier_off(netdev);
1710 netif_tx_stop_all_queues(netdev);
1711 }
1712
1713 ret = qlcnic_do_lb_test(adapter, mode); 1687 ret = qlcnic_do_lb_test(adapter, mode);
1714 1688
1715 qlcnic_83xx_clear_lb_mode(adapter, mode); 1689 qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2141 ahw->link_autoneg = MSB(MSW(data[3])); 2115 ahw->link_autoneg = MSB(MSW(data[3]));
2142 ahw->module_type = MSB(LSW(data[3])); 2116 ahw->module_type = MSB(LSW(data[3]));
2143 ahw->has_link_events = 1; 2117 ahw->has_link_events = 1;
2118 ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
2144 qlcnic_advert_link_change(adapter, link_status); 2119 qlcnic_advert_link_change(adapter, link_status);
2145} 2120}
2146 2121
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
3754 return; 3729 return;
3755} 3730}
3756 3731
3732static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
3733{
3734 struct qlcnic_hardware_context *ahw = adapter->ahw;
3735 u32 offset;
3736
3737 offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
3738 dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
3739 readl(ahw->pci_base0 + offset),
3740 QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
3741 QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
3742 QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
3743}
3744
3757static void qlcnic_83xx_mailbox_worker(struct work_struct *work) 3745static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3758{ 3746{
3759 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, 3747 struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
3798 __func__, cmd->cmd_op, cmd->type, ahw->pci_func, 3786 __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
3799 ahw->op_mode); 3787 ahw->op_mode);
3800 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 3788 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
3789 qlcnic_dump_mailbox_registers(adapter);
3790 qlcnic_83xx_get_mbx_data(adapter, cmd);
3801 qlcnic_dump_mbx(adapter, cmd); 3791 qlcnic_dump_mbx(adapter, cmd);
3802 qlcnic_83xx_idc_request_reset(adapter, 3792 qlcnic_83xx_idc_request_reset(adapter,
3803 QLCNIC_FORCE_FW_DUMP_KEY); 3793 QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6caa6bfa..a6a33508e401 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
662 pci_channel_state_t); 662 pci_channel_state_t);
663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); 663pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
664void qlcnic_83xx_io_resume(struct pci_dev *); 664void qlcnic_83xx_io_resume(struct pci_dev *);
665void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
665#endif 666#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5b25d6..918e18ddf038 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
740 adapter->ahw->idc.err_code = -EIO; 740 adapter->ahw->idc.err_code = -EIO;
741 dev_err(&adapter->pdev->dev, 741 dev_err(&adapter->pdev->dev,
742 "%s: Device in unknown state\n", __func__); 742 "%s: Device in unknown state\n", __func__);
743 clear_bit(__QLCNIC_RESETTING, &adapter->state);
743 return 0; 744 return 0;
744} 745}
745 746
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
818 struct qlcnic_hardware_context *ahw = adapter->ahw; 819 struct qlcnic_hardware_context *ahw = adapter->ahw;
819 struct qlcnic_mailbox *mbx = ahw->mailbox; 820 struct qlcnic_mailbox *mbx = ahw->mailbox;
820 int ret = 0; 821 int ret = 0;
821 u32 owner;
822 u32 val; 822 u32 val;
823 823
824 /* Perform NIC configuration based ready state entry actions */ 824 /* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
848 set_bit(__QLCNIC_RESETTING, &adapter->state); 848 set_bit(__QLCNIC_RESETTING, &adapter->state);
849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); 849 qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
850 } else { 850 } else {
851 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); 851 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
852 if (ahw->pci_func == owner) 852 __func__);
853 qlcnic_dump_fw(adapter); 853 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
854 } 854 }
855 return -EIO; 855 return -EIO;
856 } 856 }
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
948 return 0; 948 return 0;
949} 949}
950 950
951static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) 951static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
952{ 952{
953 dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); 953 struct qlcnic_hardware_context *ahw = adapter->ahw;
954 u32 val, owner;
955
956 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
957 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
958 owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
959 if (ahw->pci_func == owner) {
960 qlcnic_83xx_stop_hw(adapter);
961 qlcnic_dump_fw(adapter);
962 }
963 }
964
965 netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
966 __func__);
954 clear_bit(__QLCNIC_RESETTING, &adapter->state); 967 clear_bit(__QLCNIC_RESETTING, &adapter->state);
955 adapter->ahw->idc.err_code = -EIO; 968 ahw->idc.err_code = -EIO;
956 969
957 return 0; 970 return;
958} 971}
959 972
960static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) 973static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
1063 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; 1076 adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
1064 qlcnic_83xx_periodic_tasks(adapter); 1077 qlcnic_83xx_periodic_tasks(adapter);
1065 1078
1066 /* Do not reschedule if firmaware is in hanged state and auto
1067 * recovery is disabled
1068 */
1069 if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
1070 return;
1071
1072 /* Re-schedule the function */ 1079 /* Re-schedule the function */
1073 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) 1080 if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
1074 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 1081 qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
1219 } 1226 }
1220 1227
1221 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 1228 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1222 if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) || 1229 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
1223 !qlcnic_auto_fw_reset) { 1230 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
1224 dev_err(&adapter->pdev->dev, 1231 __func__);
1225 "%s:failed, device in non reset mode\n", __func__); 1232 qlcnic_83xx_idc_enter_failed_state(adapter, 0);
1226 qlcnic_83xx_unlock_driver(adapter); 1233 qlcnic_83xx_unlock_driver(adapter);
1227 return; 1234 return;
1228 } 1235 }
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
1254 if (size & 0xF) 1261 if (size & 0xF)
1255 size = (size + 16) & ~0xF; 1262 size = (size + 16) & ~0xF;
1256 1263
1257 p_cache = kzalloc(size, GFP_KERNEL); 1264 p_cache = vzalloc(size);
1258 if (p_cache == NULL) 1265 if (p_cache == NULL)
1259 return -ENOMEM; 1266 return -ENOMEM;
1260 1267
1261 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, 1268 ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
1262 size / sizeof(u32)); 1269 size / sizeof(u32));
1263 if (ret) { 1270 if (ret) {
1264 kfree(p_cache); 1271 vfree(p_cache);
1265 return ret; 1272 return ret;
1266 } 1273 }
1267 /* 16 byte write to MS memory */ 1274 /* 16 byte write to MS memory */
1268 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache, 1275 ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
1269 size / 16); 1276 size / 16);
1270 if (ret) { 1277 if (ret) {
1271 kfree(p_cache); 1278 vfree(p_cache);
1272 return ret; 1279 return ret;
1273 } 1280 }
1274 kfree(p_cache); 1281 vfree(p_cache);
1275 1282
1276 return ret; 1283 return ret;
1277} 1284}
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
1939 p_dev->ahw->reset.seq_index = index; 1946 p_dev->ahw->reset.seq_index = index;
1940} 1947}
1941 1948
1942static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) 1949void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
1943{ 1950{
1944 p_dev->ahw->reset.seq_index = 0; 1951 p_dev->ahw->reset.seq_index = 0;
1945 1952
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
1994 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); 2001 val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
1995 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) 2002 if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
1996 qlcnic_dump_fw(adapter); 2003 qlcnic_dump_fw(adapter);
2004
2005 if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
2006 netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
2007 __func__);
2008 qlcnic_83xx_idc_enter_failed_state(adapter, 1);
2009 return err;
2010 }
2011
1997 qlcnic_83xx_init_hw(adapter); 2012 qlcnic_83xx_init_hw(adapter);
1998 2013
1999 if (qlcnic_83xx_copy_bootloader(adapter)) 2014 if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2073 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2088 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2074 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2089 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2075 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2090 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2076 adapter->max_sds_rings = ahw->max_rx_ques; 2091 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2077 adapter->max_tx_rings = ahw->max_tx_ques; 2092 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
2078 } else { 2093 } else {
2079 return -EIO; 2094 return -EIO;
2080 } 2095 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02fafcfd..6b08194aa0d4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
167 167
168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
169 169
170static inline int qlcnic_82xx_statistics(void) 170static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
171{ 171{
172 return ARRAY_SIZE(qlcnic_device_gstrings_stats) + 172 return ARRAY_SIZE(qlcnic_gstrings_stats) +
173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); 173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
174 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
174} 175}
175 176
176static inline int qlcnic_83xx_statistics(void) 177static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
177{ 178{
178 return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + 179 return ARRAY_SIZE(qlcnic_gstrings_stats) +
180 ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
179 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + 181 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
180 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); 182 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
183 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
181} 184}
182 185
183static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) 186static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
184{ 187{
185 if (qlcnic_82xx_check(adapter)) 188 int len = -1;
186 return qlcnic_82xx_statistics(); 189
187 else if (qlcnic_83xx_check(adapter)) 190 if (qlcnic_82xx_check(adapter)) {
188 return qlcnic_83xx_statistics(); 191 len = qlcnic_82xx_statistics(adapter);
189 else 192 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
190 return -1; 193 len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
194 } else if (qlcnic_83xx_check(adapter)) {
195 len = qlcnic_83xx_statistics(adapter);
196 }
197
198 return len;
191} 199}
192 200
193#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 201#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -667,30 +675,25 @@ qlcnic_set_ringparam(struct net_device *dev,
667static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, 675static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
668 u8 rx_ring, u8 tx_ring) 676 u8 rx_ring, u8 tx_ring)
669{ 677{
678 if (rx_ring == 0 || tx_ring == 0)
679 return -EINVAL;
680
670 if (rx_ring != 0) { 681 if (rx_ring != 0) {
671 if (rx_ring > adapter->max_sds_rings) { 682 if (rx_ring > adapter->max_sds_rings) {
672 netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", 683 netdev_err(adapter->netdev,
684 "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
673 rx_ring, adapter->max_sds_rings); 685 rx_ring, adapter->max_sds_rings);
674 return -EINVAL; 686 return -EINVAL;
675 } 687 }
676 } 688 }
677 689
678 if (tx_ring != 0) { 690 if (tx_ring != 0) {
679 if (qlcnic_82xx_check(adapter) && 691 if (tx_ring > adapter->max_tx_rings) {
680 (tx_ring > adapter->max_tx_rings)) {
681 netdev_err(adapter->netdev, 692 netdev_err(adapter->netdev,
682 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", 693 "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
683 tx_ring, adapter->max_tx_rings); 694 tx_ring, adapter->max_tx_rings);
684 return -EINVAL; 695 return -EINVAL;
685 } 696 }
686
687 if (qlcnic_83xx_check(adapter) &&
688 (tx_ring > QLCNIC_SINGLE_RING)) {
689 netdev_err(adapter->netdev,
690 "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
691 tx_ring, QLCNIC_SINGLE_RING);
692 return -EINVAL;
693 }
694 } 697 }
695 698
696 return 0; 699 return 0;
@@ -925,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
925 928
926static int qlcnic_get_sset_count(struct net_device *dev, int sset) 929static int qlcnic_get_sset_count(struct net_device *dev, int sset)
927{ 930{
928 int len;
929 931
930 struct qlcnic_adapter *adapter = netdev_priv(dev); 932 struct qlcnic_adapter *adapter = netdev_priv(dev);
931 switch (sset) { 933 switch (sset) {
932 case ETH_SS_TEST: 934 case ETH_SS_TEST:
933 return QLCNIC_TEST_LEN; 935 return QLCNIC_TEST_LEN;
934 case ETH_SS_STATS: 936 case ETH_SS_STATS:
935 len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; 937 return qlcnic_dev_statistics_len(adapter);
936 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
937 qlcnic_83xx_check(adapter))
938 return len;
939 return qlcnic_82xx_statistics();
940 default: 938 default:
941 return -EOPNOTSUPP; 939 return -EOPNOTSUPP;
942 } 940 }
@@ -948,6 +946,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
948 struct qlcnic_hardware_context *ahw = adapter->ahw; 946 struct qlcnic_hardware_context *ahw = adapter->ahw;
949 struct qlcnic_cmd_args cmd; 947 struct qlcnic_cmd_args cmd;
950 int ret, drv_sds_rings = adapter->drv_sds_rings; 948 int ret, drv_sds_rings = adapter->drv_sds_rings;
949 int drv_tx_rings = adapter->drv_tx_rings;
951 950
952 if (qlcnic_83xx_check(adapter)) 951 if (qlcnic_83xx_check(adapter))
953 return qlcnic_83xx_interrupt_test(netdev); 952 return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +979,7 @@ free_diag_res:
980 979
981clear_diag_irq: 980clear_diag_irq:
982 adapter->drv_sds_rings = drv_sds_rings; 981 adapter->drv_sds_rings = drv_sds_rings;
982 adapter->drv_tx_rings = drv_tx_rings;
983 clear_bit(__QLCNIC_RESETTING, &adapter->state); 983 clear_bit(__QLCNIC_RESETTING, &adapter->state);
984 984
985 return ret; 985 return ret;
@@ -1270,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
1270 return data; 1270 return data;
1271} 1271}
1272 1272
1273static void qlcnic_update_stats(struct qlcnic_adapter *adapter) 1273void qlcnic_update_stats(struct qlcnic_adapter *adapter)
1274{ 1274{
1275 struct qlcnic_host_tx_ring *tx_ring; 1275 struct qlcnic_host_tx_ring *tx_ring;
1276 int ring; 1276 int ring;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
134 struct qlcnic_skb_frag *buffrag; 134 struct qlcnic_skb_frag *buffrag;
135 int i, j; 135 int i, j;
136 136
137 spin_lock(&tx_ring->tx_clean_lock);
138
137 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
139 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
157 } 159 }
158 cmd_buf++; 160 cmd_buf++;
159 } 161 }
162
163 spin_unlock(&tx_ring->tx_clean_lock);
160} 164}
161 165
162void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) 166void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c9495347..ad1531ae3aa8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
687 if (adapter->ahw->linkup && !linkup) { 687 if (adapter->ahw->linkup && !linkup) {
688 netdev_info(netdev, "NIC Link is down\n"); 688 netdev_info(netdev, "NIC Link is down\n");
689 adapter->ahw->linkup = 0; 689 adapter->ahw->linkup = 0;
690 if (netif_running(netdev)) { 690 netif_carrier_off(netdev);
691 netif_carrier_off(netdev);
692 netif_tx_stop_all_queues(netdev);
693 }
694 } else if (!adapter->ahw->linkup && linkup) { 691 } else if (!adapter->ahw->linkup && linkup) {
692 /* Do not advertise Link up if the port is in loopback mode */
693 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
694 return;
695
695 netdev_info(netdev, "NIC Link is up\n"); 696 netdev_info(netdev, "NIC Link is up\n");
696 adapter->ahw->linkup = 1; 697 adapter->ahw->linkup = 1;
697 if (netif_running(netdev)) { 698 netif_carrier_on(netdev);
698 netif_carrier_on(netdev);
699 netif_wake_queue(netdev);
700 }
701 } 699 }
702} 700}
703 701
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
784 struct net_device *netdev = adapter->netdev; 782 struct net_device *netdev = adapter->netdev;
785 struct qlcnic_skb_frag *frag; 783 struct qlcnic_skb_frag *frag;
786 784
787 if (!spin_trylock(&adapter->tx_clean_lock)) 785 if (!spin_trylock(&tx_ring->tx_clean_lock))
788 return 1; 786 return 1;
789 787
790 sw_consumer = tx_ring->sw_consumer; 788 sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
813 break; 811 break;
814 } 812 }
815 813
814 tx_ring->sw_consumer = sw_consumer;
815
816 if (count && netif_running(netdev)) { 816 if (count && netif_running(netdev)) {
817 tx_ring->sw_consumer = sw_consumer;
818 smp_mb(); 817 smp_mb();
819 if (netif_tx_queue_stopped(tx_ring->txq) && 818 if (netif_tx_queue_stopped(tx_ring->txq) &&
820 netif_carrier_ok(netdev)) { 819 netif_carrier_ok(netdev)) {
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
840 */ 839 */
841 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 840 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
842 done = (sw_consumer == hw_consumer); 841 done = (sw_consumer == hw_consumer);
843 spin_unlock(&adapter->tx_clean_lock); 842
843 spin_unlock(&tx_ring->tx_clean_lock);
844 844
845 return done; 845 return done;
846} 846}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef8df13..550791b8fbae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
1178 } else { 1178 } else {
1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; 1179 adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; 1180 adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
1181 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
1181 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 1182 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
1182 } 1183 }
1183 1184
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1755 if (qlcnic_sriov_vf_check(adapter)) 1756 if (qlcnic_sriov_vf_check(adapter))
1756 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); 1757 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1757 smp_mb(); 1758 smp_mb();
1758 spin_lock(&adapter->tx_clean_lock);
1759 netif_carrier_off(netdev); 1759 netif_carrier_off(netdev);
1760 adapter->ahw->linkup = 0; 1760 adapter->ahw->linkup = 0;
1761 netif_tx_disable(netdev); 1761 netif_tx_disable(netdev);
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1776 1776
1777 for (ring = 0; ring < adapter->drv_tx_rings; ring++) 1777 for (ring = 0; ring < adapter->drv_tx_rings; ring++)
1778 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); 1778 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
1779 spin_unlock(&adapter->tx_clean_lock);
1780} 1779}
1781 1780
1782/* Usage: During suspend and firmware recovery module */ 1781/* Usage: During suspend and firmware recovery module */
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1940 qlcnic_detach(adapter); 1939 qlcnic_detach(adapter);
1941 1940
1942 adapter->drv_sds_rings = QLCNIC_SINGLE_RING; 1941 adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
1943 adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
1944 adapter->ahw->diag_test = test; 1942 adapter->ahw->diag_test = test;
1945 adapter->ahw->linkup = 0; 1943 adapter->ahw->linkup = 0;
1946 1944
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
2172 } 2170 }
2173 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); 2171 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
2174 tx_ring->cmd_buf_arr = cmd_buf_arr; 2172 tx_ring->cmd_buf_arr = cmd_buf_arr;
2173 spin_lock_init(&tx_ring->tx_clean_lock);
2175 } 2174 }
2176 2175
2177 if (qlcnic_83xx_check(adapter) || 2176 if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2299 rwlock_init(&adapter->ahw->crb_lock); 2298 rwlock_init(&adapter->ahw->crb_lock);
2300 mutex_init(&adapter->ahw->mem_lock); 2299 mutex_init(&adapter->ahw->mem_lock);
2301 2300
2302 spin_lock_init(&adapter->tx_clean_lock);
2303 INIT_LIST_HEAD(&adapter->mac_list); 2301 INIT_LIST_HEAD(&adapter->mac_list);
2304 2302
2305 qlcnic_register_dcb(adapter); 2303 qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2782 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2780 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2783 struct net_device_stats *stats = &netdev->stats; 2781 struct net_device_stats *stats = &netdev->stats;
2784 2782
2783 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2784 qlcnic_update_stats(adapter);
2785
2785 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 2786 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2786 stats->tx_packets = adapter->stats.xmitfinished; 2787 stats->tx_packets = adapter->stats.xmitfinished;
2787 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; 2788 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..024f8161d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
75 num_vfs = sriov->num_vfs; 75 num_vfs = sriov->num_vfs;
76 max = num_vfs + 1; 76 max = num_vfs + 1;
77 info->bit_offsets = 0xffff; 77 info->bit_offsets = 0xffff;
78 info->max_tx_ques = res->num_tx_queues / max;
79 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 78 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
80 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; 79 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
81 80
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
86 info->max_tx_mac_filters = temp; 85 info->max_tx_mac_filters = temp;
87 info->min_tx_bw = 0; 86 info->min_tx_bw = 0;
88 info->max_tx_bw = MAX_BW; 87 info->max_tx_bw = MAX_BW;
88 info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
89 } else { 89 } else {
90 id = qlcnic_sriov_func_to_index(adapter, func); 90 id = qlcnic_sriov_func_to_index(adapter, func);
91 if (id < 0) 91 if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
95 info->max_tx_bw = vp->max_tx_bw; 95 info->max_tx_bw = vp->max_tx_bw;
96 info->max_rx_ucast_mac_filters = num_vf_macs; 96 info->max_rx_ucast_mac_filters = num_vf_macs;
97 info->max_tx_mac_filters = num_vf_macs; 97 info->max_tx_mac_filters = num_vf_macs;
98 info->max_tx_ques = QLCNIC_SINGLE_RING;
98 } 99 }
99 100
100 info->max_rx_ip_addr = res->num_destip / max; 101 info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..797b56a0efc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
623 return -EOPNOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 if (netif_msg_hw(priv)) { 625 priv->adv_ts = 0;
626 if (priv->dma_cap.time_stamp) { 626 if (priv->dma_cap.atime_stamp && priv->extend_desc)
627 pr_debug("IEEE 1588-2002 Time Stamp supported\n"); 627 priv->adv_ts = 1;
628 priv->adv_ts = 0; 628
629 } 629 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
630 if (priv->dma_cap.atime_stamp && priv->extend_desc) { 630 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
631 pr_debug 631
632 ("IEEE 1588-2008 Advanced Time Stamp supported\n"); 632 if (netif_msg_hw(priv) && priv->adv_ts)
633 priv->adv_ts = 1; 633 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
634 }
635 }
636 634
637 priv->hw->ptp = &stmmac_ptp; 635 priv->hw->ptp = &stmmac_ptp;
638 priv->hwts_tx_en = 0; 636 priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
56 56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58 58
59 spin_unlock_irqrestore(&priv->lock, flags); 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60 60
61 return 0; 61 return 0;
62} 62}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
91 91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93 93
94 spin_unlock_irqrestore(&priv->lock, flags); 94 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95 95
96 return 0; 96 return 0;
97} 97}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5120d9ce1dd4..5330fd298705 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
740 /* set speed_in input in case RMII mode is used in 100Mbps */ 740 /* set speed_in input in case RMII mode is used in 100Mbps */
741 if (phy->speed == 100) 741 if (phy->speed == 100)
742 mac_control |= BIT(15); 742 mac_control |= BIT(15);
743 else if (phy->speed == 10)
744 mac_control |= BIT(18); /* In Band mode */
743 745
744 *link = true; 746 *link = true;
745 } else { 747 } else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
2106 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2108 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2107 for (i = res->start; i <= res->end; i++) { 2109 for (i = res->start; i <= res->end; i++) {
2108 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2110 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2109 dev_name(priv->dev), priv)) { 2111 dev_name(&pdev->dev), priv)) {
2110 dev_err(priv->dev, "error attaching irq\n"); 2112 dev_err(priv->dev, "error attaching irq\n");
2111 goto clean_ale_ret; 2113 goto clean_ale_ret;
2112 } 2114 }
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2080} 2080}
2081 2081
2082/* Return subqueue id on this core (one per core). */ 2082/* Return subqueue id on this core (one per core). */
2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) 2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2084 void *accel_priv)
2084{ 2085{
2085 return smp_processor_id(); 2086 return smp_processor_id();
2086} 2087}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
571 case HDLCDRVCTL_CALIBRATE: 571 case HDLCDRVCTL_CALIBRATE:
572 if(!capable(CAP_SYS_RAWIO)) 572 if(!capable(CAP_SYS_RAWIO))
573 return -EPERM; 573 return -EPERM;
574 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
575 return -EINVAL;
574 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 576 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
575 return 0; 577 return 0;
576 578
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1057 break; 1057 break;
1058 1058
1059 case SIOCYAMGCFG: 1059 case SIOCYAMGCFG:
1060 memset(&yi, 0, sizeof(yi));
1060 yi.cfg.mask = 0xffffffff; 1061 yi.cfg.mask = 0xffffffff;
1061 yi.cfg.iobase = yp->iobase; 1062 yi.cfg.iobase = yp->iobase;
1062 yi.cfg.irq = yp->irq; 1063 yi.cfg.irq = yp->irq;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713f6017..71baeb3ed905 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
261 struct sk_buff *skb; 261 struct sk_buff *skb;
262 262
263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; 263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
264 if (!net) { 264 if (!net || net->reg_state != NETREG_REGISTERED) {
265 netdev_err(net, "got receive callback but net device"
266 " not initialized yet\n");
267 packet->status = NVSP_STAT_FAIL; 265 packet->status = NVSP_STAT_FAIL;
268 return 0; 266 return 0;
269 } 267 }
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
327 return -EINVAL; 325 return -EINVAL;
328 326
329 nvdev->start_remove = true; 327 nvdev->start_remove = true;
330 cancel_delayed_work_sync(&ndevctx->dwork);
331 cancel_work_sync(&ndevctx->work); 328 cancel_work_sync(&ndevctx->work);
332 netif_tx_disable(ndev); 329 netif_tx_disable(ndev);
333 rndis_filter_device_remove(hdev); 330 rndis_filter_device_remove(hdev);
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
436 SET_ETHTOOL_OPS(net, &ethtool_ops); 433 SET_ETHTOOL_OPS(net, &ethtool_ops);
437 SET_NETDEV_DEV(net, &dev->device); 434 SET_NETDEV_DEV(net, &dev->device);
438 435
439 ret = register_netdev(net);
440 if (ret != 0) {
441 pr_err("Unable to register netdev.\n");
442 free_netdev(net);
443 goto out;
444 }
445
446 /* Notify the netvsc driver of the new device */ 436 /* Notify the netvsc driver of the new device */
447 device_info.ring_size = ring_size; 437 device_info.ring_size = ring_size;
448 ret = rndis_filter_device_add(dev, &device_info); 438 ret = rndis_filter_device_add(dev, &device_info);
449 if (ret != 0) { 439 if (ret != 0) {
450 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 440 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
451 unregister_netdev(net);
452 free_netdev(net); 441 free_netdev(net);
453 hv_set_drvdata(dev, NULL); 442 hv_set_drvdata(dev, NULL);
454 return ret; 443 return ret;
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
457 446
458 netif_carrier_on(net); 447 netif_carrier_on(net);
459 448
460out: 449 ret = register_netdev(net);
450 if (ret != 0) {
451 pr_err("Unable to register netdev.\n");
452 rndis_filter_device_remove(dev);
453 free_netdev(net);
454 }
455
461 return ret; 456 return ret;
462} 457}
463 458
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
299 299
300 if (vlan->fwd_priv) { 300 if (vlan->fwd_priv) {
301 skb->dev = vlan->lowerdev; 301 skb->dev = vlan->lowerdev;
302 ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); 302 ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
303 } else { 303 } else {
304 ret = macvlan_queue_xmit(skb, dev); 304 ret = macvlan_queue_xmit(skb, dev);
305 } 305 }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
338 .cache_update = eth_header_cache_update, 338 .cache_update = eth_header_cache_update,
339}; 339};
340 340
341static struct rtnl_link_ops macvlan_link_ops;
342
341static int macvlan_open(struct net_device *dev) 343static int macvlan_open(struct net_device *dev)
342{ 344{
343 struct macvlan_dev *vlan = netdev_priv(dev); 345 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
353 goto hash_add; 355 goto hash_add;
354 } 356 }
355 357
356 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { 358 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
359 dev->rtnl_link_ops == &macvlan_link_ops) {
357 vlan->fwd_priv = 360 vlan->fwd_priv =
358 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); 361 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
359 362
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
362 */ 365 */
363 if (IS_ERR_OR_NULL(vlan->fwd_priv)) { 366 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
364 vlan->fwd_priv = NULL; 367 vlan->fwd_priv = NULL;
365 } else { 368 } else
366 dev->features &= ~NETIF_F_LLTX;
367 return 0; 369 return 0;
368 }
369 } 370 }
370 371
371 err = -EBUSY; 372 err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
690 netdev_features_t features) 691 netdev_features_t features)
691{ 692{
692 struct macvlan_dev *vlan = netdev_priv(dev); 693 struct macvlan_dev *vlan = netdev_priv(dev);
694 netdev_features_t mask;
695
696 features |= NETIF_F_ALL_FOR_ALL;
697 features &= (vlan->set_features | ~MACVLAN_FEATURES);
698 mask = features;
699
700 features = netdev_increment_features(vlan->lowerdev->features,
701 features,
702 mask);
703 features |= NETIF_F_LLTX;
693 704
694 return features & (vlan->set_features | ~MACVLAN_FEATURES); 705 return features;
695} 706}
696 707
697static const struct ethtool_ops macvlan_ethtool_ops = { 708static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
1019 break; 1030 break;
1020 case NETDEV_FEAT_CHANGE: 1031 case NETDEV_FEAT_CHANGE:
1021 list_for_each_entry(vlan, &port->vlans, list) { 1032 list_for_each_entry(vlan, &port->vlans, list) {
1022 vlan->dev->features = dev->features & MACVLAN_FEATURES;
1023 vlan->dev->gso_max_size = dev->gso_max_size; 1033 vlan->dev->gso_max_size = dev->gso_max_size;
1024 netdev_features_change(vlan->dev); 1034 netdev_update_features(vlan->dev);
1025 } 1035 }
1026 break; 1036 break;
1027 case NETDEV_UNREGISTER: 1037 case NETDEV_UNREGISTER:
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..98434b84f041 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
565 int err = 0; 565 int err = 0;
566 566
567 atomic_set(&phydev->irq_disable, 0); 567 atomic_set(&phydev->irq_disable, 0);
568 if (request_irq(phydev->irq, phy_interrupt, 568 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
569 IRQF_SHARED, 569 phydev) < 0) {
570 "phy_interrupt",
571 phydev) < 0) {
572 pr_warn("%s: Can't get IRQ %d (PHY)\n", 570 pr_warn("%s: Can't get IRQ %d (PHY)\n",
573 phydev->bus->name, phydev->irq); 571 phydev->bus->name, phydev->irq);
574 phydev->irq = PHY_POLL; 572 phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1647 return NETDEV_TX_OK; 1647 return NETDEV_TX_OK;
1648} 1648}
1649 1649
1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) 1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1651 void *accel_priv)
1651{ 1652{
1652 /* 1653 /*
1653 * This helper function exists to help dev_pick_tx get the correct 1654 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
348 * different rxq no. here. If we could not get rxhash, then we would 348 * different rxq no. here. If we could not get rxhash, then we would
349 * hope the rxq no. may help here. 349 * hope the rxq no. may help here.
350 */ 350 */
351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) 351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
352 void *accel_priv)
352{ 353{
353 struct tun_struct *tun = netdev_priv(dev); 354 struct tun_struct *tun = netdev_priv(dev);
354 struct tun_flow_entry *e; 355 struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
276 module will be called cdc_mbim. 276 module will be called cdc_mbim.
277 277
278config USB_NET_DM9601 278config USB_NET_DM9601
279 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 279 tristate "Davicom DM96xx based USB 10/100 ethernet devices"
280 depends on USB_USBNET 280 depends on USB_USBNET
281 select CRC32 281 select CRC32
282 help 282 help
283 This option adds support for Davicom DM9601 based USB 1.1 283 This option adds support for Davicom DM9601/DM9620/DM9621A
284 10/100 Ethernet adapters. 284 based USB 10/100 Ethernet adapters.
285 285
286config USB_NET_SR9700 286config USB_NET_SR9700
287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" 287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..14aa48fa8d7e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices 2 * Davicom DM96xx USB 10/100Mbps ethernet devices
3 * 3 *
4 * Peter Korsgaard <jacmet@sunsite.dk> 4 * Peter Korsgaard <jacmet@sunsite.dk>
5 * 5 *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
364 dev->net->ethtool_ops = &dm9601_ethtool_ops; 364 dev->net->ethtool_ops = &dm9601_ethtool_ops;
365 dev->net->hard_header_len += DM_TX_OVERHEAD; 365 dev->net->hard_header_len += DM_TX_OVERHEAD;
366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
367 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; 367
368 /* dm9620/21a require room for 4 byte padding, even in dm9601
369 * mode, so we need +1 to be able to receive full size
370 * ethernet frames.
371 */
372 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
368 373
369 dev->mii.dev = dev->net; 374 dev->mii.dev = dev->net;
370 dev->mii.mdio_read = dm9601_mdio_read; 375 dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
468static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 473static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
469 gfp_t flags) 474 gfp_t flags)
470{ 475{
471 int len; 476 int len, pad;
472 477
473 /* format: 478 /* format:
474 b1: packet length low 479 b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
476 b3..n: packet data 481 b3..n: packet data
477 */ 482 */
478 483
479 len = skb->len; 484 len = skb->len + DM_TX_OVERHEAD;
485
486 /* workaround for dm962x errata with tx fifo getting out of
487 * sync if a USB bulk transfer retry happens right after a
488 * packet with odd / maxpacket length by adding up to 3 bytes
489 * padding.
490 */
491 while ((len & 1) || !(len % dev->maxpacket))
492 len++;
480 493
481 if (skb_headroom(skb) < DM_TX_OVERHEAD) { 494 len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
495 pad = len - skb->len;
496
497 if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
482 struct sk_buff *skb2; 498 struct sk_buff *skb2;
483 499
484 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); 500 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
485 dev_kfree_skb_any(skb); 501 dev_kfree_skb_any(skb);
486 skb = skb2; 502 skb = skb2;
487 if (!skb) 503 if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
490 506
491 __skb_push(skb, DM_TX_OVERHEAD); 507 __skb_push(skb, DM_TX_OVERHEAD);
492 508
493 /* usbnet adds padding if length is a multiple of packet size 509 if (pad) {
494 if so, adjust length value in header */ 510 memset(skb->data + skb->len, 0, pad);
495 if ((skb->len % dev->maxpacket) == 0) 511 __skb_put(skb, pad);
496 len++; 512 }
497 513
498 skb->data[0] = len; 514 skb->data[0] = len;
499 skb->data[1] = len >> 8; 515 skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
543} 559}
544 560
545static const struct driver_info dm9601_info = { 561static const struct driver_info dm9601_info = {
546 .description = "Davicom DM9601 USB Ethernet", 562 .description = "Davicom DM96xx USB 10/100 Ethernet",
547 .flags = FLAG_ETHER | FLAG_LINK_INTR, 563 .flags = FLAG_ETHER | FLAG_LINK_INTR,
548 .bind = dm9601_bind, 564 .bind = dm9601_bind,
549 .rx_fixup = dm9601_rx_fixup, 565 .rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
594 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ 610 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
595 .driver_info = (unsigned long)&dm9601_info, 611 .driver_info = (unsigned long)&dm9601_info,
596 }, 612 },
613 {
614 USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
615 .driver_info = (unsigned long)&dm9601_info,
616 },
597 {}, // END 617 {}, // END
598}; 618};
599 619
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
612module_usb_driver(dm9601_driver); 632module_usb_driver(dm9601_driver);
613 633
614MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); 634MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
615MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); 635MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
616MODULE_LICENSE("GPL"); 636MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
185#define BM_REQUEST_TYPE (0xa1) 185#define BM_REQUEST_TYPE (0xa1)
186#define B_NOTIFICATION (0x20) 186#define B_NOTIFICATION (0x20)
187#define W_VALUE (0x0) 187#define W_VALUE (0x0)
188#define W_INDEX (0x2)
189#define W_LENGTH (0x2) 188#define W_LENGTH (0x2)
190 189
191#define B_OVERRUN (0x1<<6) 190#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1487 struct uart_icount *icount; 1486 struct uart_icount *icount;
1488 struct hso_serial_state_notification *serial_state_notification; 1487 struct hso_serial_state_notification *serial_state_notification;
1489 struct usb_device *usb; 1488 struct usb_device *usb;
1489 int if_num;
1490 1490
1491 /* Sanity checks */ 1491 /* Sanity checks */
1492 if (!serial) 1492 if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
1495 handle_usb_error(status, __func__, serial->parent); 1495 handle_usb_error(status, __func__, serial->parent);
1496 return; 1496 return;
1497 } 1497 }
1498
1499 /* tiocmget is only supported on HSO_PORT_MODEM */
1498 tiocmget = serial->tiocmget; 1500 tiocmget = serial->tiocmget;
1499 if (!tiocmget) 1501 if (!tiocmget)
1500 return; 1502 return;
1503 BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
1504
1501 usb = serial->parent->usb; 1505 usb = serial->parent->usb;
1506 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1507
1508 /* wIndex should be the USB interface number of the port to which the
1509 * notification applies, which should always be the Modem port.
1510 */
1502 serial_state_notification = &tiocmget->serial_state_notification; 1511 serial_state_notification = &tiocmget->serial_state_notification;
1503 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || 1512 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
1504 serial_state_notification->bNotification != B_NOTIFICATION || 1513 serial_state_notification->bNotification != B_NOTIFICATION ||
1505 le16_to_cpu(serial_state_notification->wValue) != W_VALUE || 1514 le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
1506 le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || 1515 le16_to_cpu(serial_state_notification->wIndex) != if_num ||
1507 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { 1516 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
1508 dev_warn(&usb->dev, 1517 dev_warn(&usb->dev,
1509 "hso received invalid serial state notification\n"); 1518 "hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..f54637828574 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
117struct mcs7830_data { 117struct mcs7830_data {
118 u8 multi_filter[8]; 118 u8 multi_filter[8];
119 u8 config; 119 u8 config;
120 u8 link_counter;
121}; 120};
122 121
123static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 122static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
561{ 560{
562 u8 *buf = urb->transfer_buffer; 561 u8 *buf = urb->transfer_buffer;
563 bool link, link_changed; 562 bool link, link_changed;
564 struct mcs7830_data *data = mcs7830_get_data(dev);
565 563
566 if (urb->actual_length < 16) 564 if (urb->actual_length < 16)
567 return; 565 return;
568 566
569 link = !(buf[1] & 0x20); 567 link = !(buf[1] == 0x20);
570 link_changed = netif_carrier_ok(dev->net) != link; 568 link_changed = netif_carrier_ok(dev->net) != link;
571 if (link_changed) { 569 if (link_changed) {
572 data->link_counter++; 570 usbnet_link_change(dev, link, 0);
573 /* 571 netdev_dbg(dev->net, "Link Status is: %d\n", link);
574 track link state 20 times to guard against erroneous 572 }
575 link state changes reported sometimes by the chip
576 */
577 if (data->link_counter > 20) {
578 data->link_counter = 0;
579 usbnet_link_change(dev, link, 0);
580 netdev_dbg(dev->net, "Link Status is: %d\n", link);
581 }
582 } else
583 data->link_counter = 0;
584} 573}
585 574
586static const struct driver_info moschip_info = { 575static const struct driver_info moschip_info = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d208f8604981..5d776447d9c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1797 if (err) 1797 if (err)
1798 return err; 1798 return err;
1799 1799
1800 if (netif_running(vi->dev)) 1800 if (netif_running(vi->dev)) {
1801 for (i = 0; i < vi->curr_queue_pairs; i++)
1802 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1803 schedule_delayed_work(&vi->refill, 0);
1804
1801 for (i = 0; i < vi->max_queue_pairs; i++) 1805 for (i = 0; i < vi->max_queue_pairs; i++)
1802 virtnet_napi_enable(&vi->rq[i]); 1806 virtnet_napi_enable(&vi->rq[i]);
1807 }
1803 1808
1804 netif_device_attach(vi->dev); 1809 netif_device_attach(vi->dev);
1805 1810
1806 for (i = 0; i < vi->curr_queue_pairs; i++)
1807 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1808 schedule_delayed_work(&vi->refill, 0);
1809
1810 mutex_lock(&vi->config_lock); 1811 mutex_lock(&vi->config_lock);
1811 vi->config_enable = true; 1812 vi->config_enable = true;
1812 mutex_unlock(&vi->config_lock); 1813 mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 249e01c5600c..ed384fee76ac 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2440 /* update header length based on lower device */ 2440 /* update header length based on lower device */
2441 dev->hard_header_len = lowerdev->hard_header_len + 2441 dev->hard_header_len = lowerdev->hard_header_len +
2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2443 } 2443 } else if (use_ipv6)
2444 vxlan->flags |= VXLAN_F_IPV6;
2444 2445
2445 if (data[IFLA_VXLAN_TOS]) 2446 if (data[IFLA_VXLAN_TOS])
2446 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2447 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253c26ce..a366d6b4626f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
76 mask2 |= ATH9K_INT_CST; 76 mask2 |= ATH9K_INT_CST;
77 if (isr2 & AR_ISR_S2_TSFOOR) 77 if (isr2 & AR_ISR_S2_TSFOOR)
78 mask2 |= ATH9K_INT_TSFOOR; 78 mask2 |= ATH9K_INT_TSFOOR;
79
80 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
81 REG_WRITE(ah, AR_ISR_S2, isr2);
82 isr &= ~AR_ISR_BCNMISC;
83 }
79 } 84 }
80 85
81 isr = REG_READ(ah, AR_ISR_RAC); 86 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
87 isr = REG_READ(ah, AR_ISR_RAC);
88
82 if (isr == 0xffffffff) { 89 if (isr == 0xffffffff) {
83 *masked = 0; 90 *masked = 0;
84 return false; 91 return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
97 104
98 *masked |= ATH9K_INT_TX; 105 *masked |= ATH9K_INT_TX;
99 106
100 s0_s = REG_READ(ah, AR_ISR_S0_S); 107 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
108 s0_s = REG_READ(ah, AR_ISR_S0_S);
109 s1_s = REG_READ(ah, AR_ISR_S1_S);
110 } else {
111 s0_s = REG_READ(ah, AR_ISR_S0);
112 REG_WRITE(ah, AR_ISR_S0, s0_s);
113 s1_s = REG_READ(ah, AR_ISR_S1);
114 REG_WRITE(ah, AR_ISR_S1, s1_s);
115
116 isr &= ~(AR_ISR_TXOK |
117 AR_ISR_TXDESC |
118 AR_ISR_TXERR |
119 AR_ISR_TXEOL);
120 }
121
101 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); 122 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
102 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); 123 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
103
104 s1_s = REG_READ(ah, AR_ISR_S1_S);
105 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); 124 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
106 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); 125 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
107 } 126 }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
114 *masked |= mask2; 133 *masked |= mask2;
115 } 134 }
116 135
117 if (AR_SREV_9100(ah)) 136 if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
118 return true;
119
120 if (isr & AR_ISR_GENTMR) {
121 u32 s5_s; 137 u32 s5_s;
122 138
123 s5_s = REG_READ(ah, AR_ISR_S5_S); 139 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
140 s5_s = REG_READ(ah, AR_ISR_S5_S);
141 } else {
142 s5_s = REG_READ(ah, AR_ISR_S5);
143 }
144
124 ah->intr_gen_timer_trigger = 145 ah->intr_gen_timer_trigger =
125 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 146 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
126 147
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
133 if ((s5_s & AR_ISR_S5_TIM_TIMER) && 154 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
134 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 155 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
135 *masked |= ATH9K_INT_TIM_TIMER; 156 *masked |= ATH9K_INT_TIM_TIMER;
157
158 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
159 REG_WRITE(ah, AR_ISR_S5, s5_s);
160 isr &= ~AR_ISR_GENTMR;
161 }
136 } 162 }
137 163
164 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
165 REG_WRITE(ah, AR_ISR, isr);
166 REG_READ(ah, AR_ISR);
167 }
168
169 if (AR_SREV_9100(ah))
170 return true;
171
138 if (sync_cause) { 172 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause); 173 ath9k_debug_sync_cause(common, sync_cause);
140 fatal_int = 174 fatal_int =
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..608d739d1378 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
127 struct ath9k_vif_iter_data *iter_data = data; 127 struct ath9k_vif_iter_data *iter_data = data;
128 int i; 128 int i;
129 129
130 for (i = 0; i < ETH_ALEN; i++) 130 if (iter_data->hw_macaddr != NULL) {
131 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); 131 for (i = 0; i < ETH_ALEN; i++)
132 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
133 } else {
134 iter_data->hw_macaddr = mac;
135 }
132} 136}
133 137
134static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, 138static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
135 struct ieee80211_vif *vif) 139 struct ieee80211_vif *vif)
136{ 140{
137 struct ath_common *common = ath9k_hw_common(priv->ah); 141 struct ath_common *common = ath9k_hw_common(priv->ah);
138 struct ath9k_vif_iter_data iter_data; 142 struct ath9k_vif_iter_data iter_data;
139 143
140 /* 144 /*
141 * Use the hardware MAC address as reference, the hardware uses it 145 * Pick the MAC address of the first interface as the new hardware
142 * together with the BSSID mask when matching addresses. 146 * MAC address. The hardware will use it together with the BSSID mask
147 * when matching addresses.
143 */ 148 */
144 iter_data.hw_macaddr = common->macaddr; 149 iter_data.hw_macaddr = NULL;
145 memset(&iter_data.mask, 0xff, ETH_ALEN); 150 memset(&iter_data.mask, 0xff, ETH_ALEN);
146 151
147 if (vif) 152 if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
153 ath9k_htc_bssid_iter, &iter_data); 158 ath9k_htc_bssid_iter, &iter_data);
154 159
155 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 160 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
161
162 if (iter_data.hw_macaddr)
163 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
164
156 ath_hw_setbssidmask(common); 165 ath_hw_setbssidmask(common);
157} 166}
158 167
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1063 goto out; 1072 goto out;
1064 } 1073 }
1065 1074
1066 ath9k_htc_set_bssid_mask(priv, vif); 1075 ath9k_htc_set_mac_bssid_mask(priv, vif);
1067 1076
1068 priv->vif_slot |= (1 << avp->index); 1077 priv->vif_slot |= (1 << avp->index);
1069 priv->nvifs++; 1078 priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1128 1137
1129 ath9k_htc_set_opmode(priv); 1138 ath9k_htc_set_opmode(priv);
1130 1139
1131 ath9k_htc_set_bssid_mask(priv, vif); 1140 ath9k_htc_set_mac_bssid_mask(priv, vif);
1132 1141
1133 /* 1142 /*
1134 * Stop ANI only if there are no associated station interfaces. 1143 * Stop ANI only if there are no associated station interfaces.
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c7b166..21aa09e0e825 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
965 struct ath_common *common = ath9k_hw_common(ah); 965 struct ath_common *common = ath9k_hw_common(ah);
966 966
967 /* 967 /*
968 * Use the hardware MAC address as reference, the hardware uses it 968 * Pick the MAC address of the first interface as the new hardware
969 * together with the BSSID mask when matching addresses. 969 * MAC address. The hardware will use it together with the BSSID mask
970 * when matching addresses.
970 */ 971 */
971 memset(iter_data, 0, sizeof(*iter_data)); 972 memset(iter_data, 0, sizeof(*iter_data));
972 memset(&iter_data->mask, 0xff, ETH_ALEN); 973 memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 86605027c41d..e6272546395a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, 358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
360 {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, 360 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, 361 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 364 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 365 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
365 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 366 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
366 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 367 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
367 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 368 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, 376 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, 377 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
377#endif /* CONFIG_IWLMVM */ 383#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438bb2faf..a1b32ee9594a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2011 (hwsim_flags & HWSIM_TX_STAT_ACK)) { 2011 (hwsim_flags & HWSIM_TX_STAT_ACK)) {
2012 if (skb->len >= 16) { 2012 if (skb->len >= 16) {
2013 hdr = (struct ieee80211_hdr *) skb->data; 2013 hdr = (struct ieee80211_hdr *) skb->data;
2014 mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], 2014 mac80211_hwsim_monitor_ack(data2->channel,
2015 hdr->addr2); 2015 hdr->addr2);
2016 } 2016 }
2017 txi->flags |= IEEE80211_TX_STAT_ACK; 2017 txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
746} 746}
747 747
748static u16 748static u16
749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) 749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
750 void *accel_priv)
750{ 751{
751 skb->priority = cfg80211_classify8021d(skb); 752 skb->priority = cfg80211_classify8021d(skb);
752 return mwifiex_1d_to_wmm_queue[skb->priority]; 753 return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f494444bcd1..5a53195d016b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
740 }; 740 };
741 int index = rtlpci->rx_ring[rx_queue_idx].idx; 741 int index = rtlpci->rx_ring[rx_queue_idx].idx;
742 742
743 if (rtlpci->driver_is_goingto_unload)
744 return;
743 /*RX NORMAL PKT */ 745 /*RX NORMAL PKT */
744 while (count--) { 746 while (count--) {
745 /*rx descriptor */ 747 /*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1636 */ 1638 */
1637 set_hal_stop(rtlhal); 1639 set_hal_stop(rtlhal);
1638 1640
1641 rtlpci->driver_is_goingto_unload = true;
1639 rtlpriv->cfg->ops->disable_interrupt(hw); 1642 rtlpriv->cfg->ops->disable_interrupt(hw);
1640 cancel_work_sync(&rtlpriv->works.lps_change_work); 1643 cancel_work_sync(&rtlpriv->works.lps_change_work);
1641 1644
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1653 ppsc->rfchange_inprogress = true; 1656 ppsc->rfchange_inprogress = true;
1654 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); 1657 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1655 1658
1656 rtlpci->driver_is_goingto_unload = true;
1657 rtlpriv->cfg->ops->hw_disable(hw); 1659 rtlpriv->cfg->ops->hw_disable(hw);
1658 /* some things are not needed if firmware not available */ 1660 /* some things are not needed if firmware not available */
1659 if (!rtlpriv->max_fw_size) 1661 if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..c47794b9d42f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
101 101
102#define MAX_PENDING_REQS 256 102#define MAX_PENDING_REQS 256
103 103
104/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
106 * worst-case number of copy operations is MAX_SKB_FRAGS per
107 * ring slot.
108 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110
104struct xenvif { 111struct xenvif {
105 /* Unique identifier for this interface. */ 112 /* Unique identifier for this interface. */
106 domid_t domid; 113 domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
143 */ 150 */
144 RING_IDX rx_req_cons_peek; 151 RING_IDX rx_req_cons_peek;
145 152
146 /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 153 /* This array is allocated seperately as it is large */
147 * head/fragment page uses 2 copy operations because it 154 struct gnttab_copy *grant_copy_op;
148 * straddles two buffers in the frontend.
149 */
150 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
151 struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
152 155
156 /* We create one meta structure per ring request we consume, so
157 * the maximum number is the same as the ring size.
158 */
159 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
153 160
154 u8 fe_dev_addr[6]; 161 u8 fe_dev_addr[6];
155 162
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..fff8cddfed81 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/vmalloc.h>
37 38
38#include <xen/events.h> 39#include <xen/events.h>
39#include <asm/xen/hypercall.h> 40#include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
307 SET_NETDEV_DEV(dev, parent); 308 SET_NETDEV_DEV(dev, parent);
308 309
309 vif = netdev_priv(dev); 310 vif = netdev_priv(dev);
311
312 vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
313 MAX_GRANT_COPY_OPS);
314 if (vif->grant_copy_op == NULL) {
315 pr_warn("Could not allocate grant copy space for %s\n", name);
316 free_netdev(dev);
317 return ERR_PTR(-ENOMEM);
318 }
319
310 vif->domid = domid; 320 vif->domid = domid;
311 vif->handle = handle; 321 vif->handle = handle;
312 vif->can_sg = 1; 322 vif->can_sg = 1;
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
487 497
488 unregister_netdev(vif->dev); 498 unregister_netdev(vif->dev);
489 499
500 vfree(vif->grant_copy_op);
490 free_netdev(vif->dev); 501 free_netdev(vif->dev);
491 502
492 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e884ee1fe7ed..78425554a537 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
608 if (!npo.copy_prod) 608 if (!npo.copy_prod)
609 return; 609 return;
610 610
611 BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); 611 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
613 613
614 while ((skb = __skb_dequeue(&rxq)) != NULL) { 614 while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1197,6 +1197,9 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1197 1197
1198 err = -EPROTO; 1198 err = -EPROTO;
1199 1199
1200 if (fragment)
1201 goto out;
1202
1200 switch (ip_hdr(skb)->protocol) { 1203 switch (ip_hdr(skb)->protocol) {
1201 case IPPROTO_TCP: 1204 case IPPROTO_TCP:
1202 err = maybe_pull_tail(skb, 1205 err = maybe_pull_tail(skb,
@@ -1206,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1206 goto out; 1209 goto out;
1207 1210
1208 if (!skb_partial_csum_set(skb, off, 1211 if (!skb_partial_csum_set(skb, off,
1209 offsetof(struct tcphdr, check))) 1212 offsetof(struct tcphdr, check))) {
1213 err = -EPROTO;
1210 goto out; 1214 goto out;
1215 }
1211 1216
1212 if (recalculate_partial_csum) 1217 if (recalculate_partial_csum)
1213 tcp_hdr(skb)->check = 1218 tcp_hdr(skb)->check =
@@ -1224,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1224 goto out; 1229 goto out;
1225 1230
1226 if (!skb_partial_csum_set(skb, off, 1231 if (!skb_partial_csum_set(skb, off,
1227 offsetof(struct udphdr, check))) 1232 offsetof(struct udphdr, check))) {
1233 err = -EPROTO;
1228 goto out; 1234 goto out;
1235 }
1229 1236
1230 if (recalculate_partial_csum) 1237 if (recalculate_partial_csum)
1231 udp_hdr(skb)->check = 1238 udp_hdr(skb)->check =
@@ -1347,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1347 goto out; 1354 goto out;
1348 1355
1349 if (!skb_partial_csum_set(skb, off, 1356 if (!skb_partial_csum_set(skb, off,
1350 offsetof(struct tcphdr, check))) 1357 offsetof(struct tcphdr, check))) {
1358 err = -EPROTO;
1351 goto out; 1359 goto out;
1360 }
1352 1361
1353 if (recalculate_partial_csum) 1362 if (recalculate_partial_csum)
1354 tcp_hdr(skb)->check = 1363 tcp_hdr(skb)->check =
@@ -1365,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1365 goto out; 1374 goto out;
1366 1375
1367 if (!skb_partial_csum_set(skb, off, 1376 if (!skb_partial_csum_set(skb, off,
1368 offsetof(struct udphdr, check))) 1377 offsetof(struct udphdr, check))) {
1378 err = -EPROTO;
1369 goto out; 1379 goto out;
1380 }
1370 1381
1371 if (recalculate_partial_csum) 1382 if (recalculate_partial_csum)
1372 udp_hdr(skb)->check = 1383 udp_hdr(skb)->check =
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
20 depends on OF_IRQ 20 depends on OF_IRQ
21 help 21 help
22 This option builds in test cases for the device tree infrastructure 22 This option builds in test cases for the device tree infrastructure
23 that are executed one at boot time, and the results dumped to the 23 that are executed once at boot time, and the results dumped to the
24 console. 24 console.
25 25
26 If unsure, say N here, but this option is safe to enable. 26 If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
69 (unsigned long long)cp, (unsigned long long)s, 69 (unsigned long long)cp, (unsigned long long)s,
70 (unsigned long long)da); 70 (unsigned long long)da);
71 71
72 /*
73 * If the number of address cells is larger than 2 we assume the
74 * mapping doesn't specify a physical address. Rather, the address
75 * specifies an identifier that must match exactly.
76 */
77 if (na > 2 && memcmp(range, addr, na * 4) != 0)
78 return OF_BAD_ADDR;
79
80 if (da < cp || da >= (cp + s)) 72 if (da < cp || da >= (cp + s))
81 return OF_BAD_ADDR; 73 return OF_BAD_ADDR;
82 return da - cp; 74 return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
922 */ 922 */
923void __init unflatten_and_copy_device_tree(void) 923void __init unflatten_and_copy_device_tree(void)
924{ 924{
925 int size = __be32_to_cpu(initial_boot_params->totalsize); 925 int size;
926 void *dt = early_init_dt_alloc_memory_arch(size, 926 void *dt;
927
928 if (!initial_boot_params) {
929 pr_warn("No valid device tree found, continuing without\n");
930 return;
931 }
932
933 size = __be32_to_cpu(initial_boot_params->totalsize);
934 dt = early_init_dt_alloc_memory_arch(size,
927 __alignof__(struct boot_param_header)); 935 __alignof__(struct boot_param_header));
928 936
929 if (dt) { 937 if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
165 if (of_get_property(ipar, "interrupt-controller", NULL) != 165 if (of_get_property(ipar, "interrupt-controller", NULL) !=
166 NULL) { 166 NULL) {
167 pr_debug(" -> got it !\n"); 167 pr_debug(" -> got it !\n");
168 of_node_put(old);
169 return 0; 168 return 0;
170 } 169 }
171 170
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
250 * Successfully parsed an interrrupt-map translation; copy new 249 * Successfully parsed an interrrupt-map translation; copy new
251 * interrupt specifier into the out_irq structure 250 * interrupt specifier into the out_irq structure
252 */ 251 */
253 of_node_put(out_irq->np); 252 out_irq->np = newpar;
254 out_irq->np = of_node_get(newpar);
255 253
256 match_array = imap - newaddrsize - newintsize; 254 match_array = imap - newaddrsize - newintsize;
257 for (i = 0; i < newintsize; i++) 255 for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
268 } 266 }
269 fail: 267 fail:
270 of_node_put(ipar); 268 of_node_put(ipar);
271 of_node_put(out_irq->np);
272 of_node_put(newpar); 269 of_node_put(newpar);
273 270
274 return -EINVAL; 271 return -EINVAL;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f67673..e86439283a5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
279 279
280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
281 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
282 acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); 282 if (status != AE_NOT_FOUND)
283 acpi_handle_warn(handle,
284 "can't evaluate _ADR (%#x)\n", status);
283 return AE_OK; 285 return AE_OK;
284 } 286 }
285 287
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
643 slot->flags &= (~SLOT_ENABLED); 645 slot->flags &= (~SLOT_ENABLED);
644} 646}
645 647
648static bool acpiphp_no_hotplug(acpi_handle handle)
649{
650 struct acpi_device *adev = NULL;
651
652 acpi_bus_get_device(handle, &adev);
653 return adev && adev->flags.no_hotplug;
654}
655
656static bool slot_no_hotplug(struct acpiphp_slot *slot)
657{
658 struct acpiphp_func *func;
659
660 list_for_each_entry(func, &slot->funcs, sibling)
661 if (acpiphp_no_hotplug(func_to_handle(func)))
662 return true;
663
664 return false;
665}
646 666
647/** 667/**
648 * get_slot_status - get ACPI slot status 668 * get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
701 unsigned long long sta; 721 unsigned long long sta;
702 722
703 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 723 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
704 alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; 724 alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
725 || acpiphp_no_hotplug(handle);
705 } 726 }
706 if (!alive) { 727 if (!alive) {
707 u32 v; 728 u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
741 struct pci_dev *dev, *tmp; 762 struct pci_dev *dev, *tmp;
742 763
743 mutex_lock(&slot->crit_sect); 764 mutex_lock(&slot->crit_sect);
744 /* wake up all functions */ 765 if (slot_no_hotplug(slot)) {
745 if (get_slot_status(slot) == ACPI_STA_ALL) { 766 ; /* do nothing */
767 } else if (get_slot_status(slot) == ACPI_STA_ALL) {
746 /* remove stale devices if any */ 768 /* remove stale devices if any */
747 list_for_each_entry_safe(dev, tmp, &bus->devices, 769 list_for_each_entry_safe(dev, tmp, &bus->devices,
748 bus_list) 770 bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..f7ebdba14bde 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
330static void pci_acpi_setup(struct device *dev) 330static void pci_acpi_setup(struct device *dev)
331{ 331{
332 struct pci_dev *pci_dev = to_pci_dev(dev); 332 struct pci_dev *pci_dev = to_pci_dev(dev);
333 acpi_handle handle = ACPI_HANDLE(dev); 333 struct acpi_device *adev = ACPI_COMPANION(dev);
334 struct acpi_device *adev;
335 334
336 if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) 335 if (!adev)
336 return;
337
338 pci_acpi_add_pm_notifier(adev, pci_dev);
339 if (!adev->wakeup.flags.valid)
337 return; 340 return;
338 341
339 device_set_wakeup_capable(dev, true); 342 device_set_wakeup_capable(dev, true);
340 acpi_pci_sleep_wake(pci_dev, false); 343 acpi_pci_sleep_wake(pci_dev, false);
341
342 pci_acpi_add_pm_notifier(adev, pci_dev);
343 if (adev->wakeup.flags.run_wake) 344 if (adev->wakeup.flags.run_wake)
344 device_set_run_wake(dev, true); 345 device_set_run_wake(dev, true);
345} 346}
346 347
347static void pci_acpi_cleanup(struct device *dev) 348static void pci_acpi_cleanup(struct device *dev)
348{ 349{
349 acpi_handle handle = ACPI_HANDLE(dev); 350 struct acpi_device *adev = ACPI_COMPANION(dev);
350 struct acpi_device *adev; 351
352 if (!adev)
353 return;
351 354
352 if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { 355 pci_acpi_remove_pm_notifier(adev);
356 if (adev->wakeup.flags.valid) {
353 device_set_wakeup_capable(dev, false); 357 device_set_wakeup_capable(dev, false);
354 device_set_run_wake(dev, false); 358 device_set_run_wake(dev, false);
355 pci_acpi_remove_pm_notifier(adev);
356 } 359 }
357} 360}
358 361
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d52361..330ef2d06567 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
24config OMAP_USB2 24config OMAP_USB2
25 tristate "OMAP USB2 PHY Driver" 25 tristate "OMAP USB2 PHY Driver"
26 depends on ARCH_OMAP2PLUS 26 depends on ARCH_OMAP2PLUS
27 depends on USB_PHY
27 select GENERIC_PHY 28 select GENERIC_PHY
28 select USB_PHY
29 select OMAP_CONTROL_USB 29 select OMAP_CONTROL_USB
30 help 30 help
31 Enable this to support the transceiver that is part of SOC. This 31 Enable this to support the transceiver that is part of SOC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
36config TWL4030_USB 36config TWL4030_USB
37 tristate "TWL4030 USB Transceiver Driver" 37 tristate "TWL4030 USB Transceiver Driver"
38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS 38 depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
39 depends on USB_PHY
39 select GENERIC_PHY 40 select GENERIC_PHY
40 select USB_PHY
41 help 41 help
42 Enable this to support the USB OTG transceiver on TWL4030 42 Enable this to support the USB OTG transceiver on TWL4030
43 family chips (including the TWL5030 and TPS659x0 devices). 43 family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb81554..58e0e9739028 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
437 int id; 437 int id;
438 struct phy *phy; 438 struct phy *phy;
439 439
440 if (!dev) { 440 if (WARN_ON(!dev))
441 dev_WARN(dev, "no device provided for PHY\n"); 441 return ERR_PTR(-EINVAL);
442 ret = -EINVAL;
443 goto err0;
444 }
445 442
446 phy = kzalloc(sizeof(*phy), GFP_KERNEL); 443 phy = kzalloc(sizeof(*phy), GFP_KERNEL);
447 if (!phy) { 444 if (!phy)
448 ret = -ENOMEM; 445 return ERR_PTR(-ENOMEM);
449 goto err0;
450 }
451 446
452 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); 447 id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
453 if (id < 0) { 448 if (id < 0) {
454 dev_err(dev, "unable to get id\n"); 449 dev_err(dev, "unable to get id\n");
455 ret = id; 450 ret = id;
456 goto err0; 451 goto free_phy;
457 } 452 }
458 453
459 device_initialize(&phy->dev); 454 device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
468 463
469 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id); 464 ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
470 if (ret) 465 if (ret)
471 goto err1; 466 goto put_dev;
472 467
473 ret = device_add(&phy->dev); 468 ret = device_add(&phy->dev);
474 if (ret) 469 if (ret)
475 goto err1; 470 goto put_dev;
476 471
477 if (pm_runtime_enabled(dev)) { 472 if (pm_runtime_enabled(dev)) {
478 pm_runtime_enable(&phy->dev); 473 pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
481 476
482 return phy; 477 return phy;
483 478
484err1: 479put_dev:
485 ida_remove(&phy_ida, phy->id);
486 put_device(&phy->dev); 480 put_device(&phy->dev);
481 ida_remove(&phy_ida, phy->id);
482free_phy:
487 kfree(phy); 483 kfree(phy);
488
489err0:
490 return ERR_PTR(ret); 484 return ERR_PTR(ret);
491} 485}
492EXPORT_SYMBOL_GPL(phy_create); 486EXPORT_SYMBOL_GPL(phy_create);
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576d8b12..114f5ef4b73a 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
512 512
513static const struct acpi_device_id byt_gpio_acpi_match[] = { 513static const struct acpi_device_id byt_gpio_acpi_match[] = {
514 { "INT33B2", 0 }, 514 { "INT33B2", 0 },
515 { "INT33FC", 0 },
515 { } 516 { }
516}; 517};
517MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); 518MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d970a52..e2142956a8e5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
254#define PINMUX_GPIO(_pin) \ 254#define PINMUX_GPIO(_pin) \
255 [GPIO_##_pin] = { \ 255 [GPIO_##_pin] = { \
256 .pin = (u16)-1, \ 256 .pin = (u16)-1, \
257 .name = __stringify(name), \ 257 .name = __stringify(GPIO_##_pin), \
258 .enum_id = _pin##_DATA, \ 258 .enum_id = _pin##_DATA, \
259 } 259 }
260 260
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054afe840..85ad58c6da17 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
196config BATTERY_MAX17042 196config BATTERY_MAX17042
197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" 197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
198 depends on I2C 198 depends on I2C
199 select REGMAP_I2C
199 help 200 help
200 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries 201 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
201 in handheld and portable equipment. The MAX17042 is configured 202 in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e667296360..557af943b2f5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
511 dev_set_drvdata(dev, psy); 511 dev_set_drvdata(dev, psy);
512 psy->dev = dev; 512 psy->dev = dev;
513 513
514 rc = dev_set_name(dev, "%s", psy->name);
515 if (rc)
516 goto dev_set_name_failed;
517
514 INIT_WORK(&psy->changed_work, power_supply_changed_work); 518 INIT_WORK(&psy->changed_work, power_supply_changed_work);
515 519
516 rc = power_supply_check_supplies(psy); 520 rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
524 if (rc) 528 if (rc)
525 goto wakeup_init_failed; 529 goto wakeup_init_failed;
526 530
527 rc = kobject_set_name(&dev->kobj, "%s", psy->name);
528 if (rc)
529 goto kobject_set_name_failed;
530
531 rc = device_add(dev); 531 rc = device_add(dev);
532 if (rc) 532 if (rc)
533 goto device_add_failed; 533 goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
553register_cooler_failed: 553register_cooler_failed:
554 psy_unregister_thermal(psy); 554 psy_unregister_thermal(psy);
555register_thermal_failed: 555register_thermal_failed:
556wakeup_init_failed:
557 device_del(dev); 556 device_del(dev);
558kobject_set_name_failed:
559device_add_failed: 557device_add_failed:
558wakeup_init_failed:
560check_supplies_failed: 559check_supplies_failed:
560dev_set_name_failed:
561 put_device(dev); 561 put_device(dev);
562success: 562success:
563 return rc; 563 return rc;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c504460..3c6768378a94 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
833 return 0; 833 return 0;
834} 834}
835 835
836static const struct x86_cpu_id energy_unit_quirk_ids[] = {
837 { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
838 {}
839};
840
836static int rapl_check_unit(struct rapl_package *rp, int cpu) 841static int rapl_check_unit(struct rapl_package *rp, int cpu)
837{ 842{
838 u64 msr_val; 843 u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
853 * time unit: 1/time_unit_divisor Seconds 858 * time unit: 1/time_unit_divisor Seconds
854 */ 859 */
855 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; 860 value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
856 rp->energy_unit_divisor = 1 << value; 861 /* some CPUs have different way to calculate energy unit */
857 862 if (x86_match_cpu(energy_unit_quirk_ids))
863 rp->energy_unit_divisor = 1000000 / (1 << value);
864 else
865 rp->energy_unit_divisor = 1 << value;
858 866
859 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; 867 value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
860 rp->power_unit_divisor = 1 << value; 868 rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
941static const struct x86_cpu_id rapl_ids[] = { 949static const struct x86_cpu_id rapl_ids[] = {
942 { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */ 950 { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
943 { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */ 951 { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
952 { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
944 { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */ 953 { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
945 { X86_VENDOR_INTEL, 6, 0x45},/* HSW */ 954 { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
946 /* TODO: Add more CPU IDs after testing */ 955 /* TODO: Add more CPU IDs after testing */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b4a445e7f78d..77711d4bd377 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -70,6 +70,14 @@ config REGULATOR_88PM8607
70 help 70 help
71 This driver supports 88PM8607 voltage regulator chips. 71 This driver supports 88PM8607 voltage regulator chips.
72 72
73config REGULATOR_ACT8865
74 tristate "Active-semi act8865 voltage regulator"
75 depends on I2C
76 select REGMAP_I2C
77 help
78 This driver controls a active-semi act8865 voltage output
79 regulator via I2C bus.
80
73config REGULATOR_AD5398 81config REGULATOR_AD5398
74 tristate "Analog Devices AD5398/AD5821 regulators" 82 tristate "Analog Devices AD5398/AD5821 regulators"
75 depends on I2C 83 depends on I2C
@@ -249,6 +257,13 @@ config REGULATOR_LP8788
249 help 257 help
250 This driver supports LP8788 voltage regulator chip. 258 This driver supports LP8788 voltage regulator chip.
251 259
260config REGULATOR_MAX14577
261 tristate "Maxim 14577 regulator"
262 depends on MFD_MAX14577
263 help
264 This driver controls a Maxim 14577 regulator via I2C bus.
265 The regulators include safeout LDO and current regulator 'CHARGER'.
266
252config REGULATOR_MAX1586 267config REGULATOR_MAX1586
253 tristate "Maxim 1586/1587 voltage regulator" 268 tristate "Maxim 1586/1587 voltage regulator"
254 depends on I2C 269 depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 01c597ea1744..979f9ddcf259 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
14obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o 14obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
15obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o 15obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
16obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o 16obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
17obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
17obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o 18obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
18obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o 19obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
19obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o 20obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
35obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o 36obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
36obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o 37obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
37obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o 38obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
39obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
38obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o 40obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
39obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o 41obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
40obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o 42obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 603f192e84f1..c625468c7f2c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -2998,37 +2998,6 @@ static void abx500_get_regulator_info(struct ab8500 *ab8500)
2998 } 2998 }
2999} 2999}
3000 3000
3001static int ab8500_regulator_init_registers(struct platform_device *pdev,
3002 int id, int mask, int value)
3003{
3004 struct ab8500_reg_init *reg_init = abx500_regulator.init;
3005 int err;
3006
3007 BUG_ON(value & ~mask);
3008 BUG_ON(mask & ~reg_init[id].mask);
3009
3010 /* initialize register */
3011 err = abx500_mask_and_set_register_interruptible(
3012 &pdev->dev,
3013 reg_init[id].bank,
3014 reg_init[id].addr,
3015 mask, value);
3016 if (err < 0) {
3017 dev_err(&pdev->dev,
3018 "Failed to initialize 0x%02x, 0x%02x.\n",
3019 reg_init[id].bank,
3020 reg_init[id].addr);
3021 return err;
3022 }
3023 dev_vdbg(&pdev->dev,
3024 " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
3025 reg_init[id].bank,
3026 reg_init[id].addr,
3027 mask, value);
3028
3029 return 0;
3030}
3031
3032static int ab8500_regulator_register(struct platform_device *pdev, 3001static int ab8500_regulator_register(struct platform_device *pdev,
3033 struct regulator_init_data *init_data, 3002 struct regulator_init_data *init_data,
3034 int id, struct device_node *np) 3003 int id, struct device_node *np)
@@ -3036,7 +3005,6 @@ static int ab8500_regulator_register(struct platform_device *pdev,
3036 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 3005 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
3037 struct ab8500_regulator_info *info = NULL; 3006 struct ab8500_regulator_info *info = NULL;
3038 struct regulator_config config = { }; 3007 struct regulator_config config = { };
3039 int err;
3040 3008
3041 /* assign per-regulator data */ 3009 /* assign per-regulator data */
3042 info = &abx500_regulator.info[id]; 3010 info = &abx500_regulator.info[id];
@@ -3058,17 +3026,12 @@ static int ab8500_regulator_register(struct platform_device *pdev,
3058 } 3026 }
3059 3027
3060 /* register regulator with framework */ 3028 /* register regulator with framework */
3061 info->regulator = regulator_register(&info->desc, &config); 3029 info->regulator = devm_regulator_register(&pdev->dev, &info->desc,
3030 &config);
3062 if (IS_ERR(info->regulator)) { 3031 if (IS_ERR(info->regulator)) {
3063 err = PTR_ERR(info->regulator);
3064 dev_err(&pdev->dev, "failed to register regulator %s\n", 3032 dev_err(&pdev->dev, "failed to register regulator %s\n",
3065 info->desc.name); 3033 info->desc.name);
3066 /* when we fail, un-register all earlier regulators */ 3034 return PTR_ERR(info->regulator);
3067 while (--id >= 0) {
3068 info = &abx500_regulator.info[id];
3069 regulator_unregister(info->regulator);
3070 }
3071 return err;
3072 } 3035 }
3073 3036
3074 return 0; 3037 return 0;
@@ -3095,9 +3058,7 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
3095{ 3058{
3096 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 3059 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
3097 struct device_node *np = pdev->dev.of_node; 3060 struct device_node *np = pdev->dev.of_node;
3098 struct ab8500_platform_data *ppdata; 3061 int err;
3099 struct ab8500_regulator_platform_data *pdata;
3100 int i, err;
3101 3062
3102 if (!ab8500) { 3063 if (!ab8500) {
3103 dev_err(&pdev->dev, "null mfd parent\n"); 3064 dev_err(&pdev->dev, "null mfd parent\n");
@@ -3106,83 +3067,20 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
3106 3067
3107 abx500_get_regulator_info(ab8500); 3068 abx500_get_regulator_info(ab8500);
3108 3069
3109 if (np) { 3070 err = of_regulator_match(&pdev->dev, np,
3110 err = of_regulator_match(&pdev->dev, np, 3071 abx500_regulator.match,
3111 abx500_regulator.match, 3072 abx500_regulator.match_size);
3112 abx500_regulator.match_size); 3073 if (err < 0) {
3113 if (err < 0) { 3074 dev_err(&pdev->dev,
3114 dev_err(&pdev->dev, 3075 "Error parsing regulator init data: %d\n", err);
3115 "Error parsing regulator init data: %d\n", err);
3116 return err;
3117 }
3118
3119 err = ab8500_regulator_of_probe(pdev, np);
3120 return err;
3121 }
3122
3123 ppdata = dev_get_platdata(ab8500->dev);
3124 if (!ppdata) {
3125 dev_err(&pdev->dev, "null parent pdata\n");
3126 return -EINVAL;
3127 }
3128
3129 pdata = ppdata->regulator;
3130 if (!pdata) {
3131 dev_err(&pdev->dev, "null pdata\n");
3132 return -EINVAL;
3133 }
3134
3135 /* make sure the platform data has the correct size */
3136 if (pdata->num_regulator != abx500_regulator.info_size) {
3137 dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
3138 return -EINVAL;
3139 }
3140
3141 /* initialize debug (initial state is recorded with this call) */
3142 err = ab8500_regulator_debug_init(pdev);
3143 if (err)
3144 return err; 3076 return err;
3145
3146 /* initialize registers */
3147 for (i = 0; i < pdata->num_reg_init; i++) {
3148 int id, mask, value;
3149
3150 id = pdata->reg_init[i].id;
3151 mask = pdata->reg_init[i].mask;
3152 value = pdata->reg_init[i].value;
3153
3154 /* check for configuration errors */
3155 BUG_ON(id >= abx500_regulator.init_size);
3156
3157 err = ab8500_regulator_init_registers(pdev, id, mask, value);
3158 if (err < 0)
3159 return err;
3160 } 3077 }
3161 3078 return ab8500_regulator_of_probe(pdev, np);
3162 /* register all regulators */
3163 for (i = 0; i < abx500_regulator.info_size; i++) {
3164 err = ab8500_regulator_register(pdev, &pdata->regulator[i],
3165 i, NULL);
3166 if (err < 0)
3167 return err;
3168 }
3169
3170 return 0;
3171} 3079}
3172 3080
3173static int ab8500_regulator_remove(struct platform_device *pdev) 3081static int ab8500_regulator_remove(struct platform_device *pdev)
3174{ 3082{
3175 int i, err; 3083 int err;
3176
3177 for (i = 0; i < abx500_regulator.info_size; i++) {
3178 struct ab8500_regulator_info *info = NULL;
3179 info = &abx500_regulator.info[i];
3180
3181 dev_vdbg(rdev_get_dev(info->regulator),
3182 "%s-remove\n", info->desc.name);
3183
3184 regulator_unregister(info->regulator);
3185 }
3186 3084
3187 /* remove regulator debug */ 3085 /* remove regulator debug */
3188 err = ab8500_regulator_debug_exit(pdev); 3086 err = ab8500_regulator_debug_exit(pdev);
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
new file mode 100644
index 000000000000..084cc0819a52
--- /dev/null
+++ b/drivers/regulator/act8865-regulator.c
@@ -0,0 +1,349 @@
1/*
2 * act8865-regulator.c - Voltage regulation for the active-semi ACT8865
3 * http://www.active-semi.com/sheets/ACT8865_Datasheet.pdf
4 *
5 * Copyright (C) 2013 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/i2c.h>
21#include <linux/err.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/driver.h>
24#include <linux/regulator/act8865.h>
25#include <linux/of.h>
26#include <linux/of_device.h>
27#include <linux/regulator/of_regulator.h>
28#include <linux/regmap.h>
29
30/*
31 * ACT8865 Global Register Map.
32 */
33#define ACT8865_SYS_MODE 0x00
34#define ACT8865_SYS_CTRL 0x01
35#define ACT8865_DCDC1_VSET1 0x20
36#define ACT8865_DCDC1_VSET2 0x21
37#define ACT8865_DCDC1_CTRL 0x22
38#define ACT8865_DCDC2_VSET1 0x30
39#define ACT8865_DCDC2_VSET2 0x31
40#define ACT8865_DCDC2_CTRL 0x32
41#define ACT8865_DCDC3_VSET1 0x40
42#define ACT8865_DCDC3_VSET2 0x41
43#define ACT8865_DCDC3_CTRL 0x42
44#define ACT8865_LDO1_VSET 0x50
45#define ACT8865_LDO1_CTRL 0x51
46#define ACT8865_LDO2_VSET 0x54
47#define ACT8865_LDO2_CTRL 0x55
48#define ACT8865_LDO3_VSET 0x60
49#define ACT8865_LDO3_CTRL 0x61
50#define ACT8865_LDO4_VSET 0x64
51#define ACT8865_LDO4_CTRL 0x65
52
53/*
54 * Field Definitions.
55 */
56#define ACT8865_ENA 0x80 /* ON - [7] */
57#define ACT8865_VSEL_MASK 0x3F /* VSET - [5:0] */
58
59/*
60 * ACT8865 voltage number
61 */
62#define ACT8865_VOLTAGE_NUM 64
63
64struct act8865 {
65 struct regulator_dev *rdev[ACT8865_REG_NUM];
66 struct regmap *regmap;
67};
68
69static const struct regmap_config act8865_regmap_config = {
70 .reg_bits = 8,
71 .val_bits = 8,
72};
73
74static const struct regulator_linear_range act8865_volatge_ranges[] = {
75 REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
76 REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
77 REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
78};
79
80static struct regulator_ops act8865_ops = {
81 .list_voltage = regulator_list_voltage_linear_range,
82 .map_voltage = regulator_map_voltage_linear_range,
83 .get_voltage_sel = regulator_get_voltage_sel_regmap,
84 .set_voltage_sel = regulator_set_voltage_sel_regmap,
85 .enable = regulator_enable_regmap,
86 .disable = regulator_disable_regmap,
87 .is_enabled = regulator_is_enabled_regmap,
88};
89
90static const struct regulator_desc act8865_reg[] = {
91 {
92 .name = "DCDC_REG1",
93 .id = ACT8865_ID_DCDC1,
94 .ops = &act8865_ops,
95 .type = REGULATOR_VOLTAGE,
96 .n_voltages = ACT8865_VOLTAGE_NUM,
97 .linear_ranges = act8865_volatge_ranges,
98 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
99 .vsel_reg = ACT8865_DCDC1_VSET1,
100 .vsel_mask = ACT8865_VSEL_MASK,
101 .enable_reg = ACT8865_DCDC1_CTRL,
102 .enable_mask = ACT8865_ENA,
103 .owner = THIS_MODULE,
104 },
105 {
106 .name = "DCDC_REG2",
107 .id = ACT8865_ID_DCDC2,
108 .ops = &act8865_ops,
109 .type = REGULATOR_VOLTAGE,
110 .n_voltages = ACT8865_VOLTAGE_NUM,
111 .linear_ranges = act8865_volatge_ranges,
112 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
113 .vsel_reg = ACT8865_DCDC2_VSET1,
114 .vsel_mask = ACT8865_VSEL_MASK,
115 .enable_reg = ACT8865_DCDC2_CTRL,
116 .enable_mask = ACT8865_ENA,
117 .owner = THIS_MODULE,
118 },
119 {
120 .name = "DCDC_REG3",
121 .id = ACT8865_ID_DCDC3,
122 .ops = &act8865_ops,
123 .type = REGULATOR_VOLTAGE,
124 .n_voltages = ACT8865_VOLTAGE_NUM,
125 .linear_ranges = act8865_volatge_ranges,
126 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
127 .vsel_reg = ACT8865_DCDC3_VSET1,
128 .vsel_mask = ACT8865_VSEL_MASK,
129 .enable_reg = ACT8865_DCDC3_CTRL,
130 .enable_mask = ACT8865_ENA,
131 .owner = THIS_MODULE,
132 },
133 {
134 .name = "LDO_REG1",
135 .id = ACT8865_ID_LDO1,
136 .ops = &act8865_ops,
137 .type = REGULATOR_VOLTAGE,
138 .n_voltages = ACT8865_VOLTAGE_NUM,
139 .linear_ranges = act8865_volatge_ranges,
140 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
141 .vsel_reg = ACT8865_LDO1_VSET,
142 .vsel_mask = ACT8865_VSEL_MASK,
143 .enable_reg = ACT8865_LDO1_CTRL,
144 .enable_mask = ACT8865_ENA,
145 .owner = THIS_MODULE,
146 },
147 {
148 .name = "LDO_REG2",
149 .id = ACT8865_ID_LDO2,
150 .ops = &act8865_ops,
151 .type = REGULATOR_VOLTAGE,
152 .n_voltages = ACT8865_VOLTAGE_NUM,
153 .linear_ranges = act8865_volatge_ranges,
154 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
155 .vsel_reg = ACT8865_LDO2_VSET,
156 .vsel_mask = ACT8865_VSEL_MASK,
157 .enable_reg = ACT8865_LDO2_CTRL,
158 .enable_mask = ACT8865_ENA,
159 .owner = THIS_MODULE,
160 },
161 {
162 .name = "LDO_REG3",
163 .id = ACT8865_ID_LDO3,
164 .ops = &act8865_ops,
165 .type = REGULATOR_VOLTAGE,
166 .n_voltages = ACT8865_VOLTAGE_NUM,
167 .linear_ranges = act8865_volatge_ranges,
168 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
169 .vsel_reg = ACT8865_LDO3_VSET,
170 .vsel_mask = ACT8865_VSEL_MASK,
171 .enable_reg = ACT8865_LDO3_CTRL,
172 .enable_mask = ACT8865_ENA,
173 .owner = THIS_MODULE,
174 },
175 {
176 .name = "LDO_REG4",
177 .id = ACT8865_ID_LDO4,
178 .ops = &act8865_ops,
179 .type = REGULATOR_VOLTAGE,
180 .n_voltages = ACT8865_VOLTAGE_NUM,
181 .linear_ranges = act8865_volatge_ranges,
182 .n_linear_ranges = ARRAY_SIZE(act8865_volatge_ranges),
183 .vsel_reg = ACT8865_LDO4_VSET,
184 .vsel_mask = ACT8865_VSEL_MASK,
185 .enable_reg = ACT8865_LDO4_CTRL,
186 .enable_mask = ACT8865_ENA,
187 .owner = THIS_MODULE,
188 },
189};
190
191#ifdef CONFIG_OF
192static const struct of_device_id act8865_dt_ids[] = {
193 { .compatible = "active-semi,act8865" },
194 { }
195};
196MODULE_DEVICE_TABLE(of, act8865_dt_ids);
197
198static struct of_regulator_match act8865_matches[] = {
199 [ACT8865_ID_DCDC1] = { .name = "DCDC_REG1"},
200 [ACT8865_ID_DCDC2] = { .name = "DCDC_REG2"},
201 [ACT8865_ID_DCDC3] = { .name = "DCDC_REG3"},
202 [ACT8865_ID_LDO1] = { .name = "LDO_REG1"},
203 [ACT8865_ID_LDO2] = { .name = "LDO_REG2"},
204 [ACT8865_ID_LDO3] = { .name = "LDO_REG3"},
205 [ACT8865_ID_LDO4] = { .name = "LDO_REG4"},
206};
207
208static int act8865_pdata_from_dt(struct device *dev,
209 struct device_node **of_node,
210 struct act8865_platform_data *pdata)
211{
212 int matched, i;
213 struct device_node *np;
214 struct act8865_regulator_data *regulator;
215
216 np = of_find_node_by_name(dev->of_node, "regulators");
217 if (!np) {
218 dev_err(dev, "missing 'regulators' subnode in DT\n");
219 return -EINVAL;
220 }
221
222 matched = of_regulator_match(dev, np,
223 act8865_matches, ARRAY_SIZE(act8865_matches));
224 if (matched <= 0)
225 return matched;
226
227 pdata->regulators = devm_kzalloc(dev,
228 sizeof(struct act8865_regulator_data) *
229 ARRAY_SIZE(act8865_matches), GFP_KERNEL);
230 if (!pdata->regulators) {
231 dev_err(dev, "%s: failed to allocate act8865 registor\n",
232 __func__);
233 return -ENOMEM;
234 }
235
236 pdata->num_regulators = matched;
237 regulator = pdata->regulators;
238
239 for (i = 0; i < ARRAY_SIZE(act8865_matches); i++) {
240 regulator->id = i;
241 regulator->name = act8865_matches[i].name;
242 regulator->platform_data = act8865_matches[i].init_data;
243 of_node[i] = act8865_matches[i].of_node;
244 regulator++;
245 }
246
247 return 0;
248}
249#else
250static inline int act8865_pdata_from_dt(struct device *dev,
251 struct device_node **of_node,
252 struct act8865_platform_data *pdata)
253{
254 return 0;
255}
256#endif
257
258static int act8865_pmic_probe(struct i2c_client *client,
259 const struct i2c_device_id *i2c_id)
260{
261 struct regulator_dev **rdev;
262 struct device *dev = &client->dev;
263 struct act8865_platform_data *pdata = dev_get_platdata(dev);
264 struct regulator_config config = { };
265 struct act8865 *act8865;
266 struct device_node *of_node[ACT8865_REG_NUM];
267 int i, id;
268 int ret = -EINVAL;
269 int error;
270
271 if (dev->of_node && !pdata) {
272 const struct of_device_id *id;
273 struct act8865_platform_data pdata_of;
274
275 id = of_match_device(of_match_ptr(act8865_dt_ids), dev);
276 if (!id)
277 return -ENODEV;
278
279 ret = act8865_pdata_from_dt(dev, of_node, &pdata_of);
280 if (ret < 0)
281 return ret;
282
283 pdata = &pdata_of;
284 }
285
286 if (pdata->num_regulators > ACT8865_REG_NUM) {
287 dev_err(dev, "Too many regulators found!\n");
288 return -EINVAL;
289 }
290
291 act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
292 if (!act8865)
293 return -ENOMEM;
294
295 rdev = act8865->rdev;
296
297 act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
298 if (IS_ERR(act8865->regmap)) {
299 error = PTR_ERR(act8865->regmap);
300 dev_err(&client->dev, "Failed to allocate register map: %d\n",
301 error);
302 return error;
303 }
304
305 /* Finally register devices */
306 for (i = 0; i < ACT8865_REG_NUM; i++) {
307
308 id = pdata->regulators[i].id;
309
310 config.dev = dev;
311 config.init_data = pdata->regulators[i].platform_data;
312 config.of_node = of_node[i];
313 config.driver_data = act8865;
314 config.regmap = act8865->regmap;
315
316 rdev[i] = devm_regulator_register(&client->dev,
317 &act8865_reg[i], &config);
318 if (IS_ERR(rdev[i])) {
319 dev_err(dev, "failed to register %s\n",
320 act8865_reg[id].name);
321 return PTR_ERR(rdev[i]);
322 }
323 }
324
325 i2c_set_clientdata(client, act8865);
326
327 return 0;
328}
329
330static const struct i2c_device_id act8865_ids[] = {
331 { "act8865", 0 },
332 { },
333};
334MODULE_DEVICE_TABLE(i2c, act8865_ids);
335
336static struct i2c_driver act8865_pmic_driver = {
337 .driver = {
338 .name = "act8865",
339 .owner = THIS_MODULE,
340 },
341 .probe = act8865_pmic_probe,
342 .id_table = act8865_ids,
343};
344
345module_i2c_driver(act8865_pmic_driver);
346
347MODULE_DESCRIPTION("active-semi act8865 voltage regulator driver");
348MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
349MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index c734d0980826..862e63e451d0 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -122,10 +122,8 @@ static int anatop_regulator_probe(struct platform_device *pdev)
122 if (!sreg) 122 if (!sreg)
123 return -ENOMEM; 123 return -ENOMEM;
124 sreg->initdata = initdata; 124 sreg->initdata = initdata;
125 sreg->name = kstrdup(of_get_property(np, "regulator-name", NULL), 125 sreg->name = of_get_property(np, "regulator-name", NULL);
126 GFP_KERNEL);
127 rdesc = &sreg->rdesc; 126 rdesc = &sreg->rdesc;
128 memset(rdesc, 0, sizeof(*rdesc));
129 rdesc->name = sreg->name; 127 rdesc->name = sreg->name;
130 rdesc->ops = &anatop_rops; 128 rdesc->ops = &anatop_rops;
131 rdesc->type = REGULATOR_VOLTAGE; 129 rdesc->type = REGULATOR_VOLTAGE;
@@ -143,37 +141,37 @@ static int anatop_regulator_probe(struct platform_device *pdev)
143 &sreg->control_reg); 141 &sreg->control_reg);
144 if (ret) { 142 if (ret) {
145 dev_err(dev, "no anatop-reg-offset property set\n"); 143 dev_err(dev, "no anatop-reg-offset property set\n");
146 goto anatop_probe_end; 144 return ret;
147 } 145 }
148 ret = of_property_read_u32(np, "anatop-vol-bit-width", 146 ret = of_property_read_u32(np, "anatop-vol-bit-width",
149 &sreg->vol_bit_width); 147 &sreg->vol_bit_width);
150 if (ret) { 148 if (ret) {
151 dev_err(dev, "no anatop-vol-bit-width property set\n"); 149 dev_err(dev, "no anatop-vol-bit-width property set\n");
152 goto anatop_probe_end; 150 return ret;
153 } 151 }
154 ret = of_property_read_u32(np, "anatop-vol-bit-shift", 152 ret = of_property_read_u32(np, "anatop-vol-bit-shift",
155 &sreg->vol_bit_shift); 153 &sreg->vol_bit_shift);
156 if (ret) { 154 if (ret) {
157 dev_err(dev, "no anatop-vol-bit-shift property set\n"); 155 dev_err(dev, "no anatop-vol-bit-shift property set\n");
158 goto anatop_probe_end; 156 return ret;
159 } 157 }
160 ret = of_property_read_u32(np, "anatop-min-bit-val", 158 ret = of_property_read_u32(np, "anatop-min-bit-val",
161 &sreg->min_bit_val); 159 &sreg->min_bit_val);
162 if (ret) { 160 if (ret) {
163 dev_err(dev, "no anatop-min-bit-val property set\n"); 161 dev_err(dev, "no anatop-min-bit-val property set\n");
164 goto anatop_probe_end; 162 return ret;
165 } 163 }
166 ret = of_property_read_u32(np, "anatop-min-voltage", 164 ret = of_property_read_u32(np, "anatop-min-voltage",
167 &sreg->min_voltage); 165 &sreg->min_voltage);
168 if (ret) { 166 if (ret) {
169 dev_err(dev, "no anatop-min-voltage property set\n"); 167 dev_err(dev, "no anatop-min-voltage property set\n");
170 goto anatop_probe_end; 168 return ret;
171 } 169 }
172 ret = of_property_read_u32(np, "anatop-max-voltage", 170 ret = of_property_read_u32(np, "anatop-max-voltage",
173 &sreg->max_voltage); 171 &sreg->max_voltage);
174 if (ret) { 172 if (ret) {
175 dev_err(dev, "no anatop-max-voltage property set\n"); 173 dev_err(dev, "no anatop-max-voltage property set\n");
176 goto anatop_probe_end; 174 return ret;
177 } 175 }
178 176
179 /* read LDO ramp up setting, only for core reg */ 177 /* read LDO ramp up setting, only for core reg */
@@ -204,27 +202,11 @@ static int anatop_regulator_probe(struct platform_device *pdev)
204 if (IS_ERR(rdev)) { 202 if (IS_ERR(rdev)) {
205 dev_err(dev, "failed to register %s\n", 203 dev_err(dev, "failed to register %s\n",
206 rdesc->name); 204 rdesc->name);
207 ret = PTR_ERR(rdev); 205 return PTR_ERR(rdev);
208 goto anatop_probe_end;
209 } 206 }
210 207
211 platform_set_drvdata(pdev, rdev); 208 platform_set_drvdata(pdev, rdev);
212 209
213anatop_probe_end:
214 if (ret)
215 kfree(sreg->name);
216
217 return ret;
218}
219
220static int anatop_regulator_remove(struct platform_device *pdev)
221{
222 struct regulator_dev *rdev = platform_get_drvdata(pdev);
223 struct anatop_regulator *sreg = rdev_get_drvdata(rdev);
224 const char *name = sreg->name;
225
226 kfree(name);
227
228 return 0; 210 return 0;
229} 211}
230 212
@@ -240,7 +222,6 @@ static struct platform_driver anatop_regulator_driver = {
240 .of_match_table = of_anatop_regulator_match_tbl, 222 .of_match_table = of_anatop_regulator_match_tbl,
241 }, 223 },
242 .probe = anatop_regulator_probe, 224 .probe = anatop_regulator_probe,
243 .remove = anatop_regulator_remove,
244}; 225};
245 226
246static int __init anatop_regulator_init(void) 227static int __init anatop_regulator_init(void)
@@ -259,3 +240,4 @@ MODULE_AUTHOR("Nancy Chen <Nancy.Chen@freescale.com>");
259MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>"); 240MODULE_AUTHOR("Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>");
260MODULE_DESCRIPTION("ANATOP Regulator driver"); 241MODULE_DESCRIPTION("ANATOP Regulator driver");
261MODULE_LICENSE("GPL v2"); 242MODULE_LICENSE("GPL v2");
243MODULE_ALIAS("platform:anatop_regulator");
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
index fd3154d86901..034ece707083 100644
--- a/drivers/regulator/arizona-micsupp.c
+++ b/drivers/regulator/arizona-micsupp.c
@@ -28,8 +28,6 @@
28#include <linux/mfd/arizona/pdata.h> 28#include <linux/mfd/arizona/pdata.h>
29#include <linux/mfd/arizona/registers.h> 29#include <linux/mfd/arizona/registers.h>
30 30
31#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f
32
33struct arizona_micsupp { 31struct arizona_micsupp {
34 struct regulator_dev *regulator; 32 struct regulator_dev *regulator;
35 struct arizona *arizona; 33 struct arizona *arizona;
@@ -40,42 +38,6 @@ struct arizona_micsupp {
40 struct work_struct check_cp_work; 38 struct work_struct check_cp_work;
41}; 39};
42 40
43static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
44 unsigned int selector)
45{
46 if (selector > ARIZONA_MICSUPP_MAX_SELECTOR)
47 return -EINVAL;
48
49 if (selector == ARIZONA_MICSUPP_MAX_SELECTOR)
50 return 3300000;
51 else
52 return (selector * 50000) + 1700000;
53}
54
55static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
56 int min_uV, int max_uV)
57{
58 unsigned int voltage;
59 int selector;
60
61 if (min_uV < 1700000)
62 min_uV = 1700000;
63
64 if (min_uV > 3200000)
65 selector = ARIZONA_MICSUPP_MAX_SELECTOR;
66 else
67 selector = DIV_ROUND_UP(min_uV - 1700000, 50000);
68
69 if (selector < 0)
70 return -EINVAL;
71
72 voltage = arizona_micsupp_list_voltage(rdev, selector);
73 if (voltage < min_uV || voltage > max_uV)
74 return -EINVAL;
75
76 return selector;
77}
78
79static void arizona_micsupp_check_cp(struct work_struct *work) 41static void arizona_micsupp_check_cp(struct work_struct *work)
80{ 42{
81 struct arizona_micsupp *micsupp = 43 struct arizona_micsupp *micsupp =
@@ -145,8 +107,8 @@ static struct regulator_ops arizona_micsupp_ops = {
145 .disable = arizona_micsupp_disable, 107 .disable = arizona_micsupp_disable,
146 .is_enabled = regulator_is_enabled_regmap, 108 .is_enabled = regulator_is_enabled_regmap,
147 109
148 .list_voltage = arizona_micsupp_list_voltage, 110 .list_voltage = regulator_list_voltage_linear_range,
149 .map_voltage = arizona_micsupp_map_voltage, 111 .map_voltage = regulator_map_voltage_linear_range,
150 112
151 .get_voltage_sel = regulator_get_voltage_sel_regmap, 113 .get_voltage_sel = regulator_get_voltage_sel_regmap,
152 .set_voltage_sel = regulator_set_voltage_sel_regmap, 114 .set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -155,11 +117,16 @@ static struct regulator_ops arizona_micsupp_ops = {
155 .set_bypass = arizona_micsupp_set_bypass, 117 .set_bypass = arizona_micsupp_set_bypass,
156}; 118};
157 119
120static const struct regulator_linear_range arizona_micsupp_ranges[] = {
121 REGULATOR_LINEAR_RANGE(1700000, 0, 0x1e, 50000),
122 REGULATOR_LINEAR_RANGE(3300000, 0x1f, 0x1f, 0),
123};
124
158static const struct regulator_desc arizona_micsupp = { 125static const struct regulator_desc arizona_micsupp = {
159 .name = "MICVDD", 126 .name = "MICVDD",
160 .supply_name = "CPVDD", 127 .supply_name = "CPVDD",
161 .type = REGULATOR_VOLTAGE, 128 .type = REGULATOR_VOLTAGE,
162 .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1, 129 .n_voltages = 32,
163 .ops = &arizona_micsupp_ops, 130 .ops = &arizona_micsupp_ops,
164 131
165 .vsel_reg = ARIZONA_LDO2_CONTROL_1, 132 .vsel_reg = ARIZONA_LDO2_CONTROL_1,
@@ -169,6 +136,9 @@ static const struct regulator_desc arizona_micsupp = {
169 .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1, 136 .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1,
170 .bypass_mask = ARIZONA_CPMIC_BYPASS, 137 .bypass_mask = ARIZONA_CPMIC_BYPASS,
171 138
139 .linear_ranges = arizona_micsupp_ranges,
140 .n_linear_ranges = ARRAY_SIZE(arizona_micsupp_ranges),
141
172 .enable_time = 3000, 142 .enable_time = 3000,
173 143
174 .owner = THIS_MODULE, 144 .owner = THIS_MODULE,
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index b9f1d24c6812..8b17d786cb71 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -99,7 +99,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
99 .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK, 99 .sleep_ctrl_mask = AS3722_SD0_EXT_ENABLE_MASK,
100 .control_reg = AS3722_SD0_CONTROL_REG, 100 .control_reg = AS3722_SD0_CONTROL_REG,
101 .mode_mask = AS3722_SD0_MODE_FAST, 101 .mode_mask = AS3722_SD0_MODE_FAST,
102 .n_voltages = AS3722_SD0_VSEL_MAX + 1,
103 }, 102 },
104 { 103 {
105 .regulator_id = AS3722_REGULATOR_ID_SD1, 104 .regulator_id = AS3722_REGULATOR_ID_SD1,
@@ -112,7 +111,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
112 .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK, 111 .sleep_ctrl_mask = AS3722_SD1_EXT_ENABLE_MASK,
113 .control_reg = AS3722_SD1_CONTROL_REG, 112 .control_reg = AS3722_SD1_CONTROL_REG,
114 .mode_mask = AS3722_SD1_MODE_FAST, 113 .mode_mask = AS3722_SD1_MODE_FAST,
115 .n_voltages = AS3722_SD0_VSEL_MAX + 1,
116 }, 114 },
117 { 115 {
118 .regulator_id = AS3722_REGULATOR_ID_SD2, 116 .regulator_id = AS3722_REGULATOR_ID_SD2,
@@ -181,7 +179,6 @@ static const struct as3722_register_mapping as3722_reg_lookup[] = {
181 .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK, 179 .sleep_ctrl_mask = AS3722_SD6_EXT_ENABLE_MASK,
182 .control_reg = AS3722_SD6_CONTROL_REG, 180 .control_reg = AS3722_SD6_CONTROL_REG,
183 .mode_mask = AS3722_SD6_MODE_FAST, 181 .mode_mask = AS3722_SD6_MODE_FAST,
184 .n_voltages = AS3722_SD0_VSEL_MAX + 1,
185 }, 182 },
186 { 183 {
187 .regulator_id = AS3722_REGULATOR_ID_LDO0, 184 .regulator_id = AS3722_REGULATOR_ID_LDO0,
@@ -595,6 +592,22 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
595 return as3722_update_bits(as3722, reg, mask, val); 592 return as3722_update_bits(as3722, reg, mask, val);
596} 593}
597 594
595static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
596{
597 int err;
598 unsigned val;
599
600 err = as3722_read(as3722_regs->as3722, AS3722_FUSE7_REG, &val);
601 if (err < 0) {
602 dev_err(as3722_regs->dev, "Reg 0x%02x read failed: %d\n",
603 AS3722_FUSE7_REG, err);
604 return false;
605 }
606 if (val & AS3722_FUSE7_SD0_LOW_VOLTAGE)
607 return true;
608 return false;
609}
610
598static const struct regulator_linear_range as3722_sd2345_ranges[] = { 611static const struct regulator_linear_range as3722_sd2345_ranges[] = {
599 REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500), 612 REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
600 REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000), 613 REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
@@ -820,9 +833,19 @@ static int as3722_regulator_probe(struct platform_device *pdev)
820 ops = &as3722_sd016_extcntrl_ops; 833 ops = &as3722_sd016_extcntrl_ops;
821 else 834 else
822 ops = &as3722_sd016_ops; 835 ops = &as3722_sd016_ops;
823 as3722_regs->desc[id].min_uV = 610000; 836 if (id == AS3722_REGULATOR_ID_SD0 &&
837 as3722_sd0_is_low_voltage(as3722_regs)) {
838 as3722_regs->desc[id].n_voltages =
839 AS3722_SD0_VSEL_LOW_VOL_MAX + 1;
840 as3722_regs->desc[id].min_uV = 410000;
841 } else {
842 as3722_regs->desc[id].n_voltages =
843 AS3722_SD0_VSEL_MAX + 1,
844 as3722_regs->desc[id].min_uV = 610000;
845 }
824 as3722_regs->desc[id].uV_step = 10000; 846 as3722_regs->desc[id].uV_step = 10000;
825 as3722_regs->desc[id].linear_min_sel = 1; 847 as3722_regs->desc[id].linear_min_sel = 1;
848 as3722_regs->desc[id].enable_time = 600;
826 break; 849 break;
827 case AS3722_REGULATOR_ID_SD2: 850 case AS3722_REGULATOR_ID_SD2:
828 case AS3722_REGULATOR_ID_SD3: 851 case AS3722_REGULATOR_ID_SD3:
@@ -842,9 +865,6 @@ static int as3722_regulator_probe(struct platform_device *pdev)
842 ops = &as3722_ldo_extcntrl_ops; 865 ops = &as3722_ldo_extcntrl_ops;
843 else 866 else
844 ops = &as3722_ldo_ops; 867 ops = &as3722_ldo_ops;
845 as3722_regs->desc[id].min_uV = 825000;
846 as3722_regs->desc[id].uV_step = 25000;
847 as3722_regs->desc[id].linear_min_sel = 1;
848 as3722_regs->desc[id].enable_time = 500; 868 as3722_regs->desc[id].enable_time = 500;
849 as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges; 869 as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
850 as3722_regs->desc[id].n_linear_ranges = 870 as3722_regs->desc[id].n_linear_ranges =
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d85f31385b24..b38a6b669e8c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1334,9 +1334,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1334 * If we have return value from dev_lookup fail, we do not expect to 1334 * If we have return value from dev_lookup fail, we do not expect to
1335 * succeed, so, quit with appropriate error value 1335 * succeed, so, quit with appropriate error value
1336 */ 1336 */
1337 if (ret && ret != -ENODEV) { 1337 if (ret && ret != -ENODEV)
1338 goto out; 1338 goto out;
1339 }
1340 1339
1341 if (!devname) 1340 if (!devname)
1342 devname = "deviceless"; 1341 devname = "deviceless";
@@ -1351,7 +1350,8 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
1351 1350
1352 rdev = dummy_regulator_rdev; 1351 rdev = dummy_regulator_rdev;
1353 goto found; 1352 goto found;
1354 } else { 1353 /* Don't log an error when called from regulator_get_optional() */
1354 } else if (!have_full_constraints() || exclusive) {
1355 dev_err(dev, "dummy supplies not allowed\n"); 1355 dev_err(dev, "dummy supplies not allowed\n");
1356 } 1356 }
1357 1357
@@ -2244,7 +2244,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
2244 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { 2244 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
2245 ret = regulator_get_voltage(regulator); 2245 ret = regulator_get_voltage(regulator);
2246 if (ret >= 0) 2246 if (ret >= 0)
2247 return (min_uV <= ret && ret <= max_uV); 2247 return min_uV <= ret && ret <= max_uV;
2248 else 2248 else
2249 return ret; 2249 return ret;
2250 } 2250 }
@@ -2416,7 +2416,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
2416 ret = regulator_check_voltage(rdev, &min_uV, &max_uV); 2416 ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
2417 if (ret < 0) 2417 if (ret < 0)
2418 goto out; 2418 goto out;
2419 2419
2420 /* restore original values in case of error */ 2420 /* restore original values in case of error */
2421 old_min_uV = regulator->min_uV; 2421 old_min_uV = regulator->min_uV;
2422 old_max_uV = regulator->max_uV; 2422 old_max_uV = regulator->max_uV;
@@ -2430,7 +2430,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
2430 ret = _regulator_do_set_voltage(rdev, min_uV, max_uV); 2430 ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
2431 if (ret < 0) 2431 if (ret < 0)
2432 goto out2; 2432 goto out2;
2433 2433
2434out: 2434out:
2435 mutex_unlock(&rdev->mutex); 2435 mutex_unlock(&rdev->mutex);
2436 return ret; 2436 return ret;
@@ -3835,9 +3835,8 @@ static int __init regulator_init_complete(void)
3835 * goes wrong. */ 3835 * goes wrong. */
3836 rdev_info(rdev, "disabling\n"); 3836 rdev_info(rdev, "disabling\n");
3837 ret = ops->disable(rdev); 3837 ret = ops->disable(rdev);
3838 if (ret != 0) { 3838 if (ret != 0)
3839 rdev_err(rdev, "couldn't disable: %d\n", ret); 3839 rdev_err(rdev, "couldn't disable: %d\n", ret);
3840 }
3841 } else { 3840 } else {
3842 /* The intention is that in future we will 3841 /* The intention is that in future we will
3843 * assume that full constraints are provided 3842 * assume that full constraints are provided
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index a53c11a529d5..846acf240e48 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -431,17 +431,11 @@ static int db8500_regulator_register(struct platform_device *pdev,
431 config.of_node = np; 431 config.of_node = np;
432 432
433 /* register with the regulator framework */ 433 /* register with the regulator framework */
434 info->rdev = regulator_register(&info->desc, &config); 434 info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
435 if (IS_ERR(info->rdev)) { 435 if (IS_ERR(info->rdev)) {
436 err = PTR_ERR(info->rdev); 436 err = PTR_ERR(info->rdev);
437 dev_err(&pdev->dev, "failed to register %s: err %i\n", 437 dev_err(&pdev->dev, "failed to register %s: err %i\n",
438 info->desc.name, err); 438 info->desc.name, err);
439
440 /* if failing, unregister all earlier regulators */
441 while (--id >= 0) {
442 info = &dbx500_regulator_info[id];
443 regulator_unregister(info->rdev);
444 }
445 return err; 439 return err;
446 } 440 }
447 441
@@ -530,20 +524,8 @@ static int db8500_regulator_probe(struct platform_device *pdev)
530 524
531static int db8500_regulator_remove(struct platform_device *pdev) 525static int db8500_regulator_remove(struct platform_device *pdev)
532{ 526{
533 int i;
534
535 ux500_regulator_debug_exit(); 527 ux500_regulator_debug_exit();
536 528
537 for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
538 struct dbx500_regulator_info *info;
539 info = &dbx500_regulator_info[i];
540
541 dev_vdbg(rdev_get_dev(info->rdev),
542 "regulator-%s-remove\n", info->desc.name);
543
544 regulator_unregister(info->rdev);
545 }
546
547 return 0; 529 return 0;
548} 530}
549 531
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 234960dc9607..c0a1d00b78c9 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -203,17 +203,18 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
203 } 203 }
204 config->nr_states = i; 204 config->nr_states = i;
205 205
206 config->type = REGULATOR_VOLTAGE;
206 ret = of_property_read_string(np, "regulator-type", &regtype); 207 ret = of_property_read_string(np, "regulator-type", &regtype);
207 if (ret < 0) { 208 if (ret >= 0) {
208 dev_err(dev, "Missing 'regulator-type' property\n"); 209 if (!strncmp("voltage", regtype, 7))
209 return ERR_PTR(-EINVAL); 210 config->type = REGULATOR_VOLTAGE;
211 else if (!strncmp("current", regtype, 7))
212 config->type = REGULATOR_CURRENT;
213 else
214 dev_warn(dev, "Unknown regulator-type '%s'\n",
215 regtype);
210 } 216 }
211 217
212 if (!strncmp("voltage", regtype, 7))
213 config->type = REGULATOR_VOLTAGE;
214 else if (!strncmp("current", regtype, 7))
215 config->type = REGULATOR_CURRENT;
216
217 return config; 218 return config;
218} 219}
219 220
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 947c05ffe0ab..3b1102b75071 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -25,8 +25,6 @@ struct lp3971 {
25 struct device *dev; 25 struct device *dev;
26 struct mutex io_lock; 26 struct mutex io_lock;
27 struct i2c_client *i2c; 27 struct i2c_client *i2c;
28 int num_regulators;
29 struct regulator_dev **rdev;
30}; 28};
31 29
32static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg); 30static u8 lp3971_reg_read(struct lp3971 *lp3971, u8 reg);
@@ -383,42 +381,27 @@ static int setup_regulators(struct lp3971 *lp3971,
383{ 381{
384 int i, err; 382 int i, err;
385 383
386 lp3971->num_regulators = pdata->num_regulators;
387 lp3971->rdev = kcalloc(pdata->num_regulators,
388 sizeof(struct regulator_dev *), GFP_KERNEL);
389 if (!lp3971->rdev) {
390 err = -ENOMEM;
391 goto err_nomem;
392 }
393
394 /* Instantiate the regulators */ 384 /* Instantiate the regulators */
395 for (i = 0; i < pdata->num_regulators; i++) { 385 for (i = 0; i < pdata->num_regulators; i++) {
396 struct regulator_config config = { }; 386 struct regulator_config config = { };
397 struct lp3971_regulator_subdev *reg = &pdata->regulators[i]; 387 struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
388 struct regulator_dev *rdev;
398 389
399 config.dev = lp3971->dev; 390 config.dev = lp3971->dev;
400 config.init_data = reg->initdata; 391 config.init_data = reg->initdata;
401 config.driver_data = lp3971; 392 config.driver_data = lp3971;
402 393
403 lp3971->rdev[i] = regulator_register(&regulators[reg->id], 394 rdev = devm_regulator_register(lp3971->dev,
404 &config); 395 &regulators[reg->id], &config);
405 if (IS_ERR(lp3971->rdev[i])) { 396 if (IS_ERR(rdev)) {
406 err = PTR_ERR(lp3971->rdev[i]); 397 err = PTR_ERR(rdev);
407 dev_err(lp3971->dev, "regulator init failed: %d\n", 398 dev_err(lp3971->dev, "regulator init failed: %d\n",
408 err); 399 err);
409 goto error; 400 return err;
410 } 401 }
411 } 402 }
412 403
413 return 0; 404 return 0;
414
415error:
416 while (--i >= 0)
417 regulator_unregister(lp3971->rdev[i]);
418 kfree(lp3971->rdev);
419 lp3971->rdev = NULL;
420err_nomem:
421 return err;
422} 405}
423 406
424static int lp3971_i2c_probe(struct i2c_client *i2c, 407static int lp3971_i2c_probe(struct i2c_client *i2c,
@@ -460,19 +443,6 @@ static int lp3971_i2c_probe(struct i2c_client *i2c,
460 return 0; 443 return 0;
461} 444}
462 445
463static int lp3971_i2c_remove(struct i2c_client *i2c)
464{
465 struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
466 int i;
467
468 for (i = 0; i < lp3971->num_regulators; i++)
469 regulator_unregister(lp3971->rdev[i]);
470
471 kfree(lp3971->rdev);
472
473 return 0;
474}
475
476static const struct i2c_device_id lp3971_i2c_id[] = { 446static const struct i2c_device_id lp3971_i2c_id[] = {
477 { "lp3971", 0 }, 447 { "lp3971", 0 },
478 { } 448 { }
@@ -485,7 +455,6 @@ static struct i2c_driver lp3971_i2c_driver = {
485 .owner = THIS_MODULE, 455 .owner = THIS_MODULE,
486 }, 456 },
487 .probe = lp3971_i2c_probe, 457 .probe = lp3971_i2c_probe,
488 .remove = lp3971_i2c_remove,
489 .id_table = lp3971_i2c_id, 458 .id_table = lp3971_i2c_id,
490}; 459};
491 460
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 093e6f44ff8a..aea485afcc1a 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -22,8 +22,6 @@ struct lp3972 {
22 struct device *dev; 22 struct device *dev;
23 struct mutex io_lock; 23 struct mutex io_lock;
24 struct i2c_client *i2c; 24 struct i2c_client *i2c;
25 int num_regulators;
26 struct regulator_dev **rdev;
27}; 25};
28 26
29/* LP3972 Control Registers */ 27/* LP3972 Control Registers */
@@ -478,41 +476,27 @@ static int setup_regulators(struct lp3972 *lp3972,
478{ 476{
479 int i, err; 477 int i, err;
480 478
481 lp3972->num_regulators = pdata->num_regulators;
482 lp3972->rdev = kcalloc(pdata->num_regulators,
483 sizeof(struct regulator_dev *), GFP_KERNEL);
484 if (!lp3972->rdev) {
485 err = -ENOMEM;
486 goto err_nomem;
487 }
488
489 /* Instantiate the regulators */ 479 /* Instantiate the regulators */
490 for (i = 0; i < pdata->num_regulators; i++) { 480 for (i = 0; i < pdata->num_regulators; i++) {
491 struct lp3972_regulator_subdev *reg = &pdata->regulators[i]; 481 struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
492 struct regulator_config config = { }; 482 struct regulator_config config = { };
483 struct regulator_dev *rdev;
493 484
494 config.dev = lp3972->dev; 485 config.dev = lp3972->dev;
495 config.init_data = reg->initdata; 486 config.init_data = reg->initdata;
496 config.driver_data = lp3972; 487 config.driver_data = lp3972;
497 488
498 lp3972->rdev[i] = regulator_register(&regulators[reg->id], 489 rdev = devm_regulator_register(lp3972->dev,
499 &config); 490 &regulators[reg->id], &config);
500 if (IS_ERR(lp3972->rdev[i])) { 491 if (IS_ERR(rdev)) {
501 err = PTR_ERR(lp3972->rdev[i]); 492 err = PTR_ERR(rdev);
502 dev_err(lp3972->dev, "regulator init failed: %d\n", 493 dev_err(lp3972->dev, "regulator init failed: %d\n",
503 err); 494 err);
504 goto error; 495 return err;
505 } 496 }
506 } 497 }
507 498
508 return 0; 499 return 0;
509error:
510 while (--i >= 0)
511 regulator_unregister(lp3972->rdev[i]);
512 kfree(lp3972->rdev);
513 lp3972->rdev = NULL;
514err_nomem:
515 return err;
516} 500}
517 501
518static int lp3972_i2c_probe(struct i2c_client *i2c, 502static int lp3972_i2c_probe(struct i2c_client *i2c,
@@ -557,18 +541,6 @@ static int lp3972_i2c_probe(struct i2c_client *i2c,
557 return 0; 541 return 0;
558} 542}
559 543
560static int lp3972_i2c_remove(struct i2c_client *i2c)
561{
562 struct lp3972 *lp3972 = i2c_get_clientdata(i2c);
563 int i;
564
565 for (i = 0; i < lp3972->num_regulators; i++)
566 regulator_unregister(lp3972->rdev[i]);
567 kfree(lp3972->rdev);
568
569 return 0;
570}
571
572static const struct i2c_device_id lp3972_i2c_id[] = { 544static const struct i2c_device_id lp3972_i2c_id[] = {
573 { "lp3972", 0 }, 545 { "lp3972", 0 },
574 { } 546 { }
@@ -581,7 +553,6 @@ static struct i2c_driver lp3972_i2c_driver = {
581 .owner = THIS_MODULE, 553 .owner = THIS_MODULE,
582 }, 554 },
583 .probe = lp3972_i2c_probe, 555 .probe = lp3972_i2c_probe,
584 .remove = lp3972_i2c_remove,
585 .id_table = lp3972_i2c_id, 556 .id_table = lp3972_i2c_id,
586}; 557};
587 558
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
new file mode 100644
index 000000000000..b1078ba3f393
--- /dev/null
+++ b/drivers/regulator/max14577.c
@@ -0,0 +1,273 @@
1/*
2 * max14577.c - Regulator driver for the Maxim 14577
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Krzysztof Kozlowski <k.kozlowski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/regulator/driver.h>
21#include <linux/mfd/max14577.h>
22#include <linux/mfd/max14577-private.h>
23#include <linux/regulator/of_regulator.h>
24
25struct max14577_regulator {
26 struct device *dev;
27 struct max14577 *max14577;
28 struct regulator_dev **regulators;
29};
30
31static int max14577_reg_is_enabled(struct regulator_dev *rdev)
32{
33 int rid = rdev_get_id(rdev);
34 struct regmap *rmap = rdev->regmap;
35 u8 reg_data;
36
37 switch (rid) {
38 case MAX14577_CHARGER:
39 max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL2, &reg_data);
40 if ((reg_data & CHGCTRL2_MBCHOSTEN_MASK) == 0)
41 return 0;
42 max14577_read_reg(rmap, MAX14577_CHG_REG_STATUS3, &reg_data);
43 if ((reg_data & STATUS3_CGMBC_MASK) == 0)
44 return 0;
45 /* MBCHOSTEN and CGMBC are on */
46 return 1;
47 default:
48 return -EINVAL;
49 }
50}
51
52static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
53{
54 u8 reg_data;
55 struct regmap *rmap = rdev->regmap;
56
57 if (rdev_get_id(rdev) != MAX14577_CHARGER)
58 return -EINVAL;
59
60 max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
61
62 if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
63 return MAX14577_REGULATOR_CURRENT_LIMIT_MIN;
64
65 reg_data = ((reg_data & CHGCTRL4_MBCICHWRCH_MASK) >>
66 CHGCTRL4_MBCICHWRCH_SHIFT);
67 return MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
68 reg_data * MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP;
69}
70
71static int max14577_reg_set_current_limit(struct regulator_dev *rdev,
72 int min_uA, int max_uA)
73{
74 int i, current_bits = 0xf;
75 u8 reg_data;
76
77 if (rdev_get_id(rdev) != MAX14577_CHARGER)
78 return -EINVAL;
79
80 if (min_uA > MAX14577_REGULATOR_CURRENT_LIMIT_MAX ||
81 max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_MIN)
82 return -EINVAL;
83
84 if (max_uA < MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START) {
85 /* Less than 200 mA, so set 90mA (turn only Low Bit off) */
86 u8 reg_data = 0x0 << CHGCTRL4_MBCICHWRCL_SHIFT;
87 return max14577_update_reg(rdev->regmap,
88 MAX14577_CHG_REG_CHG_CTRL4,
89 CHGCTRL4_MBCICHWRCL_MASK, reg_data);
90 }
91
92 /* max_uA is in range: <LIMIT_HIGH_START, inifinite>, so search for
93 * valid current starting from LIMIT_MAX. */
94 for (i = MAX14577_REGULATOR_CURRENT_LIMIT_MAX;
95 i >= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START;
96 i -= MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP) {
97 if (i <= max_uA)
98 break;
99 current_bits--;
100 }
101 BUG_ON(current_bits < 0); /* Cannot happen */
102 /* Turn Low Bit on (use range 200mA-950 mA) */
103 reg_data = 0x1 << CHGCTRL4_MBCICHWRCL_SHIFT;
104 /* and set proper High Bits */
105 reg_data |= current_bits << CHGCTRL4_MBCICHWRCH_SHIFT;
106
107 return max14577_update_reg(rdev->regmap, MAX14577_CHG_REG_CHG_CTRL4,
108 CHGCTRL4_MBCICHWRCL_MASK | CHGCTRL4_MBCICHWRCH_MASK,
109 reg_data);
110}
111
112static struct regulator_ops max14577_safeout_ops = {
113 .is_enabled = regulator_is_enabled_regmap,
114 .enable = regulator_enable_regmap,
115 .disable = regulator_disable_regmap,
116 .list_voltage = regulator_list_voltage_linear,
117};
118
119static struct regulator_ops max14577_charger_ops = {
120 .is_enabled = max14577_reg_is_enabled,
121 .enable = regulator_enable_regmap,
122 .disable = regulator_disable_regmap,
123 .get_current_limit = max14577_reg_get_current_limit,
124 .set_current_limit = max14577_reg_set_current_limit,
125};
126
127static const struct regulator_desc supported_regulators[] = {
128 [MAX14577_SAFEOUT] = {
129 .name = "SAFEOUT",
130 .id = MAX14577_SAFEOUT,
131 .ops = &max14577_safeout_ops,
132 .type = REGULATOR_VOLTAGE,
133 .owner = THIS_MODULE,
134 .n_voltages = 1,
135 .min_uV = MAX14577_REGULATOR_SAFEOUT_VOLTAGE,
136 .enable_reg = MAX14577_REG_CONTROL2,
137 .enable_mask = CTRL2_SFOUTORD_MASK,
138 },
139 [MAX14577_CHARGER] = {
140 .name = "CHARGER",
141 .id = MAX14577_CHARGER,
142 .ops = &max14577_charger_ops,
143 .type = REGULATOR_CURRENT,
144 .owner = THIS_MODULE,
145 .enable_reg = MAX14577_CHG_REG_CHG_CTRL2,
146 .enable_mask = CHGCTRL2_MBCHOSTEN_MASK,
147 },
148};
149
150#ifdef CONFIG_OF
151static struct of_regulator_match max14577_regulator_matches[] = {
152 { .name = "SAFEOUT", },
153 { .name = "CHARGER", },
154};
155
156static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
157{
158 int ret;
159 struct device_node *np;
160
161 np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
162 if (!np) {
163 dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
164 return -EINVAL;
165 }
166
167 ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
168 MAX14577_REG_MAX);
169 if (ret < 0) {
170 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
171 return ret;
172 }
173
174 return 0;
175}
176
177static inline struct regulator_init_data *match_init_data(int index)
178{
179 return max14577_regulator_matches[index].init_data;
180}
181
182static inline struct device_node *match_of_node(int index)
183{
184 return max14577_regulator_matches[index].of_node;
185}
186#else /* CONFIG_OF */
187static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
188{
189 return 0;
190}
191static inline struct regulator_init_data *match_init_data(int index)
192{
193 return NULL;
194}
195
196static inline struct device_node *match_of_node(int index)
197{
198 return NULL;
199}
200#endif /* CONFIG_OF */
201
202
203static int max14577_regulator_probe(struct platform_device *pdev)
204{
205 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
206 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
207 int i, ret;
208 struct regulator_config config = {};
209
210 ret = max14577_regulator_dt_parse_pdata(pdev);
211 if (ret)
212 return ret;
213
214 config.dev = &pdev->dev;
215 config.regmap = max14577->regmap;
216
217 for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
218 struct regulator_dev *regulator;
219 /*
220 * Index of supported_regulators[] is also the id and must
221 * match index of pdata->regulators[].
222 */
223 if (pdata && pdata->regulators) {
224 config.init_data = pdata->regulators[i].initdata;
225 config.of_node = pdata->regulators[i].of_node;
226 } else {
227 config.init_data = match_init_data(i);
228 config.of_node = match_of_node(i);
229 }
230
231 regulator = devm_regulator_register(&pdev->dev,
232 &supported_regulators[i], &config);
233 if (IS_ERR(regulator)) {
234 ret = PTR_ERR(regulator);
235 dev_err(&pdev->dev,
236 "Regulator init failed for ID %d with error: %d\n",
237 i, ret);
238 return ret;
239 }
240 }
241
242 return ret;
243}
244
245static struct platform_driver max14577_regulator_driver = {
246 .driver = {
247 .owner = THIS_MODULE,
248 .name = "max14577-regulator",
249 },
250 .probe = max14577_regulator_probe,
251};
252
253static int __init max14577_regulator_init(void)
254{
255 BUILD_BUG_ON(MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START +
256 MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP * 0xf !=
257 MAX14577_REGULATOR_CURRENT_LIMIT_MAX);
258 BUILD_BUG_ON(ARRAY_SIZE(supported_regulators) != MAX14577_REG_MAX);
259
260 return platform_driver_register(&max14577_regulator_driver);
261}
262subsys_initcall(max14577_regulator_init);
263
264static void __exit max14577_regulator_exit(void)
265{
266 platform_driver_unregister(&max14577_regulator_driver);
267}
268module_exit(max14577_regulator_exit);
269
270MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
271MODULE_DESCRIPTION("MAXIM 14577 regulator driver");
272MODULE_LICENSE("GPL");
273MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index feb20bf4ccab..5fb899f461d0 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -138,6 +138,7 @@ static struct regulator_ops max77693_charger_ops = {
138 .n_voltages = 4, \ 138 .n_voltages = 4, \
139 .ops = &max77693_safeout_ops, \ 139 .ops = &max77693_safeout_ops, \
140 .type = REGULATOR_VOLTAGE, \ 140 .type = REGULATOR_VOLTAGE, \
141 .owner = THIS_MODULE, \
141 .volt_table = max77693_safeout_table, \ 142 .volt_table = max77693_safeout_table, \
142 .vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \ 143 .vsel_reg = MAX77693_CHG_REG_SAFEOUT_CTRL, \
143 .vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \ 144 .vsel_mask = SAFEOUT_CTRL_SAFEOUT##_num##_MASK, \
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 96c9f80d9550..f374fa57220f 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -274,25 +274,25 @@ static struct mc13xxx_regulator mc13892_regulators[] = {
274 MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), 274 MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
275 MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), 275 MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
276 MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), 276 MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
277 MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \ 277 MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,
278 mc13892_vpll), 278 mc13892_vpll),
279 MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ 279 MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,
280 mc13892_vdig), 280 mc13892_vdig),
281 MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \ 281 MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,
282 mc13892_vsd), 282 mc13892_vsd),
283 MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \ 283 MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,
284 mc13892_vusb2), 284 mc13892_vusb2),
285 MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \ 285 MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,
286 mc13892_vvideo), 286 mc13892_vvideo),
287 MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \ 287 MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,
288 mc13892_vaudio), 288 mc13892_vaudio),
289 MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ 289 MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,
290 mc13892_vcam), 290 mc13892_vcam),
291 MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \ 291 MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,
292 mc13892_vgen1), 292 mc13892_vgen1),
293 MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, \ 293 MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,
294 mc13892_vgen2), 294 mc13892_vgen2),
295 MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \ 295 MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,
296 mc13892_vgen3), 296 mc13892_vgen3),
297 MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), 297 MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
298 MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), 298 MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
@@ -476,8 +476,8 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
476 } 476 }
477 477
478 mc13xxx_lock(priv->mc13xxx); 478 mc13xxx_lock(priv->mc13xxx);
479 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask, 479 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
480 reg_value); 480 mask, reg_value);
481 mc13xxx_unlock(priv->mc13xxx); 481 mc13xxx_unlock(priv->mc13xxx);
482 482
483 return ret; 483 return ret;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index d7da1c15a6da..134f90ec9ca1 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -105,7 +105,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
105 105
106static struct platform_driver pcf50633_regulator_driver = { 106static struct platform_driver pcf50633_regulator_driver = {
107 .driver = { 107 .driver = {
108 .name = "pcf50633-regltr", 108 .name = "pcf50633-regulator",
109 }, 109 },
110 .probe = pcf50633_regulator_probe, 110 .probe = pcf50633_regulator_probe,
111}; 111};
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index f68e5d5a011e..ab174f20ca11 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -326,7 +326,7 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
326 if (ret) 326 if (ret)
327 return ret; 327 return ret;
328 dev_info(pfuze_chip->dev, 328 dev_info(pfuze_chip->dev,
329 "Full lay: %x, Metal lay: %x\n", 329 "Full layer: %x, Metal layer: %x\n",
330 (value & 0xf0) >> 4, value & 0x0f); 330 (value & 0xf0) >> 4, value & 0x0f);
331 331
332 ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value); 332 ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d68d0e..9e61922d8230 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
438 platform_set_drvdata(pdev, s2mps11); 438 platform_set_drvdata(pdev, s2mps11);
439 439
440 config.dev = &pdev->dev; 440 config.dev = &pdev->dev;
441 config.regmap = iodev->regmap; 441 config.regmap = iodev->regmap_pmic;
442 config.driver_data = s2mps11; 442 config.driver_data = s2mps11;
443 for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) { 443 for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
444 if (!reg_np) { 444 if (!reg_np) {
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e09a4c..34629ea913d4 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
942 return rc; 942 return rc;
943 } 943 }
944 944
945 tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); 945 tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
946 if (IS_ERR(tp->screen)) { 946 if (IS_ERR(tp->screen)) {
947 rc = PTR_ERR(tp->screen); 947 rc = PTR_ERR(tp->screen);
948 raw3270_put_view(&tp->view); 948 raw3270_put_view(&tp->view);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 596480022b0a..38a1257e76e1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
471 schedule_delayed_work(&tgt->sess_del_work, 0); 471 schedule_delayed_work(&tgt->sess_del_work, 0);
472 else 472 else
473 schedule_delayed_work(&tgt->sess_del_work, 473 schedule_delayed_work(&tgt->sess_del_work,
474 jiffies - sess->expires); 474 sess->expires - jiffies);
475} 475}
476 476
477/* ha->hardware_lock supposed to be held on entry */ 477/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
550 struct scsi_qla_host *vha = tgt->vha; 550 struct scsi_qla_host *vha = tgt->vha;
551 struct qla_hw_data *ha = vha->hw; 551 struct qla_hw_data *ha = vha->hw;
552 struct qla_tgt_sess *sess; 552 struct qla_tgt_sess *sess;
553 unsigned long flags; 553 unsigned long flags, elapsed;
554 554
555 spin_lock_irqsave(&ha->hardware_lock, flags); 555 spin_lock_irqsave(&ha->hardware_lock, flags);
556 while (!list_empty(&tgt->del_sess_list)) { 556 while (!list_empty(&tgt->del_sess_list)) {
557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess), 557 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
558 del_list_entry); 558 del_list_entry);
559 if (time_after_eq(jiffies, sess->expires)) { 559 elapsed = jiffies;
560 if (time_after_eq(elapsed, sess->expires)) {
560 qlt_undelete_sess(sess); 561 qlt_undelete_sess(sess);
561 562
562 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 563 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
566 ha->tgt.tgt_ops->put_sess(sess); 567 ha->tgt.tgt_ops->put_sess(sess);
567 } else { 568 } else {
568 schedule_delayed_work(&tgt->sess_del_work, 569 schedule_delayed_work(&tgt->sess_del_work,
569 jiffies - sess->expires); 570 sess->expires - elapsed);
570 break; 571 break;
571 } 572 }
572 } 573 }
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4290 if (rc != 0) { 4291 if (rc != 0) {
4291 ha->tgt.tgt_ops = NULL; 4292 ha->tgt.tgt_ops = NULL;
4292 ha->tgt.target_lport_ptr = NULL; 4293 ha->tgt.target_lport_ptr = NULL;
4294 scsi_host_put(host);
4293 } 4295 }
4294 mutex_unlock(&qla_tgt_mutex); 4296 mutex_unlock(&qla_tgt_mutex);
4295 return rc; 4297 return rc;
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
39 return 0; 39 return 0;
40} 40}
41 41
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) 42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
43 void *accel_priv)
43{ 44{
44 return ClassifyPacket(netdev_priv(dev), skb); 45 return ClassifyPacket(netdev_priv(dev), skb);
45} 46}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf66e20b..4964d2a2fc7d 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
446 release_firmware(fw); 446 release_firmware(fw);
447 } 447 }
448 448
449 return ret; 449 return ret < 0 ? ret : 0;
450} 450}
451EXPORT_SYMBOL_GPL(comedi_load_firmware); 451EXPORT_SYMBOL_GPL(comedi_load_firmware);
452 452
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9c3301..c55f234b29e6 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
63 BOARD_ADLINK_PCI7296, 63 BOARD_ADLINK_PCI7296,
64 BOARD_CB_PCIDIO24, 64 BOARD_CB_PCIDIO24,
65 BOARD_CB_PCIDIO24H, 65 BOARD_CB_PCIDIO24H,
66 BOARD_CB_PCIDIO48H, 66 BOARD_CB_PCIDIO48H_OLD,
67 BOARD_CB_PCIDIO48H_NEW,
67 BOARD_CB_PCIDIO96H, 68 BOARD_CB_PCIDIO96H,
68 BOARD_NI_PCIDIO96, 69 BOARD_NI_PCIDIO96,
69 BOARD_NI_PCIDIO96B, 70 BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
106 .dio_badr = 2, 107 .dio_badr = 2,
107 .n_8255 = 1, 108 .n_8255 = 1,
108 }, 109 },
109 [BOARD_CB_PCIDIO48H] = { 110 [BOARD_CB_PCIDIO48H_OLD] = {
110 .name = "cb_pci-dio48h", 111 .name = "cb_pci-dio48h",
111 .dio_badr = 1, 112 .dio_badr = 1,
112 .n_8255 = 2, 113 .n_8255 = 2,
113 }, 114 },
115 [BOARD_CB_PCIDIO48H_NEW] = {
116 .name = "cb_pci-dio48h",
117 .dio_badr = 2,
118 .n_8255 = 2,
119 },
114 [BOARD_CB_PCIDIO96H] = { 120 [BOARD_CB_PCIDIO96H] = {
115 .name = "cb_pci-dio96h", 121 .name = "cb_pci-dio96h",
116 .dio_badr = 2, 122 .dio_badr = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
263 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 }, 269 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
264 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 }, 270 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
265 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H }, 271 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
266 { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H }, 272 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
273 .driver_data = BOARD_CB_PCIDIO48H_OLD },
274 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
275 .driver_data = BOARD_CB_PCIDIO48H_NEW },
267 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H }, 276 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
268 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 }, 277 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
269 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B }, 278 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f90d189..0485d7f39867 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ 451 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \ 452 BIT(IIO_CHAN_INFO_SAMP_FREQ), \
453 .scan_index = idx, \ 453 .scan_index = idx, \
454 .scan_type = IIO_ST('s', 16, 16, IIO_BE), \ 454 .scan_type = { \
455 .sign = 's', \
456 .realbits = 16, \
457 .storagebits = 16, \
458 .endianness = IIO_BE, \
459 }, \
455 } 460 }
456 461
457static const struct iio_chan_spec hmc5843_channels[] = { 462static const struct iio_chan_spec hmc5843_channels[] = {
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6bd015ac9d68..96e4eee344ef 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
88 88
89 imx_drm_device_put(); 89 imx_drm_device_put();
90 90
91 drm_mode_config_cleanup(imxdrm->drm); 91 drm_vblank_cleanup(imxdrm->drm);
92 drm_kms_helper_poll_fini(imxdrm->drm); 92 drm_kms_helper_poll_fini(imxdrm->drm);
93 drm_mode_config_cleanup(imxdrm->drm);
93 94
94 return 0; 95 return 0;
95} 96}
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
199 if (!file->is_master) 200 if (!file->is_master)
200 return; 201 return;
201 202
202 for (i = 0; i < 4; i++) 203 for (i = 0; i < MAX_CRTC; i++)
203 imx_drm_disable_vblank(drm , i); 204 imx_drm_disable_vblank(drm, i);
204} 205}
205 206
206static const struct file_operations imx_drm_driver_fops = { 207static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
376 struct imx_drm_device *imxdrm = __imx_drm_device(); 377 struct imx_drm_device *imxdrm = __imx_drm_device();
377 int ret; 378 int ret;
378 379
379 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
380 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
381 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); 380 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
382 if (ret) 381 if (ret)
383 return ret; 382 return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
385 drm_crtc_helper_add(imx_drm_crtc->crtc, 384 drm_crtc_helper_add(imx_drm_crtc->crtc,
386 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 385 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
387 386
387 drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
388 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
389
388 drm_mode_group_reinit(imxdrm->drm); 390 drm_mode_group_reinit(imxdrm->drm);
389 391
390 return 0; 392 return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
428 ret = drm_mode_group_init_legacy_group(imxdrm->drm, 430 ret = drm_mode_group_init_legacy_group(imxdrm->drm,
429 &imxdrm->drm->primary->mode_group); 431 &imxdrm->drm->primary->mode_group);
430 if (ret) 432 if (ret)
431 goto err_init; 433 goto err_kms;
432 434
433 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); 435 ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
434 if (ret) 436 if (ret)
435 goto err_init; 437 goto err_kms;
436 438
437 /* 439 /*
438 * with vblank_disable_allowed = true, vblank interrupt will be disabled 440 * with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
441 */ 443 */
442 imxdrm->drm->vblank_disable_allowed = true; 444 imxdrm->drm->vblank_disable_allowed = true;
443 445
444 if (!imx_drm_device_get()) 446 if (!imx_drm_device_get()) {
445 ret = -EINVAL; 447 ret = -EINVAL;
448 goto err_vblank;
449 }
446 450
447 ret = 0; 451 mutex_unlock(&imxdrm->mutex);
452 return 0;
448 453
449err_init: 454err_vblank:
455 drm_vblank_cleanup(drm);
456err_kms:
457 drm_kms_helper_poll_fini(drm);
458 drm_mode_config_cleanup(drm);
450 mutex_unlock(&imxdrm->mutex); 459 mutex_unlock(&imxdrm->mutex);
451 460
452 return ret; 461 return ret;
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
492 501
493 mutex_lock(&imxdrm->mutex); 502 mutex_lock(&imxdrm->mutex);
494 503
504 /*
505 * The vblank arrays are dimensioned by MAX_CRTC - we can't
506 * pass IDs greater than this to those functions.
507 */
508 if (imxdrm->pipes >= MAX_CRTC) {
509 ret = -EINVAL;
510 goto err_busy;
511 }
512
495 if (imxdrm->drm->open_count) { 513 if (imxdrm->drm->open_count) {
496 ret = -EBUSY; 514 ret = -EBUSY;
497 goto err_busy; 515 goto err_busy;
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
528 return 0; 546 return 0;
529 547
530err_register: 548err_register:
549 list_del(&imx_drm_crtc->list);
531 kfree(imx_drm_crtc); 550 kfree(imx_drm_crtc);
532err_alloc: 551err_alloc:
533err_busy: 552err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8fa081..2c44fef8d58b 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
114 struct drm_encoder encoder; 114 struct drm_encoder encoder;
115 struct imx_drm_encoder *imx_drm_encoder; 115 struct imx_drm_encoder *imx_drm_encoder;
116 struct device *dev; 116 struct device *dev;
117 spinlock_t enable_lock; /* serializes tve_enable/disable */
118 spinlock_t lock; /* register lock */ 117 spinlock_t lock; /* register lock */
119 bool enabled; 118 bool enabled;
120 int mode; 119 int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
146 145
147static void tve_enable(struct imx_tve *tve) 146static void tve_enable(struct imx_tve *tve)
148{ 147{
149 unsigned long flags;
150 int ret; 148 int ret;
151 149
152 spin_lock_irqsave(&tve->enable_lock, flags);
153 if (!tve->enabled) { 150 if (!tve->enabled) {
154 tve->enabled = true; 151 tve->enabled = true;
155 clk_prepare_enable(tve->clk); 152 clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
169 TVE_CD_SM_IEN | 166 TVE_CD_SM_IEN |
170 TVE_CD_LM_IEN | 167 TVE_CD_LM_IEN |
171 TVE_CD_MON_END_IEN); 168 TVE_CD_MON_END_IEN);
172
173 spin_unlock_irqrestore(&tve->enable_lock, flags);
174} 169}
175 170
176static void tve_disable(struct imx_tve *tve) 171static void tve_disable(struct imx_tve *tve)
177{ 172{
178 unsigned long flags;
179 int ret; 173 int ret;
180 174
181 spin_lock_irqsave(&tve->enable_lock, flags);
182 if (tve->enabled) { 175 if (tve->enabled) {
183 tve->enabled = false; 176 tve->enabled = false;
184 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, 177 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
185 TVE_IPU_CLK_EN | TVE_EN, 0); 178 TVE_IPU_CLK_EN | TVE_EN, 0);
186 clk_disable_unprepare(tve->clk); 179 clk_disable_unprepare(tve->clk);
187 } 180 }
188 spin_unlock_irqrestore(&tve->enable_lock, flags);
189} 181}
190 182
191static int tve_setup_tvout(struct imx_tve *tve) 183static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
601 593
602 tve->dev = &pdev->dev; 594 tve->dev = &pdev->dev;
603 spin_lock_init(&tve->lock); 595 spin_lock_init(&tve->lock);
604 spin_lock_init(&tve->enable_lock);
605 596
606 ddc_node = of_parse_phandle(np, "ddc", 0); 597 ddc_node = of_parse_phandle(np, "ddc", 0);
607 if (ddc_node) { 598 if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce619ed2..97ca6924dbb3 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
996 }, 996 },
997}; 997};
998 998
999static DEFINE_MUTEX(ipu_client_id_mutex);
999static int ipu_client_id; 1000static int ipu_client_id;
1000 1001
1001static int ipu_add_subdevice_pdata(struct device *dev,
1002 const struct ipu_platform_reg *reg)
1003{
1004 struct platform_device *pdev;
1005
1006 pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
1007 &reg->pdata, sizeof(struct ipu_platform_reg));
1008
1009 return PTR_ERR_OR_ZERO(pdev);
1010}
1011
1012static int ipu_add_client_devices(struct ipu_soc *ipu) 1002static int ipu_add_client_devices(struct ipu_soc *ipu)
1013{ 1003{
1014 int ret; 1004 struct device *dev = ipu->dev;
1015 int i; 1005 unsigned i;
1006 int id, ret;
1007
1008 mutex_lock(&ipu_client_id_mutex);
1009 id = ipu_client_id;
1010 ipu_client_id += ARRAY_SIZE(client_reg);
1011 mutex_unlock(&ipu_client_id_mutex);
1016 1012
1017 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1013 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1018 const struct ipu_platform_reg *reg = &client_reg[i]; 1014 const struct ipu_platform_reg *reg = &client_reg[i];
1019 ret = ipu_add_subdevice_pdata(ipu->dev, reg); 1015 struct platform_device *pdev;
1020 if (ret) 1016
1017 pdev = platform_device_register_data(dev, reg->name,
1018 id++, &reg->pdata, sizeof(reg->pdata));
1019
1020 if (IS_ERR(pdev))
1021 goto err_register; 1021 goto err_register;
1022 } 1022 }
1023 1023
1024 return 0; 1024 return 0;
1025 1025
1026err_register: 1026err_register:
1027 platform_device_unregister_children(to_platform_device(ipu->dev)); 1027 platform_device_unregister_children(to_platform_device(dev));
1028 1028
1029 return ret; 1029 return ret;
1030} 1030}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
306 return NETDEV_TX_OK; 306 return NETDEV_TX_OK;
307} 307}
308 308
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb) 309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
310 void *accel_priv)
310{ 311{
311 return (u16)smp_processor_id(); 312 return (u16)smp_processor_id();
312} 313}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
652 return dscp >> 5; 652 return dscp >> 5;
653} 653}
654 654
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb) 655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
656 void *accel_priv)
656{ 657{
657 struct adapter *padapter = rtw_netdev_priv(dev); 658 struct adapter *padapter = rtw_netdev_priv(dev);
658 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e9119e906..00867190413c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
465 */ 465 */
466 send_sig(SIGINT, np->np_thread, 1); 466 send_sig(SIGINT, np->np_thread, 1);
467 kthread_stop(np->np_thread); 467 kthread_stop(np->np_thread);
468 np->np_thread = NULL;
468 } 469 }
469 470
470 np->np_transport->iscsit_free_np(np); 471 np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
823 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 824 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
824 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 825 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
825 /* 826 /*
826 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 827 * From RFC-3720 Section 10.3.1:
827 * that adds support for RESERVE/RELEASE. There is a bug 828 *
828 * add with this new functionality that sets R/W bits when 829 * "Either or both of R and W MAY be 1 when either the
829 * neither CDB carries any READ or WRITE datapayloads. 830 * Expected Data Transfer Length and/or Bidirectional Read
831 * Expected Data Transfer Length are 0"
832 *
833 * For this case, go ahead and clear the unnecssary bits
834 * to avoid any confusion with ->data_direction.
830 */ 835 */
831 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 836 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
832 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 837 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
833 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
834 goto done;
835 }
836 838
837 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 839 pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
838 " set when Expected Data Transfer Length is 0 for" 840 " set when Expected Data Transfer Length is 0 for"
839 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 841 " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
840 return iscsit_add_reject_cmd(cmd,
841 ISCSI_REASON_BOOKMARK_INVALID, buf);
842 } 842 }
843done:
844 843
845 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 844 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
846 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 845 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318edb233d..1c0088fe9e99 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
474 \ 474 \
475 if (!capable(CAP_SYS_ADMIN)) \ 475 if (!capable(CAP_SYS_ADMIN)) \
476 return -EPERM; \ 476 return -EPERM; \
477 \ 477 if (count >= sizeof(auth->name)) \
478 return -EINVAL; \
478 snprintf(auth->name, sizeof(auth->name), "%s", page); \ 479 snprintf(auth->name, sizeof(auth->name), "%s", page); \
479 if (!strncmp("NULL", auth->name, 4)) \ 480 if (!strncmp("NULL", auth->name, 4)) \
480 auth->naf_flags &= ~flags; \ 481 auth->naf_flags &= ~flags; \
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2b6473..e29279e6b577 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
1403 1403
1404out: 1404out:
1405 stop = kthread_should_stop(); 1405 stop = kthread_should_stop();
1406 if (!stop && signal_pending(current)) {
1407 spin_lock_bh(&np->np_thread_lock);
1408 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1409 spin_unlock_bh(&np->np_thread_lock);
1410 }
1411 /* Wait for another socket.. */ 1406 /* Wait for another socket.. */
1412 if (!stop) 1407 if (!stop)
1413 return 1; 1408 return 1;
@@ -1415,7 +1410,6 @@ exit:
1415 iscsi_stop_login_thread_timer(np); 1410 iscsi_stop_login_thread_timer(np);
1416 spin_lock_bh(&np->np_thread_lock); 1411 spin_lock_bh(&np->np_thread_lock);
1417 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1412 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1418 np->np_thread = NULL;
1419 spin_unlock_bh(&np->np_thread_lock); 1413 spin_unlock_bh(&np->np_thread_lock);
1420 1414
1421 return 0; 1415 return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340498a3..d06de84b069b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1106 dev->dev_attrib.block_size = block_size; 1106 dev->dev_attrib.block_size = block_size;
1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1108 dev, block_size); 1108 dev, block_size);
1109
1110 if (dev->dev_attrib.max_bytes_per_io)
1111 dev->dev_attrib.hw_max_sectors =
1112 dev->dev_attrib.max_bytes_per_io / block_size;
1113
1109 return 0; 1114 return 0;
1110} 1115}
1111 1116
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda3271e..78241a53b555 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
68 TARGET_CORE_MOD_VERSION); 68 TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
70 " MaxSectors: %u\n", 70 hba->hba_id, fd_host->fd_host_id);
71 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
72 71
73 return 0; 72 return 0;
74} 73}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
220 } 219 }
221 220
222 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; 221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..d7772c167685 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
7#define FD_DEVICE_QUEUE_DEPTH 32 7#define FD_DEVICE_QUEUE_DEPTH 32
8#define FD_MAX_DEVICE_QUEUE_DEPTH 128 8#define FD_MAX_DEVICE_QUEUE_DEPTH 128
9#define FD_BLOCKSIZE 512 9#define FD_BLOCKSIZE 512
10#define FD_MAX_SECTORS 2048 10/*
11 * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
12 */
13#define FD_MAX_BYTES 8388608
11 14
12#define RRF_EMULATE_CDB 0x01 15#define RRF_EMULATE_CDB 0x01
13#define RRF_GOT_LBA 0x02 16#define RRF_GOT_LBA 0x02
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8baec54..2a573de19a9f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
279 acl->se_tpg = tpg; 279 acl->se_tpg = tpg;
280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
281 spin_lock_init(&acl->stats_lock);
282 acl->dynamic_node_acl = 1; 281 acl->dynamic_node_acl = 1;
283 282
284 tpg->se_tpg_tfo->set_default_node_attributes(acl); 283 tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
406 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 405 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
407 acl->se_tpg = tpg; 406 acl->se_tpg = tpg;
408 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 407 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
409 spin_lock_init(&acl->stats_lock);
410 408
411 tpg->se_tpg_tfo->set_default_node_attributes(acl); 409 tpg->se_tpg_tfo->set_default_node_attributes(acl);
412 410
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
658 spin_lock_init(&lun->lun_sep_lock); 656 spin_lock_init(&lun->lun_sep_lock);
659 init_completion(&lun->lun_ref_comp); 657 init_completion(&lun->lun_ref_comp);
660 658
661 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
662 if (ret < 0)
663 return ret;
664
665 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 659 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
666 if (ret < 0) { 660 if (ret < 0)
667 percpu_ref_cancel_init(&lun->lun_ref);
668 return ret; 661 return ret;
669 }
670 662
671 return 0; 663 return 0;
672} 664}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b62768f2b..34aacaaae14a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
93 size_t canon_head; 93 size_t canon_head;
94 size_t echo_head; 94 size_t echo_head;
95 size_t echo_commit; 95 size_t echo_commit;
96 size_t echo_mark;
96 DECLARE_BITMAP(char_map, 256); 97 DECLARE_BITMAP(char_map, 256);
97 98
98 /* private to n_tty_receive_overrun (single-threaded) */ 99 /* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
336{ 337{
337 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 338 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
338 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; 339 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
340 ldata->echo_mark = 0;
339 ldata->line_start = 0; 341 ldata->line_start = 0;
340 342
341 ldata->erasing = 0; 343 ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
787 size_t head; 789 size_t head;
788 790
789 head = ldata->echo_head; 791 head = ldata->echo_head;
792 ldata->echo_mark = head;
790 old = ldata->echo_commit - ldata->echo_tail; 793 old = ldata->echo_commit - ldata->echo_tail;
791 794
792 /* Process committed echoes if the accumulated # of bytes 795 /* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
811 size_t echoed; 814 size_t echoed;
812 815
813 if ((!L_ECHO(tty) && !L_ECHONL(tty)) || 816 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
814 ldata->echo_commit == ldata->echo_tail) 817 ldata->echo_mark == ldata->echo_tail)
815 return; 818 return;
816 819
817 mutex_lock(&ldata->output_lock); 820 mutex_lock(&ldata->output_lock);
821 ldata->echo_commit = ldata->echo_mark;
818 echoed = __process_echoes(tty); 822 echoed = __process_echoes(tty);
819 mutex_unlock(&ldata->output_lock); 823 mutex_unlock(&ldata->output_lock);
820 824
@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
822 tty->ops->flush_chars(tty); 826 tty->ops->flush_chars(tty);
823} 827}
824 828
829/* NB: echo_mark and echo_head should be equivalent here */
825static void flush_echoes(struct tty_struct *tty) 830static void flush_echoes(struct tty_struct *tty)
826{ 831{
827 struct n_tty_data *ldata = tty->disc_data; 832 struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e0ec42..06525f10e364 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
96 if (offset == UART_LCR) { 96 if (offset == UART_LCR) {
97 int tries = 1000; 97 int tries = 1000;
98 while (tries--) { 98 while (tries--) {
99 if (value == p->serial_in(p, UART_LCR)) 99 unsigned int lcr = p->serial_in(p, UART_LCR);
100 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
100 return; 101 return;
101 dw8250_force_idle(p); 102 dw8250_force_idle(p);
102 writeb(value, p->membase + (UART_LCR << p->regshift)); 103 writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
132 if (offset == UART_LCR) { 133 if (offset == UART_LCR) {
133 int tries = 1000; 134 int tries = 1000;
134 while (tries--) { 135 while (tries--) {
135 if (value == p->serial_in(p, UART_LCR)) 136 unsigned int lcr = p->serial_in(p, UART_LCR);
137 if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
136 return; 138 return;
137 dw8250_force_idle(p); 139 dw8250_force_idle(p);
138 writel(value, p->membase + (UART_LCR << p->regshift)); 140 writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
455static const struct acpi_device_id dw8250_acpi_match[] = { 457static const struct acpi_device_id dw8250_acpi_match[] = {
456 { "INT33C4", 0 }, 458 { "INT33C4", 0 },
457 { "INT33C5", 0 }, 459 { "INT33C5", 0 },
460 { "INT3434", 0 },
461 { "INT3435", 0 },
458 { "80860F0A", 0 }, 462 { "80860F0A", 0 },
459 { }, 463 { },
460}; 464};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3f19b9..f619ad5b5eae 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
240 continue; 240 continue;
241 } 241 }
242 242
243#ifdef SUPPORT_SYSRQ
243 /* 244 /*
244 * uart_handle_sysrq_char() doesn't work if 245 * uart_handle_sysrq_char() doesn't work if
245 * spinlocked, for some reason 246 * spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
253 } 254 }
254 spin_lock(&port->lock); 255 spin_lock(&port->lock);
255 } 256 }
257#endif
256 258
257 port->icount.rx++; 259 port->icount.rx++;
258 260
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8ad5ac2..d8a55e87877f 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); 86 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
87} 87}
88 88
89/*
90 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
91 * Returns 1 if count was successfully changed; @*old will have @new value.
92 * Returns 0 if count was not changed; @*old will have most recent sem->count
93 */
89static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem) 94static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
90{ 95{
91 long tmp = *old; 96 long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
92 *old = atomic_long_cmpxchg(&sem->count, *old, new); 97 if (tmp == *old) {
93 return *old == tmp; 98 *old = new;
99 return 1;
100 } else {
101 *old = tmp;
102 return 0;
103 }
94} 104}
95 105
96/* 106/*
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c5235e..6e73f8cd60e5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
642 : CI_ROLE_GADGET; 642 : CI_ROLE_GADGET;
643 } 643 }
644 644
645 /* only update vbus status for peripheral */
646 if (ci->role == CI_ROLE_GADGET)
647 ci_handle_vbus_change(ci);
648
645 ret = ci_role_start(ci, ci->role); 649 ret = ci_role_start(ci, ci->role);
646 if (ret) { 650 if (ret) {
647 dev_err(dev, "can't start %s role\n", ci_role(ci)->name); 651 dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020ea753..526cd77563d8 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
88 return ret; 88 return ret;
89 89
90disable_reg: 90disable_reg:
91 regulator_disable(ci->platdata->reg_vbus); 91 if (ci->platdata->reg_vbus)
92 regulator_disable(ci->platdata->reg_vbus);
92 93
93put_hcd: 94put_hcd:
94 usb_put_hcd(hcd); 95 usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c81969cba..69d20fbb38a2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
1795 pm_runtime_no_callbacks(&ci->gadget.dev); 1795 pm_runtime_no_callbacks(&ci->gadget.dev);
1796 pm_runtime_enable(&ci->gadget.dev); 1796 pm_runtime_enable(&ci->gadget.dev);
1797 1797
1798 /* Update ci->vbus_active */
1799 ci_handle_vbus_change(ci);
1800
1801 return retval; 1798 return retval;
1802 1799
1803destroy_eps: 1800destroy_eps:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..0b23a8639311 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
854{ 854{
855 /* need autopm_get/put here to ensure the usbcore sees the new value */ 855 /* need autopm_get/put here to ensure the usbcore sees the new value */
856 int rv = usb_autopm_get_interface(intf); 856 int rv = usb_autopm_get_interface(intf);
857 if (rv < 0)
858 goto err;
859 857
860 intf->needs_remote_wakeup = on; 858 intf->needs_remote_wakeup = on;
861 usb_autopm_put_interface(intf); 859 if (!rv)
862err: 860 usb_autopm_put_interface(intf);
863 return rv; 861 return 0;
864} 862}
865 863
866static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) 864static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf02da07..a49217ae3533 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
455 if (IS_ERR(regs)) 455 if (IS_ERR(regs))
456 return PTR_ERR(regs); 456 return PTR_ERR(regs);
457 457
458 usb_phy_set_suspend(dwc->usb2_phy, 0);
459 usb_phy_set_suspend(dwc->usb3_phy, 0);
460
461 spin_lock_init(&dwc->lock); 458 spin_lock_init(&dwc->lock);
462 platform_set_drvdata(pdev, dwc); 459 platform_set_drvdata(pdev, dwc);
463 460
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
488 goto err0; 485 goto err0;
489 } 486 }
490 487
488 usb_phy_set_suspend(dwc->usb2_phy, 0);
489 usb_phy_set_suspend(dwc->usb3_phy, 0);
490
491 ret = dwc3_event_buffers_setup(dwc); 491 ret = dwc3_event_buffers_setup(dwc);
492 if (ret) { 492 if (ret) {
493 dev_err(dwc->dev, "failed to setup event buffers\n"); 493 dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
569 dwc3_event_buffers_cleanup(dwc); 569 dwc3_event_buffers_cleanup(dwc);
570 570
571err1: 571err1:
572 usb_phy_set_suspend(dwc->usb2_phy, 1);
573 usb_phy_set_suspend(dwc->usb3_phy, 1);
572 dwc3_core_exit(dwc); 574 dwc3_core_exit(dwc);
573 575
574err0: 576err0:
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444ebb1b8..8c356af79409 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
136 struct ohci_hcd *ohci; 136 struct ohci_hcd *ohci;
137 int retval; 137 int retval;
138 struct usb_hcd *hcd = NULL; 138 struct usb_hcd *hcd = NULL;
139 139 struct device *dev = &pdev->dev;
140 if (pdev->num_resources != 2) { 140 struct resource *res;
141 pr_debug("hcd probe: invalid num_resources"); 141 int irq;
142 return -ENODEV; 142
143 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
144 if (!res) {
145 dev_dbg(dev, "hcd probe: missing memory resource\n");
146 return -ENXIO;
143 } 147 }
144 148
145 if ((pdev->resource[0].flags != IORESOURCE_MEM) 149 irq = platform_get_irq(pdev, 0);
146 || (pdev->resource[1].flags != IORESOURCE_IRQ)) { 150 if (irq < 0) {
147 pr_debug("hcd probe: invalid resource type\n"); 151 dev_dbg(dev, "hcd probe: missing irq resource\n");
148 return -ENODEV; 152 return irq;
149 } 153 }
150 154
151 hcd = usb_create_hcd(driver, &pdev->dev, "at91"); 155 hcd = usb_create_hcd(driver, &pdev->dev, "at91");
152 if (!hcd) 156 if (!hcd)
153 return -ENOMEM; 157 return -ENOMEM;
154 hcd->rsrc_start = pdev->resource[0].start; 158 hcd->rsrc_start = res->start;
155 hcd->rsrc_len = resource_size(&pdev->resource[0]); 159 hcd->rsrc_len = resource_size(res);
156 160
157 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 161 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
158 pr_debug("request_mem_region failed\n"); 162 pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
199 ohci->num_ports = board->ports; 203 ohci->num_ports = board->ports;
200 at91_start_hc(pdev); 204 at91_start_hc(pdev);
201 205
202 retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED); 206 retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
203 if (retval == 0) 207 if (retval == 0)
204 return retval; 208 return retval;
205 209
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd59eb25..73f5208714a4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
128 * any other sleep) on Haswell machines with LPT and LPT-LP 128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS 129 * with the new Intel BIOS
130 */ 130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 131 /* Limit the quirk to only known vendors, as this triggers
132 * yet another BIOS bug on some other machines
133 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
134 */
135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 } 137 }
133 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 138 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 139 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39027ec..2b41c636a52a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
19 in host mode, low speed. 19 in host mode, low speed.
20 20
21config FSL_USB2_OTG 21config FSL_USB2_OTG
22 bool "Freescale USB OTG Transceiver Driver" 22 tristate "Freescale USB OTG Transceiver Driver"
23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME 23 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
24 depends on USB
24 select USB_OTG 25 select USB_OTG
25 select USB_PHY 26 select USB_PHY
26 help 27 help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
29config ISP1301_OMAP 30config ISP1301_OMAP
30 tristate "Philips ISP1301 with OMAP OTG" 31 tristate "Philips ISP1301 with OMAP OTG"
31 depends on I2C && ARCH_OMAP_OTG 32 depends on I2C && ARCH_OMAP_OTG
33 depends on USB
32 select USB_PHY 34 select USB_PHY
33 help 35 help
34 If you say yes here you get support for the Philips ISP1301 36 If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232acf1ab6..bbe4f8e6e8d7 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
876 876
877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, 877 tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
878 resource_size(res)); 878 resource_size(res));
879 if (!tegra_phy->regs) { 879 if (!tegra_phy->pad_regs) {
880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); 880 dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
881 return -ENOMEM; 881 return -ENOMEM;
882 } 882 }
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61552d4..bad57ce77ba5 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
127 127
128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address) 128static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
129{ 129{
130 u8 data, ret = 0; 130 u8 data;
131 int ret;
131 132
132 ret = twl_i2c_read_u8(module, &data, address); 133 ret = twl_i2c_read_u8(module, &data, address);
133 if (ret >= 0) 134 if (ret >= 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39d5be..cc7a24154490 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
251#define ZTE_PRODUCT_MF628 0x0015 251#define ZTE_PRODUCT_MF628 0x0015
252#define ZTE_PRODUCT_MF626 0x0031 252#define ZTE_PRODUCT_MF626 0x0031
253#define ZTE_PRODUCT_MC2718 0xffe8 253#define ZTE_PRODUCT_MC2718 0xffe8
254#define ZTE_PRODUCT_AC2726 0xfff1
254 255
255#define BENQ_VENDOR_ID 0x04a5 256#define BENQ_VENDOR_ID 0x04a5
256#define BENQ_PRODUCT_H10 0x4068 257#define BENQ_PRODUCT_H10 0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
1453 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, 1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
1454 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, 1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
1455 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, 1456 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
1457 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1456 1458
1457 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1459 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1458 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1460 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c752a4ed..eae2c873b39f 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
281 { USB_DEVICE(0x19d2, 0xfffd) }, 281 { USB_DEVICE(0x19d2, 0xfffd) },
282 { USB_DEVICE(0x19d2, 0xfffc) }, 282 { USB_DEVICE(0x19d2, 0xfffc) },
283 { USB_DEVICE(0x19d2, 0xfffb) }, 283 { USB_DEVICE(0x19d2, 0xfffb) },
284 /* AC2726, AC8710_V3 */ 284 /* AC8710_V3 */
285 { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
286 { USB_DEVICE(0x19d2, 0xfff6) }, 285 { USB_DEVICE(0x19d2, 0xfff6) },
287 { USB_DEVICE(0x19d2, 0xfff7) }, 286 { USB_DEVICE(0x19d2, 0xfff7) },
288 { USB_DEVICE(0x19d2, 0xfff8) }, 287 { USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654fc33f..5c4a95b516cf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
285{ 285{
286 __le32 actual = cpu_to_le32(vb->num_pages); 286 __le32 actual = cpu_to_le32(vb->num_pages);
287 287
288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages, 288 virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
289 &actual); 289 &actual);
290} 290}
291 291
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7c70b..4c02e2b94103 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
350 350
351 pfn = page_to_pfn(page); 351 pfn = page_to_pfn(page);
352 352
353 set_phys_to_machine(pfn, frame_list[i]);
354
355#ifdef CONFIG_XEN_HAVE_PVMMU 353#ifdef CONFIG_XEN_HAVE_PVMMU
356 /* Link back into the page tables if not highmem. */ 354 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
357 if (xen_pv_domain() && !PageHighMem(page)) { 355 set_phys_to_machine(pfn, frame_list[i]);
358 int ret; 356
359 ret = HYPERVISOR_update_va_mapping( 357 /* Link back into the page tables if not highmem. */
360 (unsigned long)__va(pfn << PAGE_SHIFT), 358 if (!PageHighMem(page)) {
361 mfn_pte(frame_list[i], PAGE_KERNEL), 359 int ret;
362 0); 360 ret = HYPERVISOR_update_va_mapping(
363 BUG_ON(ret); 361 (unsigned long)__va(pfn << PAGE_SHIFT),
362 mfn_pte(frame_list[i], PAGE_KERNEL),
363 0);
364 BUG_ON(ret);
365 }
364 } 366 }
365#endif 367#endif
366 368
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
378 enum bp_state state = BP_DONE; 380 enum bp_state state = BP_DONE;
379 unsigned long pfn, i; 381 unsigned long pfn, i;
380 struct page *page; 382 struct page *page;
381 struct page *scratch_page;
382 int ret; 383 int ret;
383 struct xen_memory_reservation reservation = { 384 struct xen_memory_reservation reservation = {
384 .address_bits = 0, 385 .address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
411 412
412 scrub_page(page); 413 scrub_page(page);
413 414
415#ifdef CONFIG_XEN_HAVE_PVMMU
414 /* 416 /*
415 * Ballooned out frames are effectively replaced with 417 * Ballooned out frames are effectively replaced with
416 * a scratch frame. Ensure direct mappings and the 418 * a scratch frame. Ensure direct mappings and the
417 * p2m are consistent. 419 * p2m are consistent.
418 */ 420 */
419 scratch_page = get_balloon_scratch_page();
420#ifdef CONFIG_XEN_HAVE_PVMMU
421 if (xen_pv_domain() && !PageHighMem(page)) {
422 ret = HYPERVISOR_update_va_mapping(
423 (unsigned long)__va(pfn << PAGE_SHIFT),
424 pfn_pte(page_to_pfn(scratch_page),
425 PAGE_KERNEL_RO), 0);
426 BUG_ON(ret);
427 }
428#endif
429 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 421 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
430 unsigned long p; 422 unsigned long p;
423 struct page *scratch_page = get_balloon_scratch_page();
424
425 if (!PageHighMem(page)) {
426 ret = HYPERVISOR_update_va_mapping(
427 (unsigned long)__va(pfn << PAGE_SHIFT),
428 pfn_pte(page_to_pfn(scratch_page),
429 PAGE_KERNEL_RO), 0);
430 BUG_ON(ret);
431 }
431 p = page_to_pfn(scratch_page); 432 p = page_to_pfn(scratch_page);
432 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 433 __set_phys_to_machine(pfn, pfn_to_mfn(p));
434
435 put_balloon_scratch_page();
433 } 436 }
434 put_balloon_scratch_page(); 437#endif
435 438
436 balloon_append(pfn_to_page(pfn)); 439 balloon_append(pfn_to_page(pfn));
437 } 440 }
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
627 if (!xen_domain()) 630 if (!xen_domain())
628 return -ENODEV; 631 return -ENODEV;
629 632
630 for_each_online_cpu(cpu) 633 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
631 { 634 for_each_online_cpu(cpu)
632 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 635 {
633 if (per_cpu(balloon_scratch_page, cpu) == NULL) { 636 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
634 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 637 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
635 return -ENOMEM; 638 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
639 return -ENOMEM;
640 }
636 } 641 }
642 register_cpu_notifier(&balloon_cpu_notifier);
637 } 643 }
638 register_cpu_notifier(&balloon_cpu_notifier);
639 644
640 pr_info("Initialising balloon driver\n"); 645 pr_info("Initialising balloon driver\n");
641 646
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 028387192b60..aa846a48f400 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, 1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
1177 PAGE_SIZE * max_nr_gframes); 1177 PAGE_SIZE * max_nr_gframes);
1178 if (gnttab_shared.addr == NULL) { 1178 if (gnttab_shared.addr == NULL) {
1179 pr_warn("Failed to ioremap gnttab share frames!\n"); 1179 pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
1180 xen_hvm_resume_frames);
1180 return -ENOMEM; 1181 return -ENOMEM;
1181 } 1182 }
1182 } 1183 }
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590fa1bb..569a13b9e856 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
533{ 533{
534 struct page **pages = vma->vm_private_data; 534 struct page **pages = vma->vm_private_data;
535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
536 int rc;
536 537
537 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 538 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
538 return; 539 return;
539 540
540 xen_unmap_domain_mfn_range(vma, numpgs, pages); 541 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
541 free_xenballooned_pages(numpgs, pages); 542 if (rc == 0)
543 free_xenballooned_pages(numpgs, pages);
544 else
545 pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
546 numpgs, rc);
542 kfree(pages); 547 kfree(pages);
543} 548}
544 549