Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/Kconfig | 8
-rw-r--r-- drivers/acpi/device_pm.c | 56
-rw-r--r-- drivers/acpi/power.c | 104
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/ata/ahci.c | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 2
-rw-r--r-- drivers/ata/libahci.c | 10
-rw-r--r-- drivers/ata/libata-acpi.c | 14
-rw-r--r-- drivers/ata/libata-eh.c | 6
-rw-r--r-- drivers/ata/libata-scsi.c | 3
-rw-r--r-- drivers/ata/libata.h | 4
-rw-r--r-- drivers/ata/pata_isapnp.c | 2
-rw-r--r-- drivers/base/memory.c | 7
-rw-r--r-- drivers/base/regmap/internal.h | 8
-rw-r--r-- drivers/base/regmap/regcache.c | 3
-rw-r--r-- drivers/base/regmap/regmap.c | 294
-rw-r--r-- drivers/char/tpm/xen-tpmfront.c | 1
-rw-r--r-- drivers/clk/clk-nomadik.c | 21
-rw-r--r-- drivers/clk/mvebu/armada-370.c | 4
-rw-r--r-- drivers/clk/socfpga/clk.c | 2
-rw-r--r-- drivers/clk/versatile/clk-icst.c | 2
-rw-r--r-- drivers/connector/cn_proc.c | 18
-rw-r--r-- drivers/connector/connector.c | 9
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 8
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 52
-rw-r--r-- drivers/cpufreq/s3c64xx-cpufreq.c | 2
-rw-r--r-- drivers/dma/edma.c | 2
-rw-r--r-- drivers/gpio/gpio-lynxpoint.c | 5
-rw-r--r-- drivers/gpio/gpiolib.c | 6
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 109
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 28
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 131
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c | 54
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 33
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/uvd_v1_0.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 17
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r-- drivers/hid/hid-core.c | 12
-rw-r--r-- drivers/hid/hid-ids.h | 6
-rw-r--r-- drivers/hid/hid-input.c | 13
-rw-r--r-- drivers/hid/hid-wiimote-core.c | 5
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r-- drivers/iio/frequency/adf4350.c | 6
-rw-r--r-- drivers/iio/industrialio-buffer.c | 3
-rw-r--r-- drivers/infiniband/Kconfig | 11
-rw-r--r-- drivers/infiniband/core/uverbs.h | 2
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 4
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 6
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_ae.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 16
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 70
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 80
-rw-r--r-- drivers/infiniband/hw/mlx5/srq.c | 4
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_eq.c | 2
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 6
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 2
-rw-r--r-- drivers/input/input.c | 10
-rw-r--r-- drivers/input/keyboard/pxa27x_keypad.c | 11
-rw-r--r-- drivers/input/misc/cm109.c | 14
-rw-r--r-- drivers/input/mouse/alps.c | 1
-rw-r--r-- drivers/input/serio/i8042.c | 23
-rw-r--r-- drivers/input/tablet/wacom_sys.c | 4
-rw-r--r-- drivers/input/tablet/wacom_wac.c | 8
-rw-r--r-- drivers/md/bcache/request.c | 2
-rw-r--r-- drivers/md/dm-snap-persistent.c | 18
-rw-r--r-- drivers/md/md.c | 5
-rw-r--r-- drivers/md/raid1.c | 1
-rw-r--r-- drivers/md/raid10.c | 1
-rw-r--r-- drivers/md/raid5.c | 20
-rw-r--r-- drivers/media/dvb-frontends/tda10071.c | 9
-rw-r--r-- drivers/media/i2c/ad9389b.c | 15
-rw-r--r-- drivers/media/i2c/adv7511.c | 18
-rw-r--r-- drivers/media/i2c/adv7842.c | 30
-rw-r--r-- drivers/media/i2c/ths8200.c | 12
-rw-r--r-- drivers/media/pci/saa7134/saa7134-video.c | 1
-rw-r--r-- drivers/media/platform/s5p-jpeg/jpeg-core.c | 1
-rw-r--r-- drivers/media/platform/sh_vou.c | 2
-rw-r--r-- drivers/media/platform/soc_camera/mx3_camera.c | 5
-rw-r--r-- drivers/media/tuners/e4000.c | 3
-rw-r--r-- drivers/media/usb/stkwebcam/stk-webcam.c | 7
-rw-r--r-- drivers/media/usb/uvc/uvc_driver.c | 18
-rw-r--r-- drivers/media/v4l2-core/videobuf2-core.c | 4
-rw-r--r-- drivers/media/v4l2-core/videobuf2-dma-contig.c | 87
-rw-r--r-- drivers/mfd/mc13xxx-core.c | 5
-rw-r--r-- drivers/mfd/mc13xxx-spi.c | 5
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 2
-rw-r--r-- drivers/mtd/nand/pxa3xx_nand.c | 7
-rw-r--r-- drivers/net/can/at91_can.c | 4
-rw-r--r-- drivers/net/can/dev.c | 10
-rw-r--r-- drivers/net/can/flexcan.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 38
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 388
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 23
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 56
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 38
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 16
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 13
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 87
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 18
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 56
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 9
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 12
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 6
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 19
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 3
-rw-r--r-- drivers/net/hamradio/yam.c | 1
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 31
-rw-r--r-- drivers/net/tun.c | 8
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 23
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/usb/usbnet.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 14
-rw-r--r-- drivers/net/wan/farsync.c | 1
-rw-r--r-- drivers/net/wan/wanxl.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 23
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 9
-rw-r--r-- drivers/net/wireless/cw1200/cw1200_spi.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-6000.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-config.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans.h | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/power.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/drv.c | 42
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/tx.c | 2
-rw-r--r-- drivers/net/wireless/mwifiex/join.c | 10
-rw-r--r-- drivers/net/wireless/mwifiex/main.c | 6
-rw-r--r-- drivers/net/wireless/mwifiex/sta_event.c | 3
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.c | 9
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 3
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 4
-rw-r--r-- drivers/of/Kconfig | 6
-rw-r--r-- drivers/of/Makefile | 1
-rw-r--r-- drivers/of/base.c | 4
-rw-r--r-- drivers/of/fdt.c | 12
-rw-r--r-- drivers/of/of_reserved_mem.c | 173
-rw-r--r-- drivers/of/platform.c | 4
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 14
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/sony-laptop.c | 26
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 98
-rw-r--r-- drivers/s390/char/sclp.c | 4
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 2
-rw-r--r-- drivers/s390/cio/cio.c | 4
-rw-r--r-- drivers/s390/cio/qdio_main.c | 10
-rw-r--r-- drivers/scsi/BusLogic.c | 16
-rw-r--r-- drivers/scsi/aacraid/linit.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 9
-rw-r--r-- drivers/scsi/sd.c | 2
-rw-r--r-- drivers/scsi/sg.c | 176
-rw-r--r-- drivers/staging/bcm/Bcmchar.c | 1
-rw-r--r-- drivers/staging/media/msi3101/Kconfig | 1
-rw-r--r-- drivers/staging/media/msi3101/sdr-msi3101.c | 10
-rw-r--r-- drivers/staging/ozwpan/ozcdev.c | 3
-rw-r--r-- drivers/staging/sb105x/sb_pci_mp.c | 2
-rw-r--r-- drivers/staging/wlags49_h2/wl_priv.c | 9
-rw-r--r-- drivers/target/target_core_pscsi.c | 8
-rw-r--r-- drivers/target/target_core_sbc.c | 5
-rw-r--r-- drivers/target/target_core_xcopy.c | 53
-rw-r--r-- drivers/thermal/samsung/exynos_thermal_common.c | 2
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 12
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.h | 7
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.c | 30
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.h | 13
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 2
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 1
-rw-r--r-- drivers/thermal/x86_pkg_temp_thermal.c | 14
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 9
-rw-r--r-- drivers/tty/serial/imx.c | 3
-rw-r--r-- drivers/tty/serial/vt8500_serial.c | 5
-rw-r--r-- drivers/uio/uio.c | 17
-rw-r--r-- drivers/usb/chipidea/host.c | 6
-rw-r--r-- drivers/usb/core/quirks.c | 6
-rw-r--r-- drivers/usb/host/pci-quirks.c | 4
-rw-r--r-- drivers/usb/host/xhci-hub.c | 26
-rw-r--r-- drivers/usb/host/xhci-pci.c | 25
-rw-r--r-- drivers/usb/host/xhci.c | 14
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/misc/Kconfig | 2
-rw-r--r-- drivers/usb/musb/musb_core.c | 46
-rw-r--r-- drivers/usb/musb/musb_core.h | 1
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 3
-rw-r--r-- drivers/usb/musb/musb_virthub.c | 46
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r-- drivers/usb/serial/option.c | 225
-rw-r--r-- drivers/usb/serial/pl2303.c | 274
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 1
-rw-r--r-- drivers/usb/storage/scsiglue.c | 5
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 7
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 40
-rw-r--r-- drivers/vhost/scsi.c | 2
-rw-r--r-- drivers/video/au1100fb.c | 26
-rw-r--r-- drivers/video/au1200fb.c | 23
-rw-r--r-- drivers/w1/w1.c | 6
237 files changed, 2733 insertions, 1881 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 22327e6a7236..6efe2ac6902f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -24,7 +24,7 @@ menuconfig ACPI
	  are configured, ACPI is used.

	  The project home page for the Linux ACPI subsystem is here:
-	  <http://www.lesswatts.org/projects/acpi/>
+	  <https://01.org/linux-acpi>

	  Linux support for ACPI is based on Intel Corporation's ACPI
	  Component Architecture (ACPI CA).  For more information on the
@@ -123,9 +123,9 @@ config ACPI_BUTTON
	default y
	help
	  This driver handles events on the power, sleep, and lid buttons.
-	  A daemon reads /proc/acpi/event and perform user-defined actions
-	  such as shutting down the system.  This is necessary for
-	  software-controlled poweroff.
+	  A daemon reads events from input devices or via netlink and
+	  performs user-defined actions such as shutting down the system.
+	  This is necessary for software-controlled poweroff.

	  To compile this driver as a module, choose M here:
	  the module will be called button.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 59d3202f6b36..a94383d1f350 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
 	}
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
-
-/**
- * acpi_dev_pm_add_dependent - Add physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
-{
-	struct acpi_device_physical_node *dep;
-	struct acpi_device *adev;
-
-	if (!depdev || acpi_bus_get_device(handle, &adev))
-		return;
-
-	mutex_lock(&adev->physical_node_lock);
-
-	list_for_each_entry(dep, &adev->power_dependent, node)
-		if (dep->dev == depdev)
-			goto out;
-
-	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-	if (dep) {
-		dep->dev = depdev;
-		list_add_tail(&dep->node, &adev->power_dependent);
-	}
-
- out:
-	mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
-
-/**
- * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
-{
-	struct acpi_device_physical_node *dep;
-	struct acpi_device *adev;
-
-	if (!depdev || acpi_bus_get_device(handle, &adev))
-		return;
-
-	mutex_lock(&adev->physical_node_lock);
-
-	list_for_each_entry(dep, &adev->power_dependent, node)
-		if (dep->dev == depdev) {
-			list_del(&dep->node);
-			kfree(dep);
-			break;
-		}
-
-	mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
 #endif /* CONFIG_PM */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0dbe5cdf3396..c2ad391d8041 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
 #define ACPI_POWER_RESOURCE_STATE_ON	0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF

-struct acpi_power_dependent_device {
-	struct list_head node;
-	struct acpi_device *adev;
-	struct work_struct work;
-};
-
 struct acpi_power_resource {
 	struct acpi_device device;
 	struct list_head list_node;
-	struct list_head dependent;
 	char *name;
 	u32 system_level;
 	u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
 	return 0;
 }

-static void acpi_power_resume_dependent(struct work_struct *work)
-{
-	struct acpi_power_dependent_device *dep;
-	struct acpi_device_physical_node *pn;
-	struct acpi_device *adev;
-	int state;
-
-	dep = container_of(work, struct acpi_power_dependent_device, work);
-	adev = dep->adev;
-	if (acpi_power_get_inferred_state(adev, &state))
-		return;
-
-	if (state > ACPI_STATE_D0)
-		return;
-
-	mutex_lock(&adev->physical_node_lock);
-
-	list_for_each_entry(pn, &adev->physical_node_list, node)
-		pm_request_resume(pn->dev);
-
-	list_for_each_entry(pn, &adev->power_dependent, node)
-		pm_request_resume(pn->dev);
-
-	mutex_unlock(&adev->physical_node_lock);
-}
-
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
 	acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
 				  resource->name));
 	} else {
 		result = __acpi_power_on(resource);
-		if (result) {
+		if (result)
 			resource->ref_count--;
-		} else {
-			struct acpi_power_dependent_device *dep;
-
-			list_for_each_entry(dep, &resource->dependent, node)
-				schedule_work(&dep->work);
-		}
 	}
 	return result;
 }
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
 	return result;
 }

-static void acpi_power_add_dependent(struct acpi_power_resource *resource,
-				      struct acpi_device *adev)
-{
-	struct acpi_power_dependent_device *dep;
-
-	mutex_lock(&resource->resource_lock);
-
-	list_for_each_entry(dep, &resource->dependent, node)
-		if (dep->adev == adev)
-			goto out;
-
-	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-	if (!dep)
-		goto out;
-
-	dep->adev = adev;
-	INIT_WORK(&dep->work, acpi_power_resume_dependent);
-	list_add_tail(&dep->node, &resource->dependent);
-
- out:
-	mutex_unlock(&resource->resource_lock);
-}
-
-static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
-					struct acpi_device *adev)
-{
-	struct acpi_power_dependent_device *dep;
-	struct work_struct *work = NULL;
-
-	mutex_lock(&resource->resource_lock);
-
-	list_for_each_entry(dep, &resource->dependent, node)
-		if (dep->adev == adev) {
-			list_del(&dep->node);
-			work = &dep->work;
-			break;
-		}
-
-	mutex_unlock(&resource->resource_lock);
-
-	if (work) {
-		cancel_work_sync(work);
-		kfree(dep);
-	}
-}
-
 static struct attribute *attrs[] = {
 	NULL,
 };
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,

 void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 {
-	struct acpi_device_power_state *ps;
-	struct acpi_power_resource_entry *entry;
 	int state;

 	if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 	if (!adev->power.flags.power_resources)
 		return;

-	ps = &adev->power.states[ACPI_STATE_D0];
-	list_for_each_entry(entry, &ps->resources, node) {
-		struct acpi_power_resource *resource = entry->resource;
-
-		if (add)
-			acpi_power_add_dependent(resource, adev);
-		else
-			acpi_power_remove_dependent(resource, adev);
-	}
-
 	for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
 		acpi_power_expose_hide(adev,
 				       &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
 	acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
 				ACPI_STA_DEFAULT);
 	mutex_init(&resource->resource_lock);
-	INIT_LIST_HEAD(&resource->dependent);
 	INIT_LIST_HEAD(&resource->list_node);
 	resource->name = device->pnp.bus_id;
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
 		mutex_lock(&resource->resource_lock);

 		result = acpi_power_get_state(resource->device.handle, &state);
-		if (result)
+		if (result) {
+			mutex_unlock(&resource->resource_lock);
 			continue;
+		}

 		if (state == ACPI_POWER_RESOURCE_STATE_OFF
 		    && resource->ref_count) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 407ad13cac2f..fee8a297c7d9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
 	INIT_LIST_HEAD(&device->wakeup_list);
 	INIT_LIST_HEAD(&device->physical_node_list);
 	mutex_init(&device->physical_node_lock);
-	INIT_LIST_HEAD(&device->power_dependent);

 	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
 	if (!new_bus_id) {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9d715ae5ff6b..8e28f923cf7f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
 	else
-		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+		dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");

 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 2daaee05cab1..7d3b85385bfc 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
 		host->flags |= ATA_HOST_PARALLEL_SCAN;
 	else
-		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+		dev_info(dev, "SSS flag set, parallel bus scan disabled\n");

 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index acfd0f711069..aaac4fb0d564 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
 				rc = ap->ops->transmit_led_message(ap,
 							emp->led_state,
 							4);
+				/*
+				 * If busy, give a breather but do not
+				 * release EH ownership by using msleep()
+				 * instead of ata_msleep().  EM Transmit
+				 * bit is busy for the whole host and
+				 * releasing ownership will cause other
+				 * ports to fail the same way.
+				 */
 				if (rc == -EBUSY)
-					ata_msleep(ap, 1);
+					msleep(1);
 				else
 					break;
 			}
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 4ba8b0405572..ab714d2ad978 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
 {
 	ata_acpi_clear_gtf(dev);
 }
-
-void ata_scsi_acpi_bind(struct ata_device *dev)
-{
-	acpi_handle handle = ata_dev_acpi_handle(dev);
-	if (handle)
-		acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
-}
-
-void ata_scsi_acpi_unbind(struct ata_device *dev)
-{
-	acpi_handle handle = ata_dev_acpi_handle(dev);
-	if (handle)
-		acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
-}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c69fcce505c0..370462fa8e01 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
  *	should be retried.  To be used from EH.
  *
  *	SCSI midlayer limits the number of retries to scmd->allowed.
- *	scmd->retries is decremented for commands which get retried
+ *	scmd->allowed is incremented for commands which get retried
  *	due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *scmd = qc->scsicmd;
-	if (!qc->err_mask && scmd->retries)
-		scmd->retries--;
+	if (!qc->err_mask)
+		scmd->allowed++;
 	__ata_eh_qc_complete(qc);
 }

diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 97a0cef12959..db6dfcfa3e2e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
 		if (!IS_ERR(sdev)) {
 			dev->sdev = sdev;
 			scsi_device_put(sdev);
-			ata_scsi_acpi_bind(dev);
 		} else {
 			dev->sdev = NULL;
 		}
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 	struct scsi_device *sdev;
 	unsigned long flags;

-	ata_scsi_acpi_unbind(dev);
-
 	/* Alas, we need to grab scan_mutex to ensure SCSI device
 	 * state doesn't change underneath us and thus
 	 * scsi_device_get() always succeeds.  The mutex locking can
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index eeeb77845d48..45b5ab3a95d5 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern void ata_acpi_bind_port(struct ata_port *ap);
 extern void ata_acpi_bind_dev(struct ata_device *dev);
 extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
-extern void ata_scsi_acpi_bind(struct ata_device *dev);
-extern void ata_scsi_acpi_unbind(struct ata_device *dev);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
 					  pm_message_t state) { }
 static inline void ata_acpi_bind_port(struct ata_port *ap) {}
 static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
 #endif

 /* libata-scsi.c */
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index 4bceb8803a10..b33d1f99b3a4 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev

 	ap->ioaddr.cmd_addr = cmd_addr;

-	if (pnp_port_valid(idev, 1) == 0) {
+	if (pnp_port_valid(idev, 1)) {
 		ctl_addr = devm_ioport_map(&idev->dev,
 					   pnp_port_start(idev, 1), 1);
 		ap->ioaddr.altstatus_addr = ctl_addr;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e59f6535c44..bece691cb5d9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
 		online_type = ONLINE_KEEP;
 	else if (!strncmp(buf, "offline", min_t(int, count, 7)))
 		online_type = -1;
-	else
-		return -EINVAL;
+	else {
+		ret = -EINVAL;
+		goto err;
+	}

 	switch (online_type) {
 	case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
 		ret = -EINVAL; /* should never happen */
 	}

+err:
 	unlock_device_hotplug();

 	if (ret)
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 57f777835d97..33414b1de201 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -44,7 +44,6 @@ struct regmap_format {

 struct regmap_async {
 	struct list_head list;
-	struct work_struct cleanup;
 	struct regmap *map;
 	void *work_buf;
 };
@@ -64,9 +63,11 @@ struct regmap {
 	void *bus_context;
 	const char *name;

+	bool async;
 	spinlock_t async_lock;
 	wait_queue_head_t async_waitq;
 	struct list_head async_list;
+	struct list_head async_free;
 	int async_ret;

 #ifdef CONFIG_DEBUG_FS
@@ -179,6 +180,9 @@ struct regmap_field {
 	/* lsb */
 	unsigned int shift;
 	unsigned int reg;
+
+	unsigned int id_size;
+	unsigned int id_offset;
 };

 #ifdef CONFIG_DEBUG_FS
@@ -218,7 +222,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);

 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len, bool async);
+		      const void *val, size_t val_len);

 void regmap_async_complete_cb(struct regmap_async *async, int ret);

diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d6c2d691b6e8..a36112af494c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -631,8 +631,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,

 	map->cache_bypass = 1;

-	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
-				false);
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes);

 	map->cache_bypass = 0;

diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7d689a15c500..ccdac61ac5e2 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val);

-static void async_cleanup(struct work_struct *work)
-{
-	struct regmap_async *async = container_of(work, struct regmap_async,
-						  cleanup);
-
-	kfree(async->work_buf);
-	kfree(async);
-}
-
 bool regmap_reg_in_ranges(unsigned int reg,
 			  const struct regmap_range *ranges,
 			  unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,

 	spin_lock_init(&map->async_lock);
 	INIT_LIST_HEAD(&map->async_list);
+	INIT_LIST_HEAD(&map->async_free);
 	init_waitqueue_head(&map->async_waitq);

 	if (config->read_flag_mask || config->write_flag_mask) {
@@ -821,6 +813,8 @@ static void regmap_field_init(struct regmap_field *rm_field,
 	rm_field->reg = reg_field.reg;
 	rm_field->shift = reg_field.lsb;
 	rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+	rm_field->id_size = reg_field.id_size;
+	rm_field->id_offset = reg_field.id_offset;
 }

 /**
@@ -942,12 +936,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+	struct regmap_async *async;
+
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
 	regmap_range_exit(map);
 	if (map->bus && map->bus->free_context)
 		map->bus->free_context(map->bus_context);
 	kfree(map->work_buf);
+	while (!list_empty(&map->async_free)) {
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		list_del(&async->list);
+		kfree(async->work_buf);
+		kfree(async);
+	}
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1043,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 }

 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len, bool async)
+		      const void *val, size_t val_len)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1091,7 +1095,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			dev_dbg(map->dev, "Writing window %d/%zu\n",
 				win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write(map, reg, val, win_residue *
-						map->format.val_bytes, async);
+						map->format.val_bytes);
 			if (ret != 0)
 				return ret;

@@ -1114,21 +1118,42 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,

 	u8[0] |= map->write_flag_mask;

-	if (async && map->bus->async_write) {
-		struct regmap_async *async = map->bus->async_alloc();
-		if (!async)
-			return -ENOMEM;
+	/*
+	 * Essentially all I/O mechanisms will be faster with a single
+	 * buffer to write.  Since register syncs often generate raw
+	 * writes of single registers optimise that case.
+	 */
+	if (val != work_val && val_len == map->format.val_bytes) {
+		memcpy(work_val, val, map->format.val_bytes);
+		val = work_val;
+	}
+
+	if (map->async && map->bus->async_write) {
+		struct regmap_async *async;

 		trace_regmap_async_write_start(map->dev, reg, val_len);

-		async->work_buf = kzalloc(map->format.buf_size,
-					  GFP_KERNEL | GFP_DMA);
-		if (!async->work_buf) {
-			kfree(async);
-			return -ENOMEM;
+		spin_lock_irqsave(&map->async_lock, flags);
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		if (async)
+			list_del(&async->list);
+		spin_unlock_irqrestore(&map->async_lock, flags);
+
+		if (!async) {
+			async = map->bus->async_alloc();
+			if (!async)
+				return -ENOMEM;
+
+			async->work_buf = kzalloc(map->format.buf_size,
+						  GFP_KERNEL | GFP_DMA);
+			if (!async->work_buf) {
+				kfree(async);
+				return -ENOMEM;
+			}
 		}

-		INIT_WORK(&async->cleanup, async_cleanup);
 		async->map = map;

 		/* If the caller supplied the value we can use it safely. */
@@ -1152,11 +1177,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 				ret);

 		spin_lock_irqsave(&map->async_lock, flags);
-		list_del(&async->list);
+		list_move(&async->list, &map->async_free);
 		spin_unlock_irqrestore(&map->async_lock, flags);
-
-		kfree(async->work_buf);
-		kfree(async);
 	}

 	return ret;
@@ -1253,7 +1275,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				       map->work_buf +
 				       map->format.reg_bytes +
 				       map->format.pad_bytes,
-				       map->format.val_bytes, false);
+				       map->format.val_bytes);
 }

 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1318,6 +1340,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write);

 /**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_write(map, reg, val);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+/**
  * regmap_raw_write(): Write raw values to one or more registers
  *
  * @map: Register map to write to
@@ -1345,7 +1398,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,

 	map->lock(map->lock_arg);

-	ret = _regmap_raw_write(map, reg, val, val_len, false);
+	ret = _regmap_raw_write(map, reg, val, val_len);

 	map->unlock(map->lock_arg);

@@ -1369,6 +1422,74 @@ int regmap_field_write(struct regmap_field *field, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_field_write);

+/**
+ * regmap_field_update_bits():	Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits(field->regmap, field->reg,
+				  mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_update_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);
+
+/**
+ * regmap_fields_update_bits():	Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+			      unsigned int mask, unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+
 /*
  * regmap_bulk_write(): Write multiple registers to the device
  *
@@ -1426,8 +1547,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			return ret;
 		}
 	} else {
-		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
-					false);
+		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
 	}

 	if (val_bytes != 1)
@@ -1473,7 +1593,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,

 	map->lock(map->lock_arg);

-	ret = _regmap_raw_write(map, reg, val, val_len, true);
+	map->async = true;
+
+	ret = _regmap_raw_write(map, reg, val, val_len);
+
+	map->async = false;

 	map->unlock(map->lock_arg);

@@ -1677,6 +1801,39 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_field_read);

 /**
+ * regmap_fields_read(): Read a value to a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+		       unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	ret = regmap_read(field->regmap,
+			  field->reg + (field->id_offset * id),
+			  &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
  * regmap_bulk_read(): Read multiple registers from the device
  *
  * @map: Register map to write to
@@ -1788,6 +1945,41 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_update_bits);

 /**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ *                           map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+			     unsigned int mask, unsigned int val)
+{
+	bool change;
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, &change);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
+/**
  * regmap_update_bits_check: Perform a read/modify/write cycle on the
  *                           register map and report if updated
  *
@@ -1812,6 +2004,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);

+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ *                                 register map asynchronously and report if
+ *                                 updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+				   unsigned int mask, unsigned int val,
+				   bool *change)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, change);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
 void regmap_async_complete_cb(struct regmap_async *async, int ret)
 {
 	struct regmap *map = async->map;
@@ -1820,8 +2049,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	trace_regmap_async_io_complete(map->dev);

 	spin_lock(&map->async_lock);
-
-	list_del(&async->list);
+	list_move(&async->list, &map->async_free);
 	wake = list_empty(&map->async_list);

 	if (ret != 0)
@@ -1829,8 +2057,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)

 	spin_unlock(&map->async_lock);

-	schedule_work(&async->cleanup);
-
 	if (wake)
 		wake_up(&map->async_waitq);
 }
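
[Illustrative note, not part of the patch] The regmap hunks above add a pool-backed async write path (the map->async flag plus the async_free list reused by regmap_async_complete_cb()) and a port-indexed field API keyed by id_size/id_offset (regmap_fields_write/read/update_bits), alongside regmap_write_async() and regmap_update_bits_async(). A minimal usage sketch follows; the register offsets, the four-port field layout and all example_*/PORT_EN names are assumptions made only for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical layout: one enable bit per port, ports 0x10 apart from 0x40. */
static const struct reg_field example_port_en_field = {
	.reg = 0x40,		/* base register (assumed) */
	.lsb = 0,
	.msb = 0,
	.id_size = 4,		/* four ports (assumed) */
	.id_offset = 0x10,	/* register stride between ports (assumed) */
};

static int example_enable_port(struct device *dev, struct regmap *map,
			       unsigned int port)
{
	struct regmap_field *f;
	unsigned int val;
	int ret;

	f = devm_regmap_field_alloc(dev, map, example_port_en_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* New in this series: per-port write using id_size/id_offset. */
	ret = regmap_fields_write(f, port, 1);
	if (ret)
		return ret;

	/* Read the same port back through the port-indexed helper. */
	ret = regmap_fields_read(f, port, &val);
	if (ret)
		return ret;

	/*
	 * Also new: a fire-and-forget write that reuses buffers from the
	 * regmap_async pool instead of allocating a work item per write.
	 * 0x04 is an assumed control register, not taken from the patch.
	 */
	return regmap_write_async(map, 0x04, 0x1);
}
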
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 06189e55b4e5..94c280d36e8b 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/io/tpmif.h>
 #include <xen/grant_table.h>
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 51410c2ac2cb..4d978a3c88f7 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -27,6 +27,14 @@
  */

 #define SRC_CR			0x00U
+#define SRC_CR_T0_ENSEL		BIT(15)
+#define SRC_CR_T1_ENSEL		BIT(17)
+#define SRC_CR_T2_ENSEL		BIT(19)
+#define SRC_CR_T3_ENSEL		BIT(21)
+#define SRC_CR_T4_ENSEL		BIT(23)
+#define SRC_CR_T5_ENSEL		BIT(25)
+#define SRC_CR_T6_ENSEL		BIT(27)
+#define SRC_CR_T7_ENSEL		BIT(29)
 #define SRC_XTALCR		0x0CU
 #define SRC_XTALCR_XTALTIMEN	BIT(20)
 #define SRC_XTALCR_SXTALDIS	BIT(19)
@@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
 			__func__, np->name);
 		return;
 	}
+
+	/* Set all timers to use the 2.4 MHz TIMCLK */
+	val = readl(src_base + SRC_CR);
+	val |= SRC_CR_T0_ENSEL;
+	val |= SRC_CR_T1_ENSEL;
+	val |= SRC_CR_T2_ENSEL;
+	val |= SRC_CR_T3_ENSEL;
+	val |= SRC_CR_T4_ENSEL;
+	val |= SRC_CR_T5_ENSEL;
+	val |= SRC_CR_T6_ENSEL;
+	val |= SRC_CR_T7_ENSEL;
+	writel(val, src_base + SRC_CR);
+
 	val = readl(src_base + SRC_XTALCR);
 	pr_info("SXTALO is %s\n",
 		(val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index fc777bdc1886..81a202d12a7a 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -39,8 +39,8 @@ static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
 };

 static const u32 a370_tclk_freqs[] __initconst = {
-	16600000,
-	20000000,
+	166000000,
+	200000000,
 };

 static u32 __init a370_get_tclk_freq(void __iomem *sar)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5bb848cac6ec..81dd31a686df 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -49,7 +49,7 @@
 #define SOCFPGA_L4_SP_CLK	"l4_sp_clk"
 #define SOCFPGA_NAND_CLK	"nand_clk"
 #define SOCFPGA_NAND_X_CLK	"nand_x_clk"
-#define SOCFPGA_MMC_CLK		"mmc_clk"
+#define SOCFPGA_MMC_CLK		"sdmmc_clk"
 #define SOCFPGA_DB_CLK		"gpio_db_clk"

 #define div_mask(width)	((1 << (width)) - 1)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4aa7277..f5e4c21b301f 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,

 	vco = icst_hz_to_vco(icst->params, rate);
 	icst->rate = icst_hz(icst->params, vco);
-	vco_set(icst->vcoreg, icst->lockreg, vco);
+	vco_set(icst->lockreg, icst->vcoreg, vco);
 	return 0;
 }

diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128cce9b..c73fc2b74de2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	/*  If cn_netlink_send() failed, the data is not sent */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	ev->what = which_id;
 	ev->event_data.id.process_pid = task->pid;
 	ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)

 	msg = (struct cn_msg *)buffer;
 	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
 	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
 	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
185 193
186 msg = (struct cn_msg *)buffer; 194 msg = (struct cn_msg *)buffer;
187 ev = (struct proc_event *)msg->data; 195 ev = (struct proc_event *)msg->data;
196 memset(&ev->event_data, 0, sizeof(ev->event_data));
188 get_seq(&msg->seq, &ev->cpu); 197 get_seq(&msg->seq, &ev->cpu);
189 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 198 ktime_get_ts(&ts); /* get high res monotonic timestamp */
190 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 199 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
203 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 212 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
204 msg->ack = 0; /* not used */ 213 msg->ack = 0; /* not used */
205 msg->len = sizeof(*ev); 214 msg->len = sizeof(*ev);
215 msg->flags = 0; /* not used */
206 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 216 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
207} 217}
208 218
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
218 228
219 msg = (struct cn_msg *)buffer; 229 msg = (struct cn_msg *)buffer;
220 ev = (struct proc_event *)msg->data; 230 ev = (struct proc_event *)msg->data;
231 memset(&ev->event_data, 0, sizeof(ev->event_data));
221 get_seq(&msg->seq, &ev->cpu); 232 get_seq(&msg->seq, &ev->cpu);
222 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 233 ktime_get_ts(&ts); /* get high res monotonic timestamp */
223 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 234 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
229 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 240 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
230 msg->ack = 0; /* not used */ 241 msg->ack = 0; /* not used */
231 msg->len = sizeof(*ev); 242 msg->len = sizeof(*ev);
243 msg->flags = 0; /* not used */
232 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 244 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
233} 245}
234 246
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
244 256
245 msg = (struct cn_msg *)buffer; 257 msg = (struct cn_msg *)buffer;
246 ev = (struct proc_event *)msg->data; 258 ev = (struct proc_event *)msg->data;
259 memset(&ev->event_data, 0, sizeof(ev->event_data));
247 get_seq(&msg->seq, &ev->cpu); 260 get_seq(&msg->seq, &ev->cpu);
248 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 261 ktime_get_ts(&ts); /* get high res monotonic timestamp */
249 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 262 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
254 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 267 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
255 msg->ack = 0; /* not used */ 268 msg->ack = 0; /* not used */
256 msg->len = sizeof(*ev); 269 msg->len = sizeof(*ev);
270 msg->flags = 0; /* not used */
257 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 271 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
258} 272}
259 273
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
269 283
270 msg = (struct cn_msg *)buffer; 284 msg = (struct cn_msg *)buffer;
271 ev = (struct proc_event *)msg->data; 285 ev = (struct proc_event *)msg->data;
286 memset(&ev->event_data, 0, sizeof(ev->event_data));
272 get_seq(&msg->seq, &ev->cpu); 287 get_seq(&msg->seq, &ev->cpu);
273 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 288 ktime_get_ts(&ts); /* get high res monotonic timestamp */
274 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 289 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
281 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 296 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
282 msg->ack = 0; /* not used */ 297 msg->ack = 0; /* not used */
283 msg->len = sizeof(*ev); 298 msg->len = sizeof(*ev);
299 msg->flags = 0; /* not used */
284 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 300 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
285} 301}
286 302
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
304 320
305 msg = (struct cn_msg *)buffer; 321 msg = (struct cn_msg *)buffer;
306 ev = (struct proc_event *)msg->data; 322 ev = (struct proc_event *)msg->data;
323 memset(&ev->event_data, 0, sizeof(ev->event_data));
307 msg->seq = rcvd_seq; 324 msg->seq = rcvd_seq;
308 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 325 ktime_get_ts(&ts); /* get high res monotonic timestamp */
309 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 326 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
313 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 330 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
314 msg->ack = rcvd_ack + 1; 331 msg->ack = rcvd_ack + 1;
315 msg->len = sizeof(*ev); 332 msg->len = sizeof(*ev);
333 msg->flags = 0; /* not used */
316 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); 334 cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
317} 335}
318 336
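The cn_proc hunks above all apply one rule: every event buffer handed to cn_netlink_send() is fully initialized, with the whole event_data union cleared and the unused flags field set to zero, so padding bytes and unused union members can no longer leak kernel stack contents to userspace listeners. A minimal userspace sketch of the same rule, using a hypothetical message layout rather than struct cn_msg itself:

#include <stdint.h>
#include <string.h>

struct demo_msg {                        /* stand-in for struct cn_msg */
	uint32_t seq;
	uint32_t ack;
	uint16_t len;
	uint16_t flags;
	uint8_t  data[64];               /* event payload lives here */
};

static void demo_fill(struct demo_msg *msg, const void *event, size_t event_len)
{
	if (event_len > sizeof(msg->data))
		event_len = sizeof(msg->data);

	memset(msg->data, 0, sizeof(msg->data)); /* clear padding/unused members */
	memcpy(msg->data, event, event_len);

	msg->len   = (uint16_t)event_len;
	msg->ack   = 0;                          /* not used */
	msg->flags = 0;                          /* previously left uninitialized */
}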
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa758942c..a36749f1e44a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
109 109
110 data = nlmsg_data(nlh); 110 data = nlmsg_data(nlh);
111 111
112 memcpy(data, msg, sizeof(*data) + msg->len); 112 memcpy(data, msg, size);
113 113
114 NETLINK_CB(skb).dst_group = group; 114 NETLINK_CB(skb).dst_group = group;
115 115
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
157static void cn_rx_skb(struct sk_buff *__skb) 157static void cn_rx_skb(struct sk_buff *__skb)
158{ 158{
159 struct nlmsghdr *nlh; 159 struct nlmsghdr *nlh;
160 int err;
161 struct sk_buff *skb; 160 struct sk_buff *skb;
161 int len, err;
162 162
163 skb = skb_get(__skb); 163 skb = skb_get(__skb);
164 164
165 if (skb->len >= NLMSG_HDRLEN) { 165 if (skb->len >= NLMSG_HDRLEN) {
166 nlh = nlmsg_hdr(skb); 166 nlh = nlmsg_hdr(skb);
167 len = nlmsg_len(nlh);
167 168
168 if (nlh->nlmsg_len < sizeof(struct cn_msg) || 169 if (len < (int)sizeof(struct cn_msg) ||
169 skb->len < nlh->nlmsg_len || 170 skb->len < nlh->nlmsg_len ||
170 nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { 171 len > CONNECTOR_MAX_MSG_SIZE) {
171 kfree_skb(skb); 172 kfree_skb(skb);
172 return; 173 return;
173 } 174 }
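In cn_rx_skb() the validation now works on nlmsg_len(nlh), i.e. the payload length with the netlink header already subtracted, so a message can no longer satisfy the minimum-size check with header bytes alone, and the CONNECTOR_MAX_MSG_SIZE cap applies to the payload the callbacks will actually parse. A self-contained sketch of that check, with an invented header type standing in for struct nlmsghdr:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_MAX_MSG_SIZE 16384

struct demo_nlhdr { uint32_t nlmsg_len; };   /* total length, header included */

static bool demo_msg_ok(const struct demo_nlhdr *h, size_t skb_len,
			size_t min_payload)
{
	int payload;

	if (h->nlmsg_len < sizeof(*h))
		return false;                    /* shorter than its own header */
	payload = (int)(h->nlmsg_len - sizeof(*h));

	return payload >= (int)min_payload &&    /* room for the fixed struct  */
	       skb_len >= h->nlmsg_len &&        /* claim fits in the buffer   */
	       payload <= DEMO_MAX_MSG_SIZE;     /* and is not absurdly large  */
}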
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d2c3253e015e..506fd23c7550 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
986{ 986{
987 int ret; 987 int ret;
988 988
989 if (acpi_disabled)
990 return -ENODEV;
991
989 /* don't keep reloading if cpufreq_driver exists */ 992 /* don't keep reloading if cpufreq_driver exists */
990 if (cpufreq_get_current_driver()) 993 if (cpufreq_get_current_driver())
991 return 0; 994 return -EEXIST;
992
993 if (acpi_disabled)
994 return 0;
995 995
996 pr_debug("acpi_cpufreq_init\n"); 996 pr_debug("acpi_cpufreq_init\n");
997 997
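The acpi-cpufreq reorder also changes what the module reports: a system without ACPI now returns -ENODEV, and loading while another cpufreq driver already owns the CPUs returns -EEXIST instead of pretending to succeed. A sketch of that ordering, with simulated platform state instead of the real probes:

#include <errno.h>
#include <stdbool.h>

static bool demo_acpi_present = true;    /* simulated platform state    */
static bool demo_other_driver = false;   /* simulated registered driver */

static int demo_register(void) { return 0; }

static int demo_cpufreq_init(void)
{
	if (!demo_acpi_present)
		return -ENODEV;          /* platform can never support this driver */
	if (demo_other_driver)
		return -EEXIST;          /* loaded twice / someone else got there  */
	return demo_register();
}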
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 32b3479a2405..eb3fdc755000 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 int core_pct_busy; 51 int32_t core_pct_busy;
52 u64 aperf; 52 u64 aperf;
53 u64 mperf; 53 u64 mperf;
54 int freq; 54 int freq;
@@ -68,7 +68,7 @@ struct _pid {
68 int32_t i_gain; 68 int32_t i_gain;
69 int32_t d_gain; 69 int32_t d_gain;
70 int deadband; 70 int deadband;
71 int last_err; 71 int32_t last_err;
72}; 72};
73 73
74struct cpudata { 74struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 153 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
154} 154}
155 155
156static signed int pid_calc(struct _pid *pid, int busy) 156static signed int pid_calc(struct _pid *pid, int32_t busy)
157{ 157{
158 signed int err, result; 158 signed int result;
159 int32_t pterm, dterm, fp_error; 159 int32_t pterm, dterm, fp_error;
160 int32_t integral_limit; 160 int32_t integral_limit;
161 161
162 err = pid->setpoint - busy; 162 fp_error = int_tofp(pid->setpoint) - busy;
163 fp_error = int_tofp(err);
164 163
165 if (abs(err) <= pid->deadband) 164 if (abs(fp_error) <= int_tofp(pid->deadband))
166 return 0; 165 return 0;
167 166
168 pterm = mul_fp(pid->p_gain, fp_error); 167 pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
176 if (pid->integral < -integral_limit) 175 if (pid->integral < -integral_limit)
177 pid->integral = -integral_limit; 176 pid->integral = -integral_limit;
178 177
179 dterm = mul_fp(pid->d_gain, (err - pid->last_err)); 178 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
180 pid->last_err = err; 179 pid->last_err = fp_error;
181 180
182 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 181 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
183 182
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
367static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 366static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
368{ 367{
369 int max_perf = cpu->pstate.turbo_pstate; 368 int max_perf = cpu->pstate.turbo_pstate;
369 int max_perf_adj;
370 int min_perf; 370 int min_perf;
371 if (limits.no_turbo) 371 if (limits.no_turbo)
372 max_perf = cpu->pstate.max_pstate; 372 max_perf = cpu->pstate.max_pstate;
373 373
374 max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 374 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
375 *max = clamp_t(int, max_perf, 375 *max = clamp_t(int, max_perf_adj,
376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 376 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
377 377
378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); 378 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 383static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
384{ 384{
385 int max_perf, min_perf; 385 int max_perf, min_perf;
386 u64 val;
386 387
387 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 388 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
388 389
@@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
394 trace_cpu_frequency(pstate * 100000, cpu->cpu); 395 trace_cpu_frequency(pstate * 100000, cpu->cpu);
395 396
396 cpu->pstate.current_pstate = pstate; 397 cpu->pstate.current_pstate = pstate;
398 val = pstate << 8;
397 if (limits.no_turbo) 399 if (limits.no_turbo)
398 wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8)); 400 val |= (u64)1 << 32;
399 else
400 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
401 401
402 wrmsrl(MSR_IA32_PERF_CTL, val);
402} 403}
403 404
404static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) 405static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -435,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
435 struct sample *sample) 436 struct sample *sample)
436{ 437{
437 u64 core_pct; 438 u64 core_pct;
438 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 439 core_pct = div64_u64(int_tofp(sample->aperf * 100),
439 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 440 sample->mperf);
441 sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
440 442
441 sample->core_pct_busy = core_pct; 443 sample->core_pct_busy = core_pct;
442} 444}
@@ -468,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
468 mod_timer_pinned(&cpu->timer, jiffies + delay); 470 mod_timer_pinned(&cpu->timer, jiffies + delay);
469} 471}
470 472
471static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 473static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
472{ 474{
473 int32_t busy_scaled;
474 int32_t core_busy, max_pstate, current_pstate; 475 int32_t core_busy, max_pstate, current_pstate;
475 476
476 core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); 477 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
477 max_pstate = int_tofp(cpu->pstate.max_pstate); 478 max_pstate = int_tofp(cpu->pstate.max_pstate);
478 current_pstate = int_tofp(cpu->pstate.current_pstate); 479 current_pstate = int_tofp(cpu->pstate.current_pstate);
479 busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 480 return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
480
481 return fp_toint(busy_scaled);
482} 481}
483 482
484static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) 483static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
485{ 484{
486 int busy_scaled; 485 int32_t busy_scaled;
487 struct _pid *pid; 486 struct _pid *pid;
488 signed int ctl = 0; 487 signed int ctl = 0;
489 int steps; 488 int steps;
@@ -637,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
637 636
638static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 637static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
639{ 638{
640 int rc, min_pstate, max_pstate;
641 struct cpudata *cpu; 639 struct cpudata *cpu;
640 int rc;
642 641
643 rc = intel_pstate_init_cpu(policy->cpu); 642 rc = intel_pstate_init_cpu(policy->cpu);
644 if (rc) 643 if (rc)
@@ -652,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
652 else 651 else
653 policy->policy = CPUFREQ_POLICY_POWERSAVE; 652 policy->policy = CPUFREQ_POLICY_POWERSAVE;
654 653
655 intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); 654 policy->min = cpu->pstate.min_pstate * 100000;
656 policy->min = min_pstate * 100000; 655 policy->max = cpu->pstate.turbo_pstate * 100000;
657 policy->max = max_pstate * 100000;
658 656
659 /* cpuinfo and default policy values */ 657 /* cpuinfo and default policy values */
660 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; 658 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
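The intel_pstate hunks keep the busy figure and the PID error in 32-bit fixed point instead of truncating to whole integers between steps; that is what the int32_t conversions, the int_tofp()/fp_toint() calls and the fp_error/last_err changes accomplish. A minimal sketch of such helpers, assuming the driver's 8 fractional bits:

#include <stdint.h>

#define FRAC_BITS 8                              /* assumption: 24.8 format */
#define int_tofp(x) ((int32_t)((x) << FRAC_BITS))
#define fp_toint(x) ((int32_t)((x) >> FRAC_BITS))

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

/* Proportional term computed without ever rounding the error to an int,
 * so sub-unit busy percentages still influence the controller. */
static int32_t demo_p_term(int setpoint, int32_t busy_fp, int32_t p_gain_fp)
{
	int32_t fp_error = int_tofp(setpoint) - busy_fp;

	return mul_fp(p_gain_fp, fp_error);
}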
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 8a72b0c555f8..15631f92ab7d 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
166 if (freq->frequency == CPUFREQ_ENTRY_INVALID) 166 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
167 continue; 167 continue;
168 168
169 dvfs = &s3c64xx_dvfs_table[freq->index]; 169 dvfs = &s3c64xx_dvfs_table[freq->driver_data];
170 found = 0; 170 found = 0;
171 171
172 for (i = 0; i < count; i++) { 172 for (i = 0; i < count; i++) {
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3519111c566b..10b577fcf48d 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -305,6 +305,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
305 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 305 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
306 EDMA_SLOT_ANY); 306 EDMA_SLOT_ANY);
307 if (echan->slot[i] < 0) { 307 if (echan->slot[i] < 0) {
308 kfree(edesc);
308 dev_err(dev, "Failed to allocate slot\n"); 309 dev_err(dev, "Failed to allocate slot\n");
309 kfree(edesc); 310 kfree(edesc);
310 return NULL; 311 return NULL;
@@ -346,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
346 ccnt = sg_dma_len(sg) / (acnt * bcnt); 347 ccnt = sg_dma_len(sg) / (acnt * bcnt);
347 if (ccnt > (SZ_64K - 1)) { 348 if (ccnt > (SZ_64K - 1)) {
348 dev_err(dev, "Exceeded max SG segment size\n"); 349 dev_err(dev, "Exceeded max SG segment size\n");
350 kfree(edesc);
349 return NULL; 351 return NULL;
350 } 352 }
351 cidx = acnt * bcnt; 353 cidx = acnt * bcnt;
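Both edma hunks enforce the same error-path rule: once edesc has been allocated, every early return NULL must release it, otherwise the descriptor leaks whenever slot allocation fails or an SG segment is too large. Note that in the first hunk the function appears to already free edesc after dev_err() (old line 309), so with the newly added call that failure path would free the pointer twice and presumably needs a follow-up. The intended single-free shape, reduced to plain C:

#include <stdio.h>
#include <stdlib.h>

struct demo_desc { int nslots; };

/* Exactly one free() per early return once the descriptor exists. */
static struct demo_desc *demo_prep(int slot, long seg_len, long max_seg)
{
	struct demo_desc *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;

	if (slot < 0) {
		fprintf(stderr, "Failed to allocate slot\n");
		free(d);                 /* single release on this path */
		return NULL;
	}

	if (seg_len > max_seg) {
		fprintf(stderr, "Exceeded max SG segment size\n");
		free(d);                 /* previously this path leaked */
		return NULL;
	}

	d->nslots = 1;
	return d;
}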
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 2d9ca6055e5e..41b5913ddabe 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data); 248 struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
249 struct irq_chip *chip = irq_data_get_irq_chip(data); 249 struct irq_chip *chip = irq_data_get_irq_chip(data);
250 u32 base, pin, mask; 250 u32 base, pin, mask;
251 unsigned long reg, pending; 251 unsigned long reg, ena, pending;
252 unsigned virq; 252 unsigned virq;
253 253
254 /* check from GPIO controller which pin triggered the interrupt */ 254 /* check from GPIO controller which pin triggered the interrupt */
255 for (base = 0; base < lg->chip.ngpio; base += 32) { 255 for (base = 0; base < lg->chip.ngpio; base += 32) {
256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); 256 reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
257 ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
257 258
258 while ((pending = inl(reg))) { 259 while ((pending = (inl(reg) & inl(ena)))) {
259 pin = __ffs(pending); 260 pin = __ffs(pending);
260 mask = BIT(pin); 261 mask = BIT(pin);
261 /* Clear before handling so we don't lose an edge */ 262 /* Clear before handling so we don't lose an edge */
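The lynxpoint handler now reads both the status and the enable register for each bank and loops only over their intersection, so a status bit belonging to a masked pin cannot send the demux chasing an interrupt nobody enabled. A small self-contained model of that loop, with simulated registers instead of port I/O:

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_stat = 0x0000000a;  /* pins 1 and 3 pending            */
static uint32_t demo_ena  = 0x00000008;  /* only pin 3 is actually enabled  */

static void demo_handle(unsigned pin)
{
	printf("servicing pin %u\n", pin);
}

static void demo_demux(void)
{
	uint32_t pending;

	while ((pending = (demo_stat & demo_ena)) != 0) {
		unsigned pin = (unsigned)__builtin_ctz(pending); /* lowest pending */

		demo_stat &= ~(1u << pin);  /* ack first so a new edge isn't lost */
		demo_handle(pin);
	}
}

int main(void)
{
	demo_demux();                       /* prints only "servicing pin 3" */
	return 0;
}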
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86ef3461ec06..0dee0e0c247a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
136 */ 136 */
137static int desc_to_gpio(const struct gpio_desc *desc) 137static int desc_to_gpio(const struct gpio_desc *desc)
138{ 138{
139 return desc->chip->base + gpio_chip_hwgpio(desc); 139 return desc - &gpio_desc[0];
140} 140}
141 141
142 142
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1398 int status = -EPROBE_DEFER; 1398 int status = -EPROBE_DEFER;
1399 unsigned long flags; 1399 unsigned long flags;
1400 1400
1401 if (!desc || !desc->chip) { 1401 if (!desc) {
1402 pr_warn("%s: invalid GPIO\n", __func__); 1402 pr_warn("%s: invalid GPIO\n", __func__);
1403 return -EINVAL; 1403 return -EINVAL;
1404 } 1404 }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
1406 spin_lock_irqsave(&gpio_lock, flags); 1406 spin_lock_irqsave(&gpio_lock, flags);
1407 1407
1408 chip = desc->chip; 1408 chip = desc->chip;
1409 if (chip == NULL)
1410 goto done;
1409 1411
1410 if (!try_module_get(chip->owner)) 1412 if (!try_module_get(chip->owner))
1411 goto done; 1413 goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e572dd20bdee..fe58d0833a11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED), 64 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 65 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 66 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 67 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
402 cmd = ioctl->cmd_drv; 402 cmd = ioctl->cmd_drv;
403 } 403 }
404 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { 404 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
405 u32 drv_size;
406
405 ioctl = &drm_ioctls[nr]; 407 ioctl = &drm_ioctls[nr];
406 cmd = ioctl->cmd; 408
409 drv_size = _IOC_SIZE(ioctl->cmd);
407 usize = asize = _IOC_SIZE(cmd); 410 usize = asize = _IOC_SIZE(cmd);
411 if (drv_size > asize)
412 asize = drv_size;
413
414 cmd = ioctl->cmd;
408 } else 415 } else
409 goto err_i1; 416 goto err_i1;
410 417
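For core ioctls, drm_ioctl() now compares the size encoded in the userspace command with the size declared in the driver's ioctl table and allocates the larger of the two, so an older userspace passing a shorter struct still gets a kernel-side buffer big enough for the current definition. Roughly, as a sketch (the size-field extraction mirrors _IOC_SIZE but is written out here only for illustration):

#include <stddef.h>

static size_t demo_ioc_size(unsigned long cmd)
{
	return (cmd >> 16) & 0x3fff;        /* size field of an ioctl number */
}

static size_t demo_arg_size(unsigned long user_cmd, unsigned long table_cmd)
{
	size_t usize    = demo_ioc_size(user_cmd);   /* what userspace passed   */
	size_t drv_size = demo_ioc_size(table_cmd);  /* what the driver expects */

	return drv_size > usize ? drv_size : usize;
}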
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 69d8ed5416c3..2ad27880cd04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
505 intel_modeset_suspend_hw(dev); 505 intel_modeset_suspend_hw(dev);
506 } 506 }
507 507
508 i915_gem_suspend_gtt_mappings(dev);
509
508 i915_save_state(dev); 510 i915_save_state(dev);
509 511
510 intel_opregion_fini(dev); 512 intel_opregion_fini(dev);
@@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
648 mutex_lock(&dev->struct_mutex); 650 mutex_lock(&dev->struct_mutex);
649 i915_gem_restore_gtt_mappings(dev); 651 i915_gem_restore_gtt_mappings(dev);
650 mutex_unlock(&dev->struct_mutex); 652 mutex_unlock(&dev->struct_mutex);
651 } 653 } else if (drm_core_check_feature(dev, DRIVER_MODESET))
654 i915_check_and_clear_faults(dev);
652 655
653 __i915_drm_thaw(dev); 656 __i915_drm_thaw(dev);
654 657
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35874b3a86dc..ab0f2c0a440c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -497,10 +497,12 @@ struct i915_address_space {
497 497
498 /* FIXME: Need a more generic return type */ 498 /* FIXME: Need a more generic return type */
499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, 499 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
500 enum i915_cache_level level); 500 enum i915_cache_level level,
501 bool valid); /* Create a valid PTE */
501 void (*clear_range)(struct i915_address_space *vm, 502 void (*clear_range)(struct i915_address_space *vm,
502 unsigned int first_entry, 503 unsigned int first_entry,
503 unsigned int num_entries); 504 unsigned int num_entries,
505 bool use_scratch);
504 void (*insert_entries)(struct i915_address_space *vm, 506 void (*insert_entries)(struct i915_address_space *vm,
505 struct sg_table *st, 507 struct sg_table *st,
506 unsigned int first_entry, 508 unsigned int first_entry,
@@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
2065void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 2067void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
2066 struct drm_i915_gem_object *obj); 2068 struct drm_i915_gem_object *obj);
2067 2069
2070void i915_check_and_clear_faults(struct drm_device *dev);
2071void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
2068void i915_gem_restore_gtt_mappings(struct drm_device *dev); 2072void i915_gem_restore_gtt_mappings(struct drm_device *dev);
2069int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); 2073int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
2070void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 2074void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..1f7b4caefb6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -58,9 +58,10 @@
58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) 58#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
59 59
60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, 60static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
61 enum i915_cache_level level) 61 enum i915_cache_level level,
62 bool valid)
62{ 63{
63 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 64 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
64 pte |= GEN6_PTE_ADDR_ENCODE(addr); 65 pte |= GEN6_PTE_ADDR_ENCODE(addr);
65 66
66 switch (level) { 67 switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
79} 80}
80 81
81static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, 82static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
82 enum i915_cache_level level) 83 enum i915_cache_level level,
84 bool valid)
83{ 85{
84 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 86 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
85 pte |= GEN6_PTE_ADDR_ENCODE(addr); 87 pte |= GEN6_PTE_ADDR_ENCODE(addr);
86 88
87 switch (level) { 89 switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
105#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) 107#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
106 108
107static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, 109static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
108 enum i915_cache_level level) 110 enum i915_cache_level level,
111 bool valid)
109{ 112{
110 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 113 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
111 pte |= GEN6_PTE_ADDR_ENCODE(addr); 114 pte |= GEN6_PTE_ADDR_ENCODE(addr);
112 115
113 /* Mark the page as writeable. Other platforms don't have a 116 /* Mark the page as writeable. Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
122} 125}
123 126
124static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, 127static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
125 enum i915_cache_level level) 128 enum i915_cache_level level,
129 bool valid)
126{ 130{
127 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 131 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
128 pte |= HSW_PTE_ADDR_ENCODE(addr); 132 pte |= HSW_PTE_ADDR_ENCODE(addr);
129 133
130 if (level != I915_CACHE_NONE) 134 if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
134} 138}
135 139
136static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, 140static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
137 enum i915_cache_level level) 141 enum i915_cache_level level,
142 bool valid)
138{ 143{
139 gen6_gtt_pte_t pte = GEN6_PTE_VALID; 144 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
140 pte |= HSW_PTE_ADDR_ENCODE(addr); 145 pte |= HSW_PTE_ADDR_ENCODE(addr);
141 146
142 switch (level) { 147 switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
236/* PPGTT support for Sandybridge/Gen6 and later */ 241/* PPGTT support for Sandybridge/Gen6 and later */
237static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 242static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
238 unsigned first_entry, 243 unsigned first_entry,
239 unsigned num_entries) 244 unsigned num_entries,
245 bool use_scratch)
240{ 246{
241 struct i915_hw_ppgtt *ppgtt = 247 struct i915_hw_ppgtt *ppgtt =
242 container_of(vm, struct i915_hw_ppgtt, base); 248 container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
245 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 251 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
246 unsigned last_pte, i; 252 unsigned last_pte, i;
247 253
248 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 254 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
249 255
250 while (num_entries) { 256 while (num_entries) {
251 last_pte = first_pte + num_entries; 257 last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
282 dma_addr_t page_addr; 288 dma_addr_t page_addr;
283 289
284 page_addr = sg_page_iter_dma_address(&sg_iter); 290 page_addr = sg_page_iter_dma_address(&sg_iter);
285 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); 291 pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
286 if (++act_pte == I915_PPGTT_PT_ENTRIES) { 292 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
287 kunmap_atomic(pt_vaddr); 293 kunmap_atomic(pt_vaddr);
288 act_pt++; 294 act_pt++;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
367 } 373 }
368 374
369 ppgtt->base.clear_range(&ppgtt->base, 0, 375 ppgtt->base.clear_range(&ppgtt->base, 0,
370 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); 376 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
371 377
372 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 378 ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
373 379
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
444{ 450{
445 ppgtt->base.clear_range(&ppgtt->base, 451 ppgtt->base.clear_range(&ppgtt->base,
446 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 452 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
447 obj->base.size >> PAGE_SHIFT); 453 obj->base.size >> PAGE_SHIFT,
454 true);
448} 455}
449 456
450extern int intel_iommu_gfx_mapped; 457extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
485 dev_priv->mm.interruptible = interruptible; 492 dev_priv->mm.interruptible = interruptible;
486} 493}
487 494
495void i915_check_and_clear_faults(struct drm_device *dev)
496{
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 struct intel_ring_buffer *ring;
499 int i;
500
501 if (INTEL_INFO(dev)->gen < 6)
502 return;
503
504 for_each_ring(ring, dev_priv, i) {
505 u32 fault_reg;
506 fault_reg = I915_READ(RING_FAULT_REG(ring));
507 if (fault_reg & RING_FAULT_VALID) {
508 DRM_DEBUG_DRIVER("Unexpected fault\n"
509 "\tAddr: 0x%08lx\\n"
510 "\tAddress space: %s\n"
511 "\tSource ID: %d\n"
512 "\tType: %d\n",
513 fault_reg & PAGE_MASK,
514 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
515 RING_FAULT_SRCID(fault_reg),
516 RING_FAULT_FAULT_TYPE(fault_reg));
517 I915_WRITE(RING_FAULT_REG(ring),
518 fault_reg & ~RING_FAULT_VALID);
519 }
520 }
521 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
522}
523
524void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
525{
526 struct drm_i915_private *dev_priv = dev->dev_private;
527
528 /* Don't bother messing with faults pre GEN6 as we have little
529 * documentation supporting that it's a good idea.
530 */
531 if (INTEL_INFO(dev)->gen < 6)
532 return;
533
534 i915_check_and_clear_faults(dev);
535
536 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
537 dev_priv->gtt.base.start / PAGE_SIZE,
538 dev_priv->gtt.base.total / PAGE_SIZE,
539 false);
540}
541
488void i915_gem_restore_gtt_mappings(struct drm_device *dev) 542void i915_gem_restore_gtt_mappings(struct drm_device *dev)
489{ 543{
490 struct drm_i915_private *dev_priv = dev->dev_private; 544 struct drm_i915_private *dev_priv = dev->dev_private;
491 struct drm_i915_gem_object *obj; 545 struct drm_i915_gem_object *obj;
492 546
547 i915_check_and_clear_faults(dev);
548
493 /* First fill our portion of the GTT with scratch pages */ 549 /* First fill our portion of the GTT with scratch pages */
494 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 550 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
495 dev_priv->gtt.base.start / PAGE_SIZE, 551 dev_priv->gtt.base.start / PAGE_SIZE,
496 dev_priv->gtt.base.total / PAGE_SIZE); 552 dev_priv->gtt.base.total / PAGE_SIZE,
553 true);
497 554
498 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 555 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
499 i915_gem_clflush_object(obj, obj->pin_display); 556 i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
536 593
537 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 594 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
538 addr = sg_page_iter_dma_address(&sg_iter); 595 addr = sg_page_iter_dma_address(&sg_iter);
539 iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); 596 iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
540 i++; 597 i++;
541 } 598 }
542 599
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
548 */ 605 */
549 if (i != 0) 606 if (i != 0)
550 WARN_ON(readl(&gtt_entries[i-1]) != 607 WARN_ON(readl(&gtt_entries[i-1]) !=
551 vm->pte_encode(addr, level)); 608 vm->pte_encode(addr, level, true));
552 609
553 /* This next bit makes the above posting read even more important. We 610 /* This next bit makes the above posting read even more important. We
554 * want to flush the TLBs only after we're certain all the PTE updates 611 * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
560 617
561static void gen6_ggtt_clear_range(struct i915_address_space *vm, 618static void gen6_ggtt_clear_range(struct i915_address_space *vm,
562 unsigned int first_entry, 619 unsigned int first_entry,
563 unsigned int num_entries) 620 unsigned int num_entries,
621 bool use_scratch)
564{ 622{
565 struct drm_i915_private *dev_priv = vm->dev->dev_private; 623 struct drm_i915_private *dev_priv = vm->dev->dev_private;
566 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = 624 gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
573 first_entry, num_entries, max_entries)) 631 first_entry, num_entries, max_entries))
574 num_entries = max_entries; 632 num_entries = max_entries;
575 633
576 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); 634 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
635
577 for (i = 0; i < num_entries; i++) 636 for (i = 0; i < num_entries; i++)
578 iowrite32(scratch_pte, &gtt_base[i]); 637 iowrite32(scratch_pte, &gtt_base[i]);
579 readl(gtt_base); 638 readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
594 653
595static void i915_ggtt_clear_range(struct i915_address_space *vm, 654static void i915_ggtt_clear_range(struct i915_address_space *vm,
596 unsigned int first_entry, 655 unsigned int first_entry,
597 unsigned int num_entries) 656 unsigned int num_entries,
657 bool unused)
598{ 658{
599 intel_gtt_clear_range(first_entry, num_entries); 659 intel_gtt_clear_range(first_entry, num_entries);
600} 660}
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
622 682
623 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 683 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
624 entry, 684 entry,
625 obj->base.size >> PAGE_SHIFT); 685 obj->base.size >> PAGE_SHIFT,
686 true);
626 687
627 obj->has_global_gtt_mapping = 0; 688 obj->has_global_gtt_mapping = 0;
628} 689}
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
709 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; 770 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
710 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 771 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
711 hole_start, hole_end); 772 hole_start, hole_end);
712 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); 773 ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
713 } 774 }
714 775
715 /* And finally clear the reserved guard page */ 776 /* And finally clear the reserved guard page */
716 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); 777 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
717} 778}
718 779
719static bool 780static bool
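The thread running through the i915_gem_gtt.c changes is that every pte_encode() callback gains a valid argument and every clear_range() gains use_scratch: at runtime the GGTT is still scrubbed with PTEs pointing at the scratch page, but on suspend i915_gem_suspend_gtt_mappings() writes entries without the valid bit, so anything touching a stale mapping after resume faults visibly and i915_check_and_clear_faults() can log and clear it. A toy encoder showing the two cases, with an invented bit layout:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_PTE_VALID     (1u << 0)            /* illustrative bit positions */
#define DEMO_PTE_ADDR_MASK 0xfffff000u

static uint32_t demo_pte_encode(uint64_t addr, bool valid)
{
	uint32_t pte = valid ? DEMO_PTE_VALID : 0;

	return pte | ((uint32_t)addr & DEMO_PTE_ADDR_MASK);
}

static void demo_clear_range(uint32_t *gtt, unsigned first, unsigned count,
			     uint64_t scratch_addr, bool use_scratch)
{
	/* use_scratch: harmless accesses land on the scratch page (runtime).
	 * !use_scratch: entries become invalid so stale users fault (suspend). */
	uint32_t pte = demo_pte_encode(scratch_addr, use_scratch);
	unsigned i;

	for (i = 0; i < count; i++)
		gtt[first + i] = pte;
}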
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 38f96f65d87a..ef9b35479f01 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -604,6 +604,10 @@
604#define ARB_MODE_SWIZZLE_IVB (1<<5) 604#define ARB_MODE_SWIZZLE_IVB (1<<5)
605#define RENDER_HWS_PGA_GEN7 (0x04080) 605#define RENDER_HWS_PGA_GEN7 (0x04080)
606#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) 606#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
607#define RING_FAULT_GTTSEL_MASK (1<<11)
608#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff)
609#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
610#define RING_FAULT_VALID (1<<0)
607#define DONE_REG 0x40b0 611#define DONE_REG 0x40b0
608#define BSD_HWS_PGA_GEN7 (0x04180) 612#define BSD_HWS_PGA_GEN7 (0x04180)
609#define BLT_HWS_PGA_GEN7 (0x04280) 613#define BLT_HWS_PGA_GEN7 (0x04280)
@@ -4279,7 +4283,9 @@
4279#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 4283#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
4280 4284
4281#define SOUTH_DSPCLK_GATE_D 0xc2020 4285#define SOUTH_DSPCLK_GATE_D 0xc2020
4286#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
4282#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 4287#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
4288#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
4283#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) 4289#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
4284 4290
4285/* CPU: FDI_TX */ 4291/* CPU: FDI_TX */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ea9022ef15d5..10d1de5bce6f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
83 return true; 83 return true;
84} 84}
85 85
86static void intel_crt_get_config(struct intel_encoder *encoder, 86static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
87 struct intel_crtc_config *pipe_config)
88{ 87{
89 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 88 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
90 struct intel_crt *crt = intel_encoder_to_crt(encoder); 89 struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,25 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
102 else 101 else
103 flags |= DRM_MODE_FLAG_NVSYNC; 102 flags |= DRM_MODE_FLAG_NVSYNC;
104 103
105 pipe_config->adjusted_mode.flags |= flags; 104 return flags;
105}
106
107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config)
109{
110 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
111}
112
113static void hsw_crt_get_config(struct intel_encoder *encoder,
114 struct intel_crtc_config *pipe_config)
115{
116 intel_ddi_get_config(encoder, pipe_config);
117
118 pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
119 DRM_MODE_FLAG_NHSYNC |
120 DRM_MODE_FLAG_PVSYNC |
121 DRM_MODE_FLAG_NVSYNC);
122 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
106} 123}
107 124
108/* Note: The caller is required to filter out dpms modes not supported by the 125/* Note: The caller is required to filter out dpms modes not supported by the
@@ -799,7 +816,10 @@ void intel_crt_init(struct drm_device *dev)
799 crt->base.mode_set = intel_crt_mode_set; 816 crt->base.mode_set = intel_crt_mode_set;
800 crt->base.disable = intel_disable_crt; 817 crt->base.disable = intel_disable_crt;
801 crt->base.enable = intel_enable_crt; 818 crt->base.enable = intel_enable_crt;
802 crt->base.get_config = intel_crt_get_config; 819 if (IS_HASWELL(dev))
820 crt->base.get_config = hsw_crt_get_config;
821 else
822 crt->base.get_config = intel_crt_get_config;
803 if (I915_HAS_HOTPLUG(dev)) 823 if (I915_HAS_HOTPLUG(dev))
804 crt->base.hpd_pin = HPD_CRT; 824 crt->base.hpd_pin = HPD_CRT;
805 if (HAS_DDI(dev)) 825 if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de2701b974..b53fff84a7d5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1249 intel_dp_check_link_status(intel_dp); 1249 intel_dp_check_link_status(intel_dp);
1250} 1250}
1251 1251
1252static void intel_ddi_get_config(struct intel_encoder *encoder, 1252void intel_ddi_get_config(struct intel_encoder *encoder,
1253 struct intel_crtc_config *pipe_config) 1253 struct intel_crtc_config *pipe_config)
1254{ 1254{
1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1255 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
1268 flags |= DRM_MODE_FLAG_NVSYNC; 1268 flags |= DRM_MODE_FLAG_NVSYNC;
1269 1269
1270 pipe_config->adjusted_mode.flags |= flags; 1270 pipe_config->adjusted_mode.flags |= flags;
1271
1272 switch (temp & TRANS_DDI_BPC_MASK) {
1273 case TRANS_DDI_BPC_6:
1274 pipe_config->pipe_bpp = 18;
1275 break;
1276 case TRANS_DDI_BPC_8:
1277 pipe_config->pipe_bpp = 24;
1278 break;
1279 case TRANS_DDI_BPC_10:
1280 pipe_config->pipe_bpp = 30;
1281 break;
1282 case TRANS_DDI_BPC_12:
1283 pipe_config->pipe_bpp = 36;
1284 break;
1285 default:
1286 break;
1287 }
1271} 1288}
1272 1289
1273static void intel_ddi_destroy(struct drm_encoder *encoder) 1290static void intel_ddi_destroy(struct drm_encoder *encoder)
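intel_ddi_get_config() — like the i9xx and ironlake get_pipe_config hunks later in this series — now reads the transcoder/pipe BPC field back into pipe_config->pipe_bpp so the state checker can compare hardware against the computed configuration. The mapping is simply bits per component times three; a compact sketch with assumed field values (the real TRANS_DDI_BPC_* and PIPECONF_*BPC encodings differ per platform):

#include <stdint.h>

enum demo_bpc_field { DEMO_BPC_8 = 0, DEMO_BPC_10 = 1, DEMO_BPC_6 = 2, DEMO_BPC_12 = 3 };

static int demo_field_to_pipe_bpp(uint32_t field, int fallback_bpp)
{
	switch (field) {
	case DEMO_BPC_6:  return 18;  /* 6 bits per component x 3 components */
	case DEMO_BPC_8:  return 24;
	case DEMO_BPC_10: return 30;
	case DEMO_BPC_12: return 36;
	default:          return fallback_bpp;  /* leave previous value alone */
	}
}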
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 581fb4b2f766..d78d33f9337d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2327 FDI_FE_ERRC_ENABLE); 2327 FDI_FE_ERRC_ENABLE);
2328} 2328}
2329 2329
2330static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc) 2330static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2331{ 2331{
2332 return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder; 2332 return crtc->base.enabled && crtc->active &&
2333 crtc->config.has_pch_encoder;
2333} 2334}
2334 2335
2335static void ivb_modeset_global_resources(struct drm_device *dev) 2336static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
2979 I915_READ(VSYNCSHIFT(cpu_transcoder))); 2980 I915_READ(VSYNCSHIFT(cpu_transcoder)));
2980} 2981}
2981 2982
2983static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
2984{
2985 struct drm_i915_private *dev_priv = dev->dev_private;
2986 uint32_t temp;
2987
2988 temp = I915_READ(SOUTH_CHICKEN1);
2989 if (temp & FDI_BC_BIFURCATION_SELECT)
2990 return;
2991
2992 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2993 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2994
2995 temp |= FDI_BC_BIFURCATION_SELECT;
2996 DRM_DEBUG_KMS("enabling fdi C rx\n");
2997 I915_WRITE(SOUTH_CHICKEN1, temp);
2998 POSTING_READ(SOUTH_CHICKEN1);
2999}
3000
3001static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3002{
3003 struct drm_device *dev = intel_crtc->base.dev;
3004 struct drm_i915_private *dev_priv = dev->dev_private;
3005
3006 switch (intel_crtc->pipe) {
3007 case PIPE_A:
3008 break;
3009 case PIPE_B:
3010 if (intel_crtc->config.fdi_lanes > 2)
3011 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3012 else
3013 cpt_enable_fdi_bc_bifurcation(dev);
3014
3015 break;
3016 case PIPE_C:
3017 cpt_enable_fdi_bc_bifurcation(dev);
3018
3019 break;
3020 default:
3021 BUG();
3022 }
3023}
3024
2982/* 3025/*
2983 * Enable PCH resources required for PCH ports: 3026 * Enable PCH resources required for PCH ports:
2984 * - PCH PLLs 3027 * - PCH PLLs
@@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2997 3040
2998 assert_pch_transcoder_disabled(dev_priv, pipe); 3041 assert_pch_transcoder_disabled(dev_priv, pipe);
2999 3042
3043 if (IS_IVYBRIDGE(dev))
3044 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3045
3000 /* Write the TU size bits before fdi link training, so that error 3046 /* Write the TU size bits before fdi link training, so that error
3001 * detection works. */ 3047 * detection works. */
3002 I915_WRITE(FDI_RX_TUSIZE1(pipe), 3048 I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -4983,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4983 if (!(tmp & PIPECONF_ENABLE)) 5029 if (!(tmp & PIPECONF_ENABLE))
4984 return false; 5030 return false;
4985 5031
5032 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5033 switch (tmp & PIPECONF_BPC_MASK) {
5034 case PIPECONF_6BPC:
5035 pipe_config->pipe_bpp = 18;
5036 break;
5037 case PIPECONF_8BPC:
5038 pipe_config->pipe_bpp = 24;
5039 break;
5040 case PIPECONF_10BPC:
5041 pipe_config->pipe_bpp = 30;
5042 break;
5043 default:
5044 break;
5045 }
5046 }
5047
4986 intel_get_pipe_timings(crtc, pipe_config); 5048 intel_get_pipe_timings(crtc, pipe_config);
4987 5049
4988 i9xx_get_pfit_config(crtc, pipe_config); 5050 i9xx_get_pfit_config(crtc, pipe_config);
@@ -5576,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5576 return true; 5638 return true;
5577} 5639}
5578 5640
5579static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5580{
5581 struct drm_i915_private *dev_priv = dev->dev_private;
5582 uint32_t temp;
5583
5584 temp = I915_READ(SOUTH_CHICKEN1);
5585 if (temp & FDI_BC_BIFURCATION_SELECT)
5586 return;
5587
5588 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5589 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5590
5591 temp |= FDI_BC_BIFURCATION_SELECT;
5592 DRM_DEBUG_KMS("enabling fdi C rx\n");
5593 I915_WRITE(SOUTH_CHICKEN1, temp);
5594 POSTING_READ(SOUTH_CHICKEN1);
5595}
5596
5597static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
5598{
5599 struct drm_device *dev = intel_crtc->base.dev;
5600 struct drm_i915_private *dev_priv = dev->dev_private;
5601
5602 switch (intel_crtc->pipe) {
5603 case PIPE_A:
5604 break;
5605 case PIPE_B:
5606 if (intel_crtc->config.fdi_lanes > 2)
5607 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5608 else
5609 cpt_enable_fdi_bc_bifurcation(dev);
5610
5611 break;
5612 case PIPE_C:
5613 cpt_enable_fdi_bc_bifurcation(dev);
5614
5615 break;
5616 default:
5617 BUG();
5618 }
5619}
5620
5621int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5641int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5622{ 5642{
5623 /* 5643 /*
@@ -5811,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5811 &intel_crtc->config.fdi_m_n); 5831 &intel_crtc->config.fdi_m_n);
5812 } 5832 }
5813 5833
5814 if (IS_IVYBRIDGE(dev))
5815 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
5816
5817 ironlake_set_pipeconf(crtc); 5834 ironlake_set_pipeconf(crtc);
5818 5835
5819 /* Set up the display plane register */ 5836 /* Set up the display plane register */
@@ -5881,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5881 if (!(tmp & PIPECONF_ENABLE)) 5898 if (!(tmp & PIPECONF_ENABLE))
5882 return false; 5899 return false;
5883 5900
5901 switch (tmp & PIPECONF_BPC_MASK) {
5902 case PIPECONF_6BPC:
5903 pipe_config->pipe_bpp = 18;
5904 break;
5905 case PIPECONF_8BPC:
5906 pipe_config->pipe_bpp = 24;
5907 break;
5908 case PIPECONF_10BPC:
5909 pipe_config->pipe_bpp = 30;
5910 break;
5911 case PIPECONF_12BPC:
5912 pipe_config->pipe_bpp = 36;
5913 break;
5914 default:
5915 break;
5916 }
5917
5884 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 5918 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5885 struct intel_shared_dpll *pll; 5919 struct intel_shared_dpll *pll;
5886 5920
@@ -8612,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev,
8612 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 8646 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8613 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 8647 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8614 8648
8649 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8650 PIPE_CONF_CHECK_I(pipe_bpp);
8651
8615#undef PIPE_CONF_CHECK_X 8652#undef PIPE_CONF_CHECK_X
8616#undef PIPE_CONF_CHECK_I 8653#undef PIPE_CONF_CHECK_I
8617#undef PIPE_CONF_CHECK_FLAGS 8654#undef PIPE_CONF_CHECK_FLAGS
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2c555f91bfae..1a431377d83b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1401 else 1401 else
1402 pipe_config->port_clock = 270000; 1402 pipe_config->port_clock = 270000;
1403 } 1403 }
1404
1405 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1406 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1407 /*
1408 * This is a big fat ugly hack.
1409 *
1410 * Some machines in UEFI boot mode provide us a VBT that has 18
1411 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1412 * unknown we fail to light up. Yet the same BIOS boots up with
1413 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1414 * max, not what it tells us to use.
1415 *
1416 * Note: This will still be broken if the eDP panel is not lit
1417 * up by the BIOS, and thus we can't get the mode at module
1418 * load.
1419 */
1420 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1421 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1422 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1423 }
1404} 1424}
1405 1425
1406static bool is_edp_psr(struct intel_dp *intel_dp) 1426static bool is_edp_psr(struct intel_dp *intel_dp)
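The eDP hunk raises the VBT-reported maximum bpp to whatever the BIOS actually programmed into the pipe, on the observation quoted in the comment: some UEFI systems advertise 18 bpp / 1.62 GHz in the VBT yet boot the panel at 24 bpp / 2.7 GHz. Expressed as a tiny helper with placeholder field names:

#include <stdbool.h>

struct demo_vbt { int edp_bpp; };

/* If firmware lit the panel at a higher depth than its own table allows,
 * trust the working configuration and raise the advertised maximum. */
static void demo_fixup_edp_bpp(struct demo_vbt *vbt, int pipe_bpp, bool is_edp)
{
	if (is_edp && vbt->edp_bpp && pipe_bpp > vbt->edp_bpp)
		vbt->edp_bpp = pipe_bpp;
}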
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9b7b68fd5d47..7f2b384ac939 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -765,6 +765,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
765extern bool 765extern bool
766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 766intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); 767extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
768extern void intel_ddi_get_config(struct intel_encoder *encoder,
769 struct intel_crtc_config *pipe_config);
768 770
769extern void intel_display_handle_reset(struct drm_device *dev); 771extern void intel_display_handle_reset(struct drm_device *dev);
770extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 772extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 831a5c021c4b..b8af94a5be39 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -700,6 +700,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
700 }, 700 },
701 { 701 {
702 .callback = intel_no_lvds_dmi_callback, 702 .callback = intel_no_lvds_dmi_callback,
703 .ident = "Intel D410PT",
704 .matches = {
705 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
706 DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
707 },
708 },
709 {
710 .callback = intel_no_lvds_dmi_callback,
711 .ident = "Intel D425KT",
712 .matches = {
713 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
714 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
715 },
716 },
717 {
718 .callback = intel_no_lvds_dmi_callback,
703 .ident = "Intel D510MO", 719 .ident = "Intel D510MO",
704 .matches = { 720 .matches = {
705 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"), 721 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f4c5e95b2d6f..26c2ea3e985c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4759,7 +4759,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
4759 * gating for the panel power sequencer or it will fail to 4759 * gating for the panel power sequencer or it will fail to
4760 * start up when no ports are active. 4760 * start up when no ports are active.
4761 */ 4761 */
4762 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 4762 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
4763 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
4764 PCH_CPUNIT_CLOCK_GATE_DISABLE);
4763 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 4765 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4764 DPLS_EDP_PPS_FIX_DIS); 4766 DPLS_EDP_PPS_FIX_DIS);
4765 /* The below fixes the weird display corruption, a few pixels shifted 4767 /* The below fixes the weird display corruption, a few pixels shifted
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 32923d2f6002..5e891b226acf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
707 switch (connector->connector_type) { 707 switch (connector->connector_type) {
708 case DRM_MODE_CONNECTOR_DVII: 708 case DRM_MODE_CONNECTOR_DVII:
709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
710 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 710 if (radeon_audio != 0) {
711 (drm_detect_hdmi_monitor(radeon_connector->edid) && 711 if (radeon_connector->use_digital &&
712 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 712 (radeon_connector->audio == RADEON_AUDIO_ENABLE))
713 return ATOM_ENCODER_MODE_HDMI; 713 return ATOM_ENCODER_MODE_HDMI;
714 else if (radeon_connector->use_digital) 714 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
715 (radeon_connector->audio == RADEON_AUDIO_AUTO))
716 return ATOM_ENCODER_MODE_HDMI;
717 else if (radeon_connector->use_digital)
718 return ATOM_ENCODER_MODE_DVI;
719 else
720 return ATOM_ENCODER_MODE_CRT;
721 } else if (radeon_connector->use_digital) {
715 return ATOM_ENCODER_MODE_DVI; 722 return ATOM_ENCODER_MODE_DVI;
716 else 723 } else {
717 return ATOM_ENCODER_MODE_CRT; 724 return ATOM_ENCODER_MODE_CRT;
725 }
718 break; 726 break;
719 case DRM_MODE_CONNECTOR_DVID: 727 case DRM_MODE_CONNECTOR_DVID:
720 case DRM_MODE_CONNECTOR_HDMIA: 728 case DRM_MODE_CONNECTOR_HDMIA:
721 default: 729 default:
722 if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 730 if (radeon_audio != 0) {
723 (drm_detect_hdmi_monitor(radeon_connector->edid) && 731 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
724 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 732 return ATOM_ENCODER_MODE_HDMI;
725 return ATOM_ENCODER_MODE_HDMI; 733 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
726 else 734 (radeon_connector->audio == RADEON_AUDIO_AUTO))
735 return ATOM_ENCODER_MODE_HDMI;
736 else
737 return ATOM_ENCODER_MODE_DVI;
738 } else {
727 return ATOM_ENCODER_MODE_DVI; 739 return ATOM_ENCODER_MODE_DVI;
740 }
728 break; 741 break;
729 case DRM_MODE_CONNECTOR_LVDS: 742 case DRM_MODE_CONNECTOR_LVDS:
730 return ATOM_ENCODER_MODE_LVDS; 743 return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
732 case DRM_MODE_CONNECTOR_DisplayPort: 745 case DRM_MODE_CONNECTOR_DisplayPort:
733 dig_connector = radeon_connector->con_priv; 746 dig_connector = radeon_connector->con_priv;
734 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 747 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
735 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 748 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
736 return ATOM_ENCODER_MODE_DP; 749 return ATOM_ENCODER_MODE_DP;
737 else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 750 } else if (radeon_audio != 0) {
738 (drm_detect_hdmi_monitor(radeon_connector->edid) && 751 if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
739 (radeon_connector->audio == RADEON_AUDIO_AUTO))) 752 return ATOM_ENCODER_MODE_HDMI;
740 return ATOM_ENCODER_MODE_HDMI; 753 else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
741 else 754 (radeon_connector->audio == RADEON_AUDIO_AUTO))
755 return ATOM_ENCODER_MODE_HDMI;
756 else
757 return ATOM_ENCODER_MODE_DVI;
758 } else {
742 return ATOM_ENCODER_MODE_DVI; 759 return ATOM_ENCODER_MODE_DVI;
760 }
743 break; 761 break;
744 case DRM_MODE_CONNECTOR_eDP: 762 case DRM_MODE_CONNECTOR_eDP:
745 return ATOM_ENCODER_MODE_DP; 763 return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1655 * does the same thing and more. 1673 * does the same thing and more.
1656 */ 1674 */
1657 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && 1675 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
1658 (rdev->family != CHIP_RS880)) 1676 (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
1659 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1677 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1660 } 1678 }
1661 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1679 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b874ccdf52f7..9cd2bc989ac7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1694,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1694 fw_name); 1694 fw_name);
1695 release_firmware(rdev->smc_fw); 1695 release_firmware(rdev->smc_fw);
1696 rdev->smc_fw = NULL; 1696 rdev->smc_fw = NULL;
1697 err = 0;
1697 } else if (rdev->smc_fw->size != smc_req_size) { 1698 } else if (rdev->smc_fw->size != smc_req_size) {
1698 printk(KERN_ERR 1699 printk(KERN_ERR
1699 "cik_smc: Bogus length %zu in firmware \"%s\"\n", 1700 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3182,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3182 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3183 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3183 if (r) { 3184 if (r) {
3184 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3185 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3186 radeon_scratch_free(rdev, scratch);
3185 return r; 3187 return r;
3186 } 3188 }
3187 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 3189 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3198,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3198 r = radeon_fence_wait(ib.fence, false); 3200 r = radeon_fence_wait(ib.fence, false);
3199 if (r) { 3201 if (r) {
3200 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3202 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3203 radeon_scratch_free(rdev, scratch);
3204 radeon_ib_free(rdev, &ib);
3201 return r; 3205 return r;
3202 } 3206 }
3203 for (i = 0; i < rdev->usec_timeout; i++) { 3207 for (i = 0; i < rdev->usec_timeout; i++) {
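
The two early returns in cik_ib_test() above now release the scratch register, and in the fence-wait case the IB as well, instead of leaking them. A minimal sketch of that acquire/fail/unwind pattern, with malloc/free standing in for the radeon_scratch_*() and radeon_ib_*() helpers:

#include <stdlib.h>

/* Stand-ins for radeon_scratch_get()/radeon_ib_get(); the point is that
 * every early return undoes what was already acquired. */
static int get_scratch(int **s) { *s = malloc(sizeof(int)); return *s ? 0 : -1; }
static int get_ib(int **ib)     { *ib = malloc(sizeof(int)); return *ib ? 0 : -1; }

static int ib_test(void)
{
    int *scratch, *ib, r;

    r = get_scratch(&scratch);
    if (r)
        return r;

    r = get_ib(&ib);
    if (r) {
        free(scratch);          /* first failure: release the scratch reg */
        return r;
    }

    /* ... submit and wait; if the fence wait fails, both must go ... */

    free(ib);
    free(scratch);
    return 0;
}

int main(void) { return ib_test(); }
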
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 85a69d2ea3d2..9fcd338c0fcf 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
113 u8 *sadb; 113 u8 *sadb;
114 int sad_count; 114 int sad_count;
115 115
116 /* XXX: setting this register causes hangs on some asics */
117 return;
118
116 if (!dig->afmt->pin) 119 if (!dig->afmt->pin)
117 return; 120 return;
118 121
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index f815c20640bd..57fcc4b16a52 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
67 u8 *sadb; 67 u8 *sadb;
68 int sad_count; 68 int sad_count;
69 69
70 /* XXX: setting this register causes hangs on some asics */
71 return;
72
70 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 73 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
71 if (connector->encoder == encoder) 74 if (connector->encoder == encoder)
72 radeon_connector = to_radeon_connector(connector); 75 radeon_connector = to_radeon_connector(connector);
@@ -288,6 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
288 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ 291 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
289 292
290 WREG32(HDMI_ACR_PACKET_CONTROL + offset, 293 WREG32(HDMI_ACR_PACKET_CONTROL + offset,
294 HDMI_ACR_SOURCE | /* select SW CTS value */
291 HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ 295 HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
292 296
293 evergreen_hdmi_update_ACR(encoder, mode->clock); 297 evergreen_hdmi_update_ACR(encoder, mode->clock);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 71399065db04..b41905573cd2 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2635,7 +2635,7 @@ int kv_dpm_init(struct radeon_device *rdev)
2635 pi->caps_sclk_ds = true; 2635 pi->caps_sclk_ds = true;
2636 pi->enable_auto_thermal_throttling = true; 2636 pi->enable_auto_thermal_throttling = true;
2637 pi->disable_nb_ps3_in_battery = false; 2637 pi->disable_nb_ps3_in_battery = false;
2638 pi->bapm_enable = true; 2638 pi->bapm_enable = false;
2639 pi->voltage_drop_t = 0; 2639 pi->voltage_drop_t = 0;
2640 pi->caps_sclk_throttle_low_notification = false; 2640 pi->caps_sclk_throttle_low_notification = false;
2641 pi->caps_fps = false; /* true? */ 2641 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 93c1f9ef5da9..cac2866d79da 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
804 fw_name); 804 fw_name);
805 release_firmware(rdev->smc_fw); 805 release_firmware(rdev->smc_fw);
806 rdev->smc_fw = NULL; 806 rdev->smc_fw = NULL;
807 err = 0;
807 } else if (rdev->smc_fw->size != smc_req_size) { 808 } else if (rdev->smc_fw->size != smc_req_size) {
808 printk(KERN_ERR 809 printk(KERN_ERR
809 "ni_mc: Bogus length %zu in firmware \"%s\"\n", 810 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2a1b1876b431..f9be22062df1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
2302 fw_name); 2302 fw_name);
2303 release_firmware(rdev->smc_fw); 2303 release_firmware(rdev->smc_fw);
2304 rdev->smc_fw = NULL; 2304 rdev->smc_fw = NULL;
2305 err = 0;
2305 } else if (rdev->smc_fw->size != smc_req_size) { 2306 } else if (rdev->smc_fw->size != smc_req_size) {
2306 printk(KERN_ERR 2307 printk(KERN_ERR
2307 "smc: Bogus length %zu in firmware \"%s\"\n", 2308 "smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 5b729319f27b..06022e3b9c3b 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -309,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
309 u8 *sadb; 309 u8 *sadb;
310 int sad_count; 310 int sad_count;
311 311
312 /* XXX: setting this register causes hangs on some asics */
313 return;
314
312 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 315 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
313 if (connector->encoder == encoder) 316 if (connector->encoder == encoder)
314 radeon_connector = to_radeon_connector(connector); 317 radeon_connector = to_radeon_connector(connector);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a400ac1c4147..24f4960f59ee 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1272,8 +1272,8 @@ struct radeon_blacklist_clocks
1272struct radeon_clock_and_voltage_limits { 1272struct radeon_clock_and_voltage_limits {
1273 u32 sclk; 1273 u32 sclk;
1274 u32 mclk; 1274 u32 mclk;
1275 u32 vddc; 1275 u16 vddc;
1276 u32 vddci; 1276 u16 vddci;
1277}; 1277};
1278 1278
1279struct radeon_clock_array { 1279struct radeon_clock_array {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 79159b5da05b..64565732cb98 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1658 drm_object_attach_property(&radeon_connector->base.base, 1658 drm_object_attach_property(&radeon_connector->base.base,
1659 rdev->mode_info.underscan_vborder_property, 1659 rdev->mode_info.underscan_vborder_property,
1660 0); 1660 0);
1661 drm_object_attach_property(&radeon_connector->base.base, 1661 if (radeon_audio != 0)
1662 rdev->mode_info.audio_property, 1662 drm_object_attach_property(&radeon_connector->base.base,
1663 RADEON_AUDIO_DISABLE); 1663 rdev->mode_info.audio_property,
1664 (radeon_audio == 1) ?
1665 RADEON_AUDIO_AUTO :
1666 RADEON_AUDIO_DISABLE);
1664 subpixel_order = SubPixelHorizontalRGB; 1667 subpixel_order = SubPixelHorizontalRGB;
1665 connector->interlace_allowed = true; 1668 connector->interlace_allowed = true;
1666 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) 1669 if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1754 rdev->mode_info.underscan_vborder_property, 1757 rdev->mode_info.underscan_vborder_property,
1755 0); 1758 0);
1756 } 1759 }
1757 if (ASIC_IS_DCE2(rdev)) { 1760 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1758 drm_object_attach_property(&radeon_connector->base.base, 1761 drm_object_attach_property(&radeon_connector->base.base,
1759 rdev->mode_info.audio_property, 1762 rdev->mode_info.audio_property,
1760 RADEON_AUDIO_DISABLE); 1763 (radeon_audio == 1) ?
1764 RADEON_AUDIO_AUTO :
1765 RADEON_AUDIO_DISABLE);
1761 } 1766 }
1762 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1767 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1763 radeon_connector->dac_load_detect = true; 1768 radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1799 rdev->mode_info.underscan_vborder_property, 1804 rdev->mode_info.underscan_vborder_property,
1800 0); 1805 0);
1801 } 1806 }
1802 if (ASIC_IS_DCE2(rdev)) { 1807 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1803 drm_object_attach_property(&radeon_connector->base.base, 1808 drm_object_attach_property(&radeon_connector->base.base,
1804 rdev->mode_info.audio_property, 1809 rdev->mode_info.audio_property,
1805 RADEON_AUDIO_DISABLE); 1810 (radeon_audio == 1) ?
1811 RADEON_AUDIO_AUTO :
1812 RADEON_AUDIO_DISABLE);
1806 } 1813 }
1807 subpixel_order = SubPixelHorizontalRGB; 1814 subpixel_order = SubPixelHorizontalRGB;
1808 connector->interlace_allowed = true; 1815 connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1843 rdev->mode_info.underscan_vborder_property, 1850 rdev->mode_info.underscan_vborder_property,
1844 0); 1851 0);
1845 } 1852 }
1846 if (ASIC_IS_DCE2(rdev)) { 1853 if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
1847 drm_object_attach_property(&radeon_connector->base.base, 1854 drm_object_attach_property(&radeon_connector->base.base,
1848 rdev->mode_info.audio_property, 1855 rdev->mode_info.audio_property,
1849 RADEON_AUDIO_DISABLE); 1856 (radeon_audio == 1) ?
1857 RADEON_AUDIO_AUTO :
1858 RADEON_AUDIO_DISABLE);
1850 } 1859 }
1851 connector->interlace_allowed = true; 1860 connector->interlace_allowed = true;
1852 /* in theory with a DP to VGA converter... */ 1861 /* in theory with a DP to VGA converter... */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 66c222836631..80285e35bc65 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
85 VRAM, also put everything into VRAM on AGP cards to avoid 85 VRAM, also put everything into VRAM on AGP cards to avoid
86 image corruption */ 86 image corruption */
87 if (p->ring == R600_RING_TYPE_UVD_INDEX && 87 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
88 p->rdev->family < CHIP_PALM &&
89 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { 88 (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
90 89 /* TODO: is this still needed for NI+ ? */
91 p->relocs[i].lobj.domain = 90 p->relocs[i].lobj.domain =
92 RADEON_GEM_DOMAIN_VRAM; 91 RADEON_GEM_DOMAIN_VRAM;
93 92
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cdd12dcd988b..9c14a1ba1de4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -153,7 +153,7 @@ int radeon_benchmarking = 0;
153int radeon_testing = 0; 153int radeon_testing = 0;
154int radeon_connector_table = 0; 154int radeon_connector_table = 0;
155int radeon_tv = 1; 155int radeon_tv = 1;
156int radeon_audio = 1; 156int radeon_audio = -1;
157int radeon_disp_priority = 0; 157int radeon_disp_priority = 0;
158int radeon_hw_i2c = 0; 158int radeon_hw_i2c = 0;
159int radeon_pcie_gen2 = -1; 159int radeon_pcie_gen2 = -1;
@@ -196,7 +196,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
196MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 196MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
197module_param_named(tv, radeon_tv, int, 0444); 197module_param_named(tv, radeon_tv, int, 0444);
198 198
199MODULE_PARM_DESC(audio, "Audio enable (1 = enable)"); 199MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
200module_param_named(audio, radeon_audio, int, 0444); 200module_param_named(audio, radeon_audio, int, 0444);
201 201
202MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); 202MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
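
radeon.audio becomes a tri-state option: -1 (the new default) leaves the decision to the driver, 0 hides HDMI audio entirely, and 1 enables it; the connector hunks earlier in this diff attach the audio property only when the option is non-zero and default it to auto when the option is 1. A small sketch of that mapping (property names are illustrative):

#include <stdio.h>

enum audio_prop { PROP_DISABLE, PROP_ENABLE, PROP_AUTO };

/* radeon_audio module option: -1 auto, 0 disable, 1 enable. */
static int should_attach_audio_prop(int radeon_audio)
{
    return radeon_audio != 0;           /* 0 means never expose audio */
}

static enum audio_prop default_audio_prop(int radeon_audio)
{
    /* 1 used to mean hard-enable; it now defaults the connector property
     * to auto-detect, while -1 keeps the conservative disabled default. */
    return radeon_audio == 1 ? PROP_AUTO : PROP_DISABLE;
}

int main(void)
{
    for (int opt = -1; opt <= 1; opt++)
        printf("audio=%2d -> attach=%d default=%d\n", opt,
               should_attach_audio_prop(opt), default_audio_prop(opt));
    return 0;
}
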
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 4f2e73f79638..308eff5be1b4 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -476,7 +476,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
476 return -EINVAL; 476 return -EINVAL;
477 } 477 }
478 478
479 if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) && 479 /* TODO: is this still necessary on NI+ ? */
480 if ((cmd == 0 || cmd == 0x3) &&
480 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 481 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
481 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 482 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
482 start, end); 483 start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d4652af425b8..d96f7cbca0a1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1681,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1681 fw_name); 1681 fw_name);
1682 release_firmware(rdev->smc_fw); 1682 release_firmware(rdev->smc_fw);
1683 rdev->smc_fw = NULL; 1683 rdev->smc_fw = NULL;
1684 err = 0;
1684 } else if (rdev->smc_fw->size != smc_req_size) { 1685 } else if (rdev->smc_fw->size != smc_req_size) {
1685 printk(KERN_ERR 1686 printk(KERN_ERR
1686 "si_smc: Bogus length %zu in firmware \"%s\"\n", 1687 "si_smc: Bogus length %zu in firmware \"%s\"\n",
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 3100fa9cb52f..7266805d9786 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
212 /* enable VCPU clock */ 212 /* enable VCPU clock */
213 WREG32(UVD_VCPU_CNTL, 1 << 9); 213 WREG32(UVD_VCPU_CNTL, 1 << 9);
214 214
215 /* enable UMC and NC0 */ 215 /* enable UMC */
216 WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13))); 216 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
217 217
218 /* boot up the VCPU */ 218 /* boot up the VCPU */
219 WREG32(UVD_SOFT_RESET, 0); 219 WREG32(UVD_SOFT_RESET, 0);
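
The UVD start sequence now only clears bit 8 of UVD_LMI_CTRL2 rather than also toggling bit 13. A user-space sketch of a masked read-modify-write follows, assuming WREG32_P-style semantics in which the mask names the bits to preserve; that macro behaviour is an assumption, not something this diff states.

#include <stdint.h>
#include <stdio.h>

/* Masked register update: keep the bits selected by keep_mask, OR in val.
 * This mirrors the assumed WREG32_P(reg, val, mask) behaviour. */
static uint32_t masked_write(uint32_t cur, uint32_t val, uint32_t keep_mask)
{
    return (cur & keep_mask) | val;
}

int main(void)
{
    uint32_t reg = 0xffffffffu;

    /* old: clear bits 8 and 13, then set bit 13 ("UMC and NC0") */
    uint32_t before = masked_write(reg, 1u << 13, ~((1u << 8) | (1u << 13)));
    /* new: only clear bit 8 ("enable UMC"), leave bit 13 untouched */
    uint32_t after  = masked_write(reg, 0, ~(1u << 8));

    printf("old=%08x new=%08x\n", before, after);
    return 0;
}
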
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1a90f0a2f7e5..0508f93b9795 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
740 struct vmw_fpriv *vmw_fp; 740 struct vmw_fpriv *vmw_fp;
741 741
742 vmw_fp = vmw_fpriv(file_priv); 742 vmw_fp = vmw_fpriv(file_priv);
743 ttm_object_file_release(&vmw_fp->tfile); 743
744 if (vmw_fp->locked_master) 744 if (vmw_fp->locked_master) {
745 struct vmw_master *vmaster =
746 vmw_master(vmw_fp->locked_master);
747
748 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
749 ttm_vt_unlock(&vmaster->lock);
745 drm_master_put(&vmw_fp->locked_master); 750 drm_master_put(&vmw_fp->locked_master);
751 }
752
753 ttm_object_file_release(&vmw_fp->tfile);
746 kfree(vmw_fp); 754 kfree(vmw_fp);
747} 755}
748 756
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
925 933
926 vmw_fp->locked_master = drm_master_get(file_priv->master); 934 vmw_fp->locked_master = drm_master_get(file_priv->master);
927 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 935 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
928 vmw_execbuf_release_pinned_bo(dev_priv);
929
930 if (unlikely((ret != 0))) { 936 if (unlikely((ret != 0))) {
931 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 937 DRM_ERROR("Unable to lock TTM at VT switch.\n");
932 drm_master_put(&vmw_fp->locked_master); 938 drm_master_put(&vmw_fp->locked_master);
933 } 939 }
934 940
935 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 941 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
942 vmw_execbuf_release_pinned_bo(dev_priv);
936 943
937 if (!dev_priv->enable_fb) { 944 if (!dev_priv->enable_fb) {
938 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 945 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065d..37fb4befec82 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
970 if (new_backup) 970 if (new_backup)
971 res->backup_offset = new_backup_offset; 971 res->backup_offset = new_backup_offset;
972 972
973 if (!res->func->may_evict) 973 if (!res->func->may_evict || res->id == -1)
974 return; 974 return;
975 975
976 write_lock(&dev_priv->resource_lock); 976 write_lock(&dev_priv->resource_lock);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5a8c01112a23..e80da62363bc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
319 319
320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 320static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
321{ 321{
322 __u32 raw_value; 322 __s32 raw_value;
323 switch (item->tag) { 323 switch (item->tag) {
324 case HID_GLOBAL_ITEM_TAG_PUSH: 324 case HID_GLOBAL_ITEM_TAG_PUSH:
325 325
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
370 return 0; 370 return 0;
371 371
372 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 372 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
373 /* Units exponent negative numbers are given through a 373 /* Many devices provide unit exponent as a two's complement
374 * two's complement. 374 * nibble due to the common misunderstanding of HID
375 * See "6.2.2.7 Global Items" for more information. */ 375 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
376 raw_value = item_udata(item); 376 * both this and the standard encoding. */
377 raw_value = item_sdata(item);
377 if (!(raw_value & 0xfffffff0)) 378 if (!(raw_value & 0xfffffff0))
378 parser->global.unit_exponent = hid_snto32(raw_value, 4); 379 parser->global.unit_exponent = hid_snto32(raw_value, 4);
379 else 380 else
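
The unit-exponent change above reads the item as signed data and, when the value fits in the low nibble, interprets that nibble as a 4-bit two's-complement number via hid_snto32(). A rough user-space equivalent of such an n-bit sign extension (the kernel helper's real implementation may differ):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low n bits of value, an approximation of what a
 * hid_snto32()-style helper does. */
static int32_t snto32(uint32_t value, unsigned int n)
{
    uint32_t sign_bit = 1u << (n - 1);

    value &= (1u << n) - 1;                 /* keep only the low n bits */
    return (int32_t)((value ^ sign_bit) - sign_bit);
}

int main(void)
{
    /* 0x0f as a 4-bit two's-complement nibble is -1, i.e. exponent 10^-1 */
    printf("%d %d\n", snto32(0x0f, 4), snto32(0x07, 4));   /* -1 7 */
    return 0;
}
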
@@ -1870,6 +1871,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1870 1871
1871 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
1872 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1873 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1874 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1873 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1875 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1874 { } 1876 { }
1875}; 1877};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9cbc7ab07dfa..f0296a50be5f 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -633,6 +633,7 @@
633#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 633#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
634 634
635#define USB_VENDOR_ID_NINTENDO 0x057e 635#define USB_VENDOR_ID_NINTENDO 0x057e
636#define USB_VENDOR_ID_NINTENDO2 0x054c
636#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306 637#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
637#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330 638#define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
638 639
@@ -792,6 +793,8 @@
792#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 793#define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009
793#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 794#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
794#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 795#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
796#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
797#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
795 798
796#define USB_VENDOR_ID_THINGM 0x27b8 799#define USB_VENDOR_ID_THINGM 0x27b8
797#define USB_DEVICE_ID_BLINK1 0x01ed 800#define USB_DEVICE_ID_BLINK1 0x01ed
@@ -919,4 +922,7 @@
919#define USB_VENDOR_ID_PRIMAX 0x0461 922#define USB_VENDOR_ID_PRIMAX 0x0461
920#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 923#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
921 924
925#define USB_VENDOR_ID_SIS 0x0457
926#define USB_DEVICE_ID_SIS_TS 0x1013
927
922#endif 928#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 8741d953dcc8..d97f2323af57 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
192 return -EINVAL; 192 return -EINVAL;
193} 193}
194 194
195
195/** 196/**
196 * hidinput_calc_abs_res - calculate an absolute axis resolution 197 * hidinput_calc_abs_res - calculate an absolute axis resolution
197 * @field: the HID report field to calculate resolution for 198 * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
234 case ABS_MT_TOOL_Y: 235 case ABS_MT_TOOL_Y:
235 case ABS_MT_TOUCH_MAJOR: 236 case ABS_MT_TOUCH_MAJOR:
236 case ABS_MT_TOUCH_MINOR: 237 case ABS_MT_TOUCH_MINOR:
237 if (field->unit & 0xffffff00) /* Not a length */ 238 if (field->unit == 0x11) { /* If centimeters */
238 return 0;
239 unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
240 switch (field->unit & 0xf) {
241 case 0x1: /* If centimeters */
242 /* Convert to millimeters */ 239 /* Convert to millimeters */
243 unit_exponent += 1; 240 unit_exponent += 1;
244 break; 241 } else if (field->unit == 0x13) { /* If inches */
245 case 0x3: /* If inches */
246 /* Convert to millimeters */ 242 /* Convert to millimeters */
247 prev = physical_extents; 243 prev = physical_extents;
248 physical_extents *= 254; 244 physical_extents *= 254;
249 if (physical_extents < prev) 245 if (physical_extents < prev)
250 return 0; 246 return 0;
251 unit_exponent -= 1; 247 unit_exponent -= 1;
252 break; 248 } else {
253 default:
254 return 0; 249 return 0;
255 } 250 }
256 break; 251 break;
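
The rewritten branch matches the whole unit code (0x11 for centimetres, 0x13 for inches) instead of decoding the nibbles, and converts both to millimetres: centimetres bump the exponent by one, inches scale the physical extents by 254 and drop the exponent by one, a net factor of 25.4. A small sketch of that arithmetic with made-up values; the driver's overflow check is omitted here.

#include <stdio.h>

/* Convert a physical extent in cm (unit 0x11) or inches (unit 0x13)
 * to millimetres, mirroring the exponent juggling above. */
static double to_millimetres(long physical, int unit, int unit_exponent)
{
    if (unit == 0x11) {                 /* centimetres */
        unit_exponent += 1;             /* 1 cm == 10 mm */
    } else if (unit == 0x13) {          /* inches */
        physical *= 254;                /* 1 in == 25.4 mm */
        unit_exponent -= 1;
    } else {
        return 0;                       /* not a supported length unit */
    }

    double scale = 1.0;
    for (int i = 0; i < unit_exponent; i++)  scale *= 10.0;
    for (int i = 0; i > unit_exponent; i--)  scale /= 10.0;
    return physical * scale;
}

int main(void)
{
    printf("%g\n", to_millimetres(30, 0x11, 0));   /* 30 cm -> 300 mm */
    printf("%g\n", to_millimetres(10, 0x13, 0));   /* 10 in -> 254 mm */
    return 0;
}
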
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index abb20db2b443..1446f526ee8b 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
834 goto done; 834 goto done;
835 } 835 }
836 836
837 if (vendor == USB_VENDOR_ID_NINTENDO) { 837 if (vendor == USB_VENDOR_ID_NINTENDO ||
838 vendor == USB_VENDOR_ID_NINTENDO2) {
838 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { 839 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
839 devtype = WIIMOTE_DEV_GEN10; 840 devtype = WIIMOTE_DEV_GEN10;
840 goto done; 841 goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
1855static const struct hid_device_id wiimote_hid_devices[] = { 1856static const struct hid_device_id wiimote_hid_devices[] = {
1856 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1857 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1857 USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 1858 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1859 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
1860 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
1858 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, 1861 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
1859 USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 1862 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
1860 { } 1863 { }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07345521f421..3fca3be08337 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 110 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 111 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 112 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
113 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
114 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
115 { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
113 116
114 { 0, 0 } 117 { 0, 0 }
115}; 118};
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a7b30be86ae0..52605c0ea3a6 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
525 } 525 }
526 526
527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 527 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
528 if (indio_dev == NULL) 528 if (indio_dev == NULL) {
529 return -ENOMEM; 529 ret = -ENOMEM;
530 goto error_disable_clk;
531 }
530 532
531 st = iio_priv(indio_dev); 533 st = iio_priv(indio_dev);
532 534
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 2710f7245c3b..2db7dcd826b9 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
477 indio_dev->currentmode = INDIO_DIRECT_MODE; 477 indio_dev->currentmode = INDIO_DIRECT_MODE;
478 if (indio_dev->setup_ops->postdisable) 478 if (indio_dev->setup_ops->postdisable)
479 indio_dev->setup_ops->postdisable(indio_dev); 479 indio_dev->setup_ops->postdisable(indio_dev);
480
481 if (indio_dev->available_scan_masks == NULL)
482 kfree(indio_dev->active_scan_mask);
480} 483}
481 484
482int iio_update_buffers(struct iio_dev *indio_dev, 485int iio_update_buffers(struct iio_dev *indio_dev,
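
The added kfree() in iio_disable_all_buffers() frees active_scan_mask only when available_scan_masks is NULL, i.e. only when the core allocated the mask itself rather than pointing at a driver-provided entry. A toy model of that ownership rule (field and function names are illustrative):

#include <stdlib.h>

/* Toy model: when the driver supplies a table of allowed masks, the core
 * points at an entry and must not free it; otherwise the core allocates
 * the mask and owns it. */
struct dev_masks {
    const unsigned long *available;   /* driver-owned table, or NULL */
    unsigned long *active;            /* table entry or core-allocated */
};

static void disable_buffers(struct dev_masks *d)
{
    if (d->available == NULL)         /* core-allocated: we own it */
        free(d->active);
    d->active = NULL;
}

int main(void)
{
    struct dev_masks d = { .available = NULL,
                           .active = calloc(1, sizeof(unsigned long)) };
    disable_buffers(&d);
    return 0;
}
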
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 5ceda710f516..b84791f03a27 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
31 libibverbs, libibcm and a hardware driver library from 31 libibverbs, libibcm and a hardware driver library from
32 <http://www.openfabrics.org/git/>. 32 <http://www.openfabrics.org/git/>.
33 33
34config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
35 bool "Experimental and unstable ABI for userspace access to flow steering verbs"
36 depends on INFINIBAND_USER_ACCESS
37 depends on STAGING
38 ---help---
39 The final ABI for userspace access to flow steering verbs
40 has not been defined. To use the current ABI, *WHICH WILL
41 CHANGE IN THE FUTURE*, say Y here.
42
43 If unsure, say N.
44
34config INFINIBAND_USER_MEM 45config INFINIBAND_USER_MEM
35 bool 46 bool
36 depends on INFINIBAND_USER_ACCESS != n 47 depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index d040b877475f..d8f9c6c272d7 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
217IB_UVERBS_DECLARE_CMD(create_xsrq); 217IB_UVERBS_DECLARE_CMD(create_xsrq);
218IB_UVERBS_DECLARE_CMD(open_xrcd); 218IB_UVERBS_DECLARE_CMD(open_xrcd);
219IB_UVERBS_DECLARE_CMD(close_xrcd); 219IB_UVERBS_DECLARE_CMD(close_xrcd);
220#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
220IB_UVERBS_DECLARE_CMD(create_flow); 221IB_UVERBS_DECLARE_CMD(create_flow);
221IB_UVERBS_DECLARE_CMD(destroy_flow); 222IB_UVERBS_DECLARE_CMD(destroy_flow);
223#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
222 224
223#endif /* UVERBS_H */ 225#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f2b81b9ee0d6..2f0f01b70e3b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; 54static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; 55static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; 56static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
57#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
57static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" }; 58static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
58 60
59#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ 61#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
60 do { \ 62 do { \
@@ -2599,6 +2601,7 @@ out_put:
2599 return ret ? ret : in_len; 2601 return ret ? ret : in_len;
2600} 2602}
2601 2603
2604#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
2602static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec, 2605static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
2603 union ib_flow_spec *ib_spec) 2606 union ib_flow_spec *ib_spec)
2604{ 2607{
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
2824 2827
2825 return ret ? ret : in_len; 2828 return ret ? ret : in_len;
2826} 2829}
2830#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
2827 2831
2828static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 2832static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2829 struct ib_uverbs_create_xsrq *cmd, 2833 struct ib_uverbs_create_xsrq *cmd,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 75ad86c4abf8..2df31f68ea09 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, 115 [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, 116 [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, 117 [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
118#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
118 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow, 119 [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
119 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow 120 [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
121#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
120}; 122};
121 123
122static void ib_uverbs_add_one(struct ib_device *device); 124static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
605 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) 607 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
606 return -ENOSYS; 608 return -ENOSYS;
607 609
610#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
608 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) { 611 if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
609 struct ib_uverbs_cmd_hdr_ex hdr_ex; 612 struct ib_uverbs_cmd_hdr_ex hdr_ex;
610 613
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
621 (hdr_ex.out_words + 624 (hdr_ex.out_words +
622 hdr_ex.provider_out_words) * 4); 625 hdr_ex.provider_out_words) * 4);
623 } else { 626 } else {
627#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
624 if (hdr.in_words * 4 != count) 628 if (hdr.in_words * 4 != count)
625 return -EINVAL; 629 return -EINVAL;
626 630
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
628 buf + sizeof(hdr), 632 buf + sizeof(hdr),
629 hdr.in_words * 4, 633 hdr.in_words * 4,
630 hdr.out_words * 4); 634 hdr.out_words * 4);
635#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
631 } 636 }
637#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
632} 638}
633 639
634static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) 640static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index d5d1929753e4..cedda25232be 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
141 return "C2_QP_STATE_ERROR"; 141 return "C2_QP_STATE_ERROR";
142 default: 142 default:
143 return "<invalid QP state>"; 143 return "<invalid QP state>";
144 }; 144 }
145} 145}
146 146
147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) 147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf4..f0612645de99 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; 1691 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; 1692 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
1693 1693
1694#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
1694 ibdev->ib_dev.uverbs_cmd_mask |= 1695 ibdev->ib_dev.uverbs_cmd_mask |=
1695 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) | 1696 (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
1696 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW); 1697 (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
1698#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
1697 } 1699 }
1698 1700
1699 mlx4_ib_alloc_eqs(dev, ibdev); 1701 mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3f831de9a4d8..b1a6cb3a2809 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
164static int alloc_comp_eqs(struct mlx5_ib_dev *dev) 164static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
165{ 165{
166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 166 struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
167 char name[MLX5_MAX_EQ_NAME];
167 struct mlx5_eq *eq, *n; 168 struct mlx5_eq *eq, *n;
168 int ncomp_vec; 169 int ncomp_vec;
169 int nent; 170 int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
180 goto clean; 181 goto clean;
181 } 182 }
182 183
183 snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); 184 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
184 err = mlx5_create_map_eq(&dev->mdev, eq, 185 err = mlx5_create_map_eq(&dev->mdev, eq,
185 i + MLX5_EQ_VEC_COMP_BASE, nent, 0, 186 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
186 eq->name, 187 name, &dev->mdev.priv.uuari.uars[0]);
187 &dev->mdev.priv.uuari.uars[0]);
188 if (err) { 188 if (err) {
189 kfree(eq); 189 kfree(eq);
190 goto clean; 190 goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
301 props->max_srq_sge = max_rq_sg - 1; 301 props->max_srq_sge = max_rq_sg - 1;
302 props->max_fast_reg_page_list_len = (unsigned int)-1; 302 props->max_fast_reg_page_list_len = (unsigned int)-1;
303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; 303 props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
304 props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? 304 props->atomic_cap = IB_ATOMIC_NONE;
305 IB_ATOMIC_HCA : IB_ATOMIC_NONE; 305 props->masked_atomic_cap = IB_ATOMIC_NONE;
306 props->masked_atomic_cap = IB_ATOMIC_HCA;
307 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); 306 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
308 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; 307 props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
309 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; 308 props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
1006 ibev.device = &ibdev->ib_dev; 1005 ibev.device = &ibdev->ib_dev;
1007 ibev.element.port_num = port; 1006 ibev.element.port_num = port;
1008 1007
1008 if (port < 1 || port > ibdev->num_ports) {
1009 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
1010 return;
1011 }
1012
1009 if (ibdev->ib_active) 1013 if (ibdev->ib_active)
1010 ib_dispatch_event(&ibev); 1014 ib_dispatch_event(&ibev);
1011} 1015}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f0..3453580b1eb2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
42 DEF_CACHE_SIZE = 10, 42 DEF_CACHE_SIZE = 10,
43}; 43};
44 44
45enum {
46 MLX5_UMR_ALIGN = 2048
47};
48
45static __be64 *mr_align(__be64 *ptr, int align) 49static __be64 *mr_align(__be64 *ptr, int align)
46{ 50{
47 unsigned long mask = align - 1; 51 unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
61 65
62static int add_keys(struct mlx5_ib_dev *dev, int c, int num) 66static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
63{ 67{
64 struct device *ddev = dev->ib_dev.dma_device;
65 struct mlx5_mr_cache *cache = &dev->cache; 68 struct mlx5_mr_cache *cache = &dev->cache;
66 struct mlx5_cache_ent *ent = &cache->ent[c]; 69 struct mlx5_cache_ent *ent = &cache->ent[c];
67 struct mlx5_create_mkey_mbox_in *in; 70 struct mlx5_create_mkey_mbox_in *in;
68 struct mlx5_ib_mr *mr; 71 struct mlx5_ib_mr *mr;
69 int npages = 1 << ent->order; 72 int npages = 1 << ent->order;
70 int size = sizeof(u64) * npages;
71 int err = 0; 73 int err = 0;
72 int i; 74 int i;
73 75
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
83 } 85 }
84 mr->order = ent->order; 86 mr->order = ent->order;
85 mr->umred = 1; 87 mr->umred = 1;
86 mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
87 if (!mr->pas) {
88 kfree(mr);
89 err = -ENOMEM;
90 goto out;
91 }
92 mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
93 DMA_TO_DEVICE);
94 if (dma_mapping_error(ddev, mr->dma)) {
95 kfree(mr->pas);
96 kfree(mr);
97 err = -ENOMEM;
98 goto out;
99 }
100
101 in->seg.status = 1 << 6; 88 in->seg.status = 1 << 6;
102 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); 89 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
103 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 90 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
108 sizeof(*in)); 95 sizeof(*in));
109 if (err) { 96 if (err) {
110 mlx5_ib_warn(dev, "create mkey failed %d\n", err); 97 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
111 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
112 kfree(mr->pas);
113 kfree(mr); 98 kfree(mr);
114 goto out; 99 goto out;
115 } 100 }
@@ -129,11 +114,9 @@ out:
129 114
130static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) 115static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
131{ 116{
132 struct device *ddev = dev->ib_dev.dma_device;
133 struct mlx5_mr_cache *cache = &dev->cache; 117 struct mlx5_mr_cache *cache = &dev->cache;
134 struct mlx5_cache_ent *ent = &cache->ent[c]; 118 struct mlx5_cache_ent *ent = &cache->ent[c];
135 struct mlx5_ib_mr *mr; 119 struct mlx5_ib_mr *mr;
136 int size;
137 int err; 120 int err;
138 int i; 121 int i;
139 122
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
149 ent->size--; 132 ent->size--;
150 spin_unlock(&ent->lock); 133 spin_unlock(&ent->lock);
151 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 134 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
152 if (err) { 135 if (err)
153 mlx5_ib_warn(dev, "failed destroy mkey\n"); 136 mlx5_ib_warn(dev, "failed destroy mkey\n");
154 } else { 137 else
155 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
156 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
157 kfree(mr->pas);
158 kfree(mr); 138 kfree(mr);
159 }
160 } 139 }
161} 140}
162 141
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
408 387
409static void clean_keys(struct mlx5_ib_dev *dev, int c) 388static void clean_keys(struct mlx5_ib_dev *dev, int c)
410{ 389{
411 struct device *ddev = dev->ib_dev.dma_device;
412 struct mlx5_mr_cache *cache = &dev->cache; 390 struct mlx5_mr_cache *cache = &dev->cache;
413 struct mlx5_cache_ent *ent = &cache->ent[c]; 391 struct mlx5_cache_ent *ent = &cache->ent[c];
414 struct mlx5_ib_mr *mr; 392 struct mlx5_ib_mr *mr;
415 int size;
416 int err; 393 int err;
417 394
395 cancel_delayed_work(&ent->dwork);
418 while (1) { 396 while (1) {
419 spin_lock(&ent->lock); 397 spin_lock(&ent->lock);
420 if (list_empty(&ent->head)) { 398 if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
427 ent->size--; 405 ent->size--;
428 spin_unlock(&ent->lock); 406 spin_unlock(&ent->lock);
429 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 407 err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
430 if (err) { 408 if (err)
431 mlx5_ib_warn(dev, "failed destroy mkey\n"); 409 mlx5_ib_warn(dev, "failed destroy mkey\n");
432 } else { 410 else
433 size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
434 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
435 kfree(mr->pas);
436 kfree(mr); 411 kfree(mr);
437 }
438 } 412 }
439} 413}
440 414
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
540 int i; 514 int i;
541 515
542 dev->cache.stopped = 1; 516 dev->cache.stopped = 1;
543 destroy_workqueue(dev->cache.wq); 517 flush_workqueue(dev->cache.wq);
544 518
545 mlx5_mr_cache_debugfs_cleanup(dev); 519 mlx5_mr_cache_debugfs_cleanup(dev);
546 520
547 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) 521 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
548 clean_keys(dev, i); 522 clean_keys(dev, i);
549 523
524 destroy_workqueue(dev->cache.wq);
525
550 return 0; 526 return 0;
551} 527}
552 528
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
675 int page_shift, int order, int access_flags) 651 int page_shift, int order, int access_flags)
676{ 652{
677 struct mlx5_ib_dev *dev = to_mdev(pd->device); 653 struct mlx5_ib_dev *dev = to_mdev(pd->device);
654 struct device *ddev = dev->ib_dev.dma_device;
678 struct umr_common *umrc = &dev->umrc; 655 struct umr_common *umrc = &dev->umrc;
679 struct ib_send_wr wr, *bad; 656 struct ib_send_wr wr, *bad;
680 struct mlx5_ib_mr *mr; 657 struct mlx5_ib_mr *mr;
681 struct ib_sge sg; 658 struct ib_sge sg;
659 int size = sizeof(u64) * npages;
682 int err; 660 int err;
683 int i; 661 int i;
684 662
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
697 if (!mr) 675 if (!mr)
698 return ERR_PTR(-EAGAIN); 676 return ERR_PTR(-EAGAIN);
699 677
700 mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); 678 mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
679 if (!mr->pas) {
680 err = -ENOMEM;
681 goto error;
682 }
683
684 mlx5_ib_populate_pas(dev, umem, page_shift,
685 mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
686
687 mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
688 DMA_TO_DEVICE);
689 if (dma_mapping_error(ddev, mr->dma)) {
690 kfree(mr->pas);
691 err = -ENOMEM;
692 goto error;
693 }
701 694
702 memset(&wr, 0, sizeof(wr)); 695 memset(&wr, 0, sizeof(wr));
703 wr.wr_id = (u64)(unsigned long)mr; 696 wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
718 wait_for_completion(&mr->done); 711 wait_for_completion(&mr->done);
719 up(&umrc->sem); 712 up(&umrc->sem);
720 713
714 dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
715 kfree(mr->pas);
716
721 if (mr->status != IB_WC_SUCCESS) { 717 if (mr->status != IB_WC_SUCCESS) {
722 mlx5_ib_warn(dev, "reg umr failed\n"); 718 mlx5_ib_warn(dev, "reg umr failed\n");
723 err = -EFAULT; 719 err = -EFAULT;
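
reg_umr() now allocates the page-address array itself, over-allocating by MLX5_UMR_ALIGN - 1 bytes so the buffer handed to the DMA mapping can sit on a 2048-byte boundary, and releases it as soon as the UMR completes. A user-space sketch of the over-allocate-then-align trick, with the DMA calls reduced to comments:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define UMR_ALIGN 2048          /* mirrors the MLX5_UMR_ALIGN constant */

/* Round ptr up to the next 'align' boundary (align must be a power of 2). */
static void *align_up(void *ptr, size_t align)
{
    uintptr_t p = (uintptr_t)ptr;
    return (void *)((p + align - 1) & ~(uintptr_t)(align - 1));
}

int main(void)
{
    size_t npages = 512;
    size_t size = npages * sizeof(uint64_t);

    /* Over-allocate so an aligned window of 'size' bytes always fits. */
    void *raw = malloc(size + UMR_ALIGN - 1);
    if (!raw)
        return 1;

    void *pas = align_up(raw, UMR_ALIGN);
    /* ...fill pas[], dma_map_single(pas, size), post the UMR work request,
     * wait for completion, then dma_unmap_single()... */
    printf("raw=%p aligned=%p\n", raw, pas);

    free(raw);                  /* always free the original pointer */
    return 0;
}
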
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd303..5659ea880741 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
203 203
204 switch (qp_type) { 204 switch (qp_type) {
205 case IB_QPT_XRC_INI: 205 case IB_QPT_XRC_INI:
206 size = sizeof(struct mlx5_wqe_xrc_seg); 206 size += sizeof(struct mlx5_wqe_xrc_seg);
207 /* fall through */ 207 /* fall through */
208 case IB_QPT_RC: 208 case IB_QPT_RC:
209 size += sizeof(struct mlx5_wqe_ctrl_seg) + 209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
211 sizeof(struct mlx5_wqe_raddr_seg); 211 sizeof(struct mlx5_wqe_raddr_seg);
212 break; 212 break;
213 213
214 case IB_QPT_XRC_TGT:
215 return 0;
216
214 case IB_QPT_UC: 217 case IB_QPT_UC:
215 size = sizeof(struct mlx5_wqe_ctrl_seg) + 218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
216 sizeof(struct mlx5_wqe_raddr_seg); 219 sizeof(struct mlx5_wqe_raddr_seg);
217 break; 220 break;
218 221
219 case IB_QPT_UD: 222 case IB_QPT_UD:
220 case IB_QPT_SMI: 223 case IB_QPT_SMI:
221 case IB_QPT_GSI: 224 case IB_QPT_GSI:
222 size = sizeof(struct mlx5_wqe_ctrl_seg) + 225 size += sizeof(struct mlx5_wqe_ctrl_seg) +
223 sizeof(struct mlx5_wqe_datagram_seg); 226 sizeof(struct mlx5_wqe_datagram_seg);
224 break; 227 break;
225 228
226 case MLX5_IB_QPT_REG_UMR: 229 case MLX5_IB_QPT_REG_UMR:
227 size = sizeof(struct mlx5_wqe_ctrl_seg) + 230 size += sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 231 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
229 sizeof(struct mlx5_mkey_seg); 232 sizeof(struct mlx5_mkey_seg);
230 break; 233 break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
270 return wqe_size; 273 return wqe_size;
271 274
272 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { 275 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
273 mlx5_ib_dbg(dev, "\n"); 276 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
277 wqe_size, dev->mdev.caps.max_sq_desc_sz);
274 return -EINVAL; 278 return -EINVAL;
275 } 279 }
276 280
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
280 284
281 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 285 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
282 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 286 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
287 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
288 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
289 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
290 return -ENOMEM;
291 }
283 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 292 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
284 qp->sq.max_gs = attr->cap.max_send_sge; 293 qp->sq.max_gs = attr->cap.max_send_sge;
285 qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); 294 qp->sq.max_post = wq_size / wqe_size;
295 attr->cap.max_send_wr = qp->sq.max_post;
286 296
287 return wq_size; 297 return wq_size;
288} 298}
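
calc_sq_size() now refuses a send queue deeper than the device's max_wqes limit and reports the usable depth back through attr->cap.max_send_wr, computing max_post as wq_size / wqe_size instead of rounding it down to a power of two. A small sketch of that sizing arithmetic; the round-up helper and the limit values are stand-ins, not the mlx5 definitions.

#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int round_up_pow2(unsigned int x)
{
    unsigned int p = 1;
    while (p < x)
        p <<= 1;
    return p;
}

/* Size a send queue: wq_size is the rounded-up byte footprint, max_post
 * the number of full WQEs that actually fit.  Limits are made up. */
static int calc_sq_size(unsigned int max_send_wr, unsigned int wqe_size,
                        unsigned int bb, unsigned int max_wqes,
                        unsigned int *max_post)
{
    unsigned int wq_size = round_up_pow2(max_send_wr * wqe_size);
    unsigned int wqe_cnt = wq_size / bb;

    if (wqe_cnt > max_wqes)
        return -1;                      /* deeper than the HW allows */

    *max_post = wq_size / wqe_size;     /* no power-of-two rounding here */
    return (int)wq_size;
}

int main(void)
{
    unsigned int max_post;
    int wq = calc_sq_size(100, 192, 64, 16384, &max_post);
    printf("wq_size=%d max_post=%u\n", wq, max_post);
    return 0;
}
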
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1280 MLX5_QP_OPTPAR_Q_KEY, 1290 MLX5_QP_OPTPAR_Q_KEY,
1281 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | 1291 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1282 MLX5_QP_OPTPAR_Q_KEY, 1292 MLX5_QP_OPTPAR_Q_KEY,
1293 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1294 MLX5_QP_OPTPAR_RRE |
1295 MLX5_QP_OPTPAR_RAE |
1296 MLX5_QP_OPTPAR_RWE |
1297 MLX5_QP_OPTPAR_PKEY_INDEX,
1283 }, 1298 },
1284 }, 1299 },
1285 [MLX5_QP_STATE_RTR] = { 1300 [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
1314 [MLX5_QP_STATE_RTS] = { 1329 [MLX5_QP_STATE_RTS] = {
1315 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 1330 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1316 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 1331 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1332 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1333 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1334 MLX5_QP_OPTPAR_RWE |
1335 MLX5_QP_OPTPAR_RAE |
1336 MLX5_QP_OPTPAR_RRE,
1317 }, 1337 },
1318 }, 1338 },
1319}; 1339};
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1651 rseg->reserved = 0; 1671 rseg->reserved = 0;
1652} 1672}
1653 1673
1654static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
1655{
1656 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1657 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1658 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1659 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
1660 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1661 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1662 } else {
1663 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
1664 aseg->compare = 0;
1665 }
1666}
1667
1668static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
1669 struct ib_send_wr *wr)
1670{
1671 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
1672 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
1673 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
1674 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
1675}
1676
1677static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 1674static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1678 struct ib_send_wr *wr) 1675 struct ib_send_wr *wr)
1679{ 1676{
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2063 2060
2064 case IB_WR_ATOMIC_CMP_AND_SWP: 2061 case IB_WR_ATOMIC_CMP_AND_SWP:
2065 case IB_WR_ATOMIC_FETCH_AND_ADD: 2062 case IB_WR_ATOMIC_FETCH_AND_ADD:
2066 set_raddr_seg(seg, wr->wr.atomic.remote_addr,
2067 wr->wr.atomic.rkey);
2068 seg += sizeof(struct mlx5_wqe_raddr_seg);
2069
2070 set_atomic_seg(seg, wr);
2071 seg += sizeof(struct mlx5_wqe_atomic_seg);
2072
2073 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2074 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
2075 break;
2076
2077 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 2063 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2078 set_raddr_seg(seg, wr->wr.atomic.remote_addr, 2064 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2079 wr->wr.atomic.rkey); 2065 err = -ENOSYS;
2080 seg += sizeof(struct mlx5_wqe_raddr_seg); 2066 *bad_wr = wr;
2081 2067 goto out;
2082 set_masked_atomic_seg(seg, wr);
2083 seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
2084
2085 size += (sizeof(struct mlx5_wqe_raddr_seg) +
2086 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
2087 break;
2088 2068
2089 case IB_WR_LOCAL_INV: 2069 case IB_WR_LOCAL_INV:
2090 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2070 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 84d297afd6a9..0aa478bc291a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
295 mlx5_vfree(in); 295 mlx5_vfree(in);
296 if (err) { 296 if (err) {
297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); 297 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
298 goto err_srq; 298 goto err_usr_kern_srq;
299 } 299 }
300 300
301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); 301 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
316 316
317err_core: 317err_core:
318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); 318 mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
319
320err_usr_kern_srq:
319 if (pd->uobject) 321 if (pd->uobject)
320 destroy_srq_user(pd, srq); 322 destroy_srq_user(pd, srq);
321 else 323 else
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7c9d35f39d75..690201738993 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", 357 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
358 eqe->type, eqe->subtype, eq->eqn); 358 eqe->type, eqe->subtype, eq->eqn);
359 break; 359 break;
360 }; 360 }
361 361
362 set_eqe_hw(eqe); 362 set_eqe_hw(eqe);
363 ++eq->cons_index; 363 ++eq->cons_index;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 4ed8235d2d36..50219ab2279d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
150 return IB_QPS_SQE; 150 return IB_QPS_SQE;
151 case OCRDMA_QPS_ERR: 151 case OCRDMA_QPS_ERR:
152 return IB_QPS_ERR; 152 return IB_QPS_ERR;
153 }; 153 }
154 return IB_QPS_ERR; 154 return IB_QPS_ERR;
155} 155}
156 156
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
171 return OCRDMA_QPS_SQE; 171 return OCRDMA_QPS_SQE;
172 case IB_QPS_ERR: 172 case IB_QPS_ERR:
173 return OCRDMA_QPS_ERR; 173 return OCRDMA_QPS_ERR;
174 }; 174 }
175 return OCRDMA_QPS_ERR; 175 return OCRDMA_QPS_ERR;
176} 176}
177 177
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
1982 break; 1982 break;
1983 default: 1983 default:
1984 return -EINVAL; 1984 return -EINVAL;
1985 }; 1985 }
1986 1986
1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); 1987 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
1988 if (!cmd) 1988 if (!cmd)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 56e004940f18..0ce7674621ea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
531 case BE_DEV_DOWN: 531 case BE_DEV_DOWN:
532 ocrdma_close(dev); 532 ocrdma_close(dev);
533 break; 533 break;
534 }; 534 }
535} 535}
536 536
537static struct ocrdma_driver ocrdma_drv = { 537static struct ocrdma_driver ocrdma_drv = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6e982bb43c31..69f1d1221a6b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
141 /* Unsupported */ 141 /* Unsupported */
142 *ib_speed = IB_SPEED_SDR; 142 *ib_speed = IB_SPEED_SDR;
143 *ib_width = IB_WIDTH_1X; 143 *ib_width = IB_WIDTH_1X;
144 }; 144 }
145} 145}
146 146
147 147
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2331 default: 2331 default:
2332 ibwc_status = IB_WC_GENERAL_ERR; 2332 ibwc_status = IB_WC_GENERAL_ERR;
2333 break; 2333 break;
2334 }; 2334 }
2335 return ibwc_status; 2335 return ibwc_status;
2336} 2336}
2337 2337
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2370 pr_err("%s() invalid opcode received = 0x%x\n", 2370 pr_err("%s() invalid opcode received = 0x%x\n",
2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); 2371 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2372 break; 2372 break;
2373 }; 2373 }
2374} 2374}
2375 2375
2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, 2376static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3591855cc5b5..6df23502059a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
594 594
595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
596 596
597 if (device->use_frwr) 597 if (device && device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn); 598 isert_conn_free_frwr_pool(isert_conn);
599 599
600 if (isert_conn->conn_qp) { 600 if (isert_conn->conn_qp) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c04469928925..e75d015024a1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
1734 */ 1734 */
1735struct input_dev *input_allocate_device(void) 1735struct input_dev *input_allocate_device(void)
1736{ 1736{
1737 static atomic_t input_no = ATOMIC_INIT(0);
1737 struct input_dev *dev; 1738 struct input_dev *dev;
1738 1739
1739 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL); 1740 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
1743 device_initialize(&dev->dev); 1744 device_initialize(&dev->dev);
1744 mutex_init(&dev->mutex); 1745 mutex_init(&dev->mutex);
1745 spin_lock_init(&dev->event_lock); 1746 spin_lock_init(&dev->event_lock);
1747 init_timer(&dev->timer);
1746 INIT_LIST_HEAD(&dev->h_list); 1748 INIT_LIST_HEAD(&dev->h_list);
1747 INIT_LIST_HEAD(&dev->node); 1749 INIT_LIST_HEAD(&dev->node);
1748 1750
1751 dev_set_name(&dev->dev, "input%ld",
1752 (unsigned long) atomic_inc_return(&input_no) - 1);
1753
1749 __module_get(THIS_MODULE); 1754 __module_get(THIS_MODULE);
1750 } 1755 }
1751 1756
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
2019 */ 2024 */
2020int input_register_device(struct input_dev *dev) 2025int input_register_device(struct input_dev *dev)
2021{ 2026{
2022 static atomic_t input_no = ATOMIC_INIT(0);
2023 struct input_devres *devres = NULL; 2027 struct input_devres *devres = NULL;
2024 struct input_handler *handler; 2028 struct input_handler *handler;
2025 unsigned int packet_size; 2029 unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
2059 * If delay and period are pre-set by the driver, then autorepeating 2063 * If delay and period are pre-set by the driver, then autorepeating
2060 * is handled by the driver itself and we don't do it in input.c. 2064 * is handled by the driver itself and we don't do it in input.c.
2061 */ 2065 */
2062 init_timer(&dev->timer);
2063 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) { 2066 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
2064 dev->timer.data = (long) dev; 2067 dev->timer.data = (long) dev;
2065 dev->timer.function = input_repeat_key; 2068 dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
2073 if (!dev->setkeycode) 2076 if (!dev->setkeycode)
2074 dev->setkeycode = input_default_setkeycode; 2077 dev->setkeycode = input_default_setkeycode;
2075 2078
2076 dev_set_name(&dev->dev, "input%ld",
2077 (unsigned long) atomic_inc_return(&input_no) - 1);
2078
2079 error = device_add(&dev->dev); 2079 error = device_add(&dev->dev);
2080 if (error) 2080 if (error)
2081 goto err_free_vals; 2081 goto err_free_vals;
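A short aside on the input core hunk above: device naming moves from input_register_device() to input_allocate_device(), so dev_name(&dev->dev) is valid as soon as the device is allocated and drivers can use it in early log messages. A minimal sketch of the naming pattern, in which the wrapper function is illustrative and only the counter and dev_set_name() call come from the patch:

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/input.h>

/* One shared counter, as in the patch; the helper around it is hypothetical. */
static atomic_t input_no = ATOMIC_INIT(0);

static void input_assign_name(struct input_dev *dev)
{
	/* Produces "input0", "input1", ... at allocation time. */
	dev_set_name(&dev->dev, "input%ld",
		     (unsigned long)atomic_inc_return(&input_no) - 1);
}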
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 134c3b404a54..a2e758d27584 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -786,10 +786,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 786 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
787 input_set_capability(input_dev, EV_MSC, MSC_SCAN); 787 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
788 788
789 if (pdata) 789 if (pdata) {
790 error = pxa27x_keypad_build_keycode(keypad); 790 error = pxa27x_keypad_build_keycode(keypad);
791 else 791 } else {
792 error = pxa27x_keypad_build_keycode_from_dt(keypad); 792 error = pxa27x_keypad_build_keycode_from_dt(keypad);
793 /*
794 * Data that we get from DT resides in dynamically
795 * allocated memory so we need to update our pdata
796 * pointer.
797 */
798 pdata = keypad->pdata;
799 }
793 if (error) { 800 if (error) {
794 dev_err(&pdev->dev, "failed to build keycode\n"); 801 dev_err(&pdev->dev, "failed to build keycode\n");
795 goto failed_put_clk; 802 goto failed_put_clk;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 082684e7f390..9365535ba7f1 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
351 if (status) { 351 if (status) {
352 if (status == -ESHUTDOWN) 352 if (status == -ESHUTDOWN)
353 return; 353 return;
354 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 354 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
355 __func__, status);
356 goto out;
355 } 357 }
356 358
357 /* Special keys */ 359 /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
418 dev->ctl_data->byte[2], 420 dev->ctl_data->byte[2],
419 dev->ctl_data->byte[3]); 421 dev->ctl_data->byte[3]);
420 422
421 if (status) 423 if (status) {
422 dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status); 424 if (status == -ESHUTDOWN)
425 return;
426 dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
427 __func__, status);
428 }
423 429
424 spin_lock(&dev->ctl_submit_lock); 430 spin_lock(&dev->ctl_submit_lock);
425 431
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
427 433
428 if (likely(!dev->shutdown)) { 434 if (likely(!dev->shutdown)) {
429 435
430 if (dev->buzzer_pending) { 436 if (dev->buzzer_pending || status) {
431 dev->buzzer_pending = 0; 437 dev->buzzer_pending = 0;
432 dev->ctl_urb_pending = 1; 438 dev->ctl_urb_pending = 1;
433 cm109_submit_buzz_toggle(dev); 439 cm109_submit_buzz_toggle(dev);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7c5d72a6a26a..83658472ad25 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_model_info alps_model_data[] = {
103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ 103 /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, 104 { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 105 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
106 { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT }, /* Dell XT2 */
106 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 107 { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
107 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, 108 { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
108 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 109 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 78e4de42efaa..52c9ebf94729 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -223,21 +223,26 @@ static int i8042_flush(void)
223{ 223{
224 unsigned long flags; 224 unsigned long flags;
225 unsigned char data, str; 225 unsigned char data, str;
226 int i = 0; 226 int count = 0;
227 int retval = 0;
227 228
228 spin_lock_irqsave(&i8042_lock, flags); 229 spin_lock_irqsave(&i8042_lock, flags);
229 230
230 while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) { 231 while ((str = i8042_read_status()) & I8042_STR_OBF) {
231 udelay(50); 232 if (count++ < I8042_BUFFER_SIZE) {
232 data = i8042_read_data(); 233 udelay(50);
233 i++; 234 data = i8042_read_data();
234 dbg("%02x <- i8042 (flush, %s)\n", 235 dbg("%02x <- i8042 (flush, %s)\n",
235 data, str & I8042_STR_AUXDATA ? "aux" : "kbd"); 236 data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
237 } else {
238 retval = -EIO;
239 break;
240 }
236 } 241 }
237 242
238 spin_unlock_irqrestore(&i8042_lock, flags); 243 spin_unlock_irqrestore(&i8042_lock, flags);
239 244
240 return i; 245 return retval;
241} 246}
242 247
243/* 248/*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
849 854
850static int i8042_controller_check(void) 855static int i8042_controller_check(void)
851{ 856{
852 if (i8042_flush() == I8042_BUFFER_SIZE) { 857 if (i8042_flush()) {
853 pr_err("No controller found\n"); 858 pr_err("No controller found\n");
854 return -ENODEV; 859 return -ENODEV;
855 } 860 }
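Note on the i8042 hunk above: i8042_flush() no longer returns the number of bytes it drained (which the caller compared against I8042_BUFFER_SIZE); it now returns 0 on success or -EIO when the output buffer never empties, and i8042_controller_check() simply tests for a non-zero result. A rough, self-contained sketch of the new loop shape, with the register accessors abstracted as function pointers and the locking omitted:

#include <linux/errno.h>

#define BUFFER_SIZE	16	/* stands in for I8042_BUFFER_SIZE */
#define STR_OBF		0x01	/* output-buffer-full status bit */

static int flush_sketch(unsigned char (*read_status)(void),
			unsigned char (*read_data)(void))
{
	int count = 0;

	while (read_status() & STR_OBF) {
		if (count++ >= BUFFER_SIZE)
			return -EIO;	/* never drains: assume the controller is absent or broken */
		(void)read_data();	/* discard one queued byte */
	}

	return 0;
}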
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79b69ea47f74..e53416a4d7f3 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1031,6 +1031,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
1031} 1031}
1032 1032
1033static enum power_supply_property wacom_battery_props[] = { 1033static enum power_supply_property wacom_battery_props[] = {
1034 POWER_SUPPLY_PROP_SCOPE,
1034 POWER_SUPPLY_PROP_CAPACITY 1035 POWER_SUPPLY_PROP_CAPACITY
1035}; 1036};
1036 1037
@@ -1042,6 +1043,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
1042 int ret = 0; 1043 int ret = 0;
1043 1044
1044 switch (psp) { 1045 switch (psp) {
1046 case POWER_SUPPLY_PROP_SCOPE:
1047 val->intval = POWER_SUPPLY_SCOPE_DEVICE;
1048 break;
1045 case POWER_SUPPLY_PROP_CAPACITY: 1049 case POWER_SUPPLY_PROP_CAPACITY:
1046 val->intval = 1050 val->intval =
1047 wacom->wacom_wac.battery_capacity * 100 / 31; 1051 wacom->wacom_wac.battery_capacity * 100 / 31;
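The wacom hunk above adds POWER_SUPPLY_PROP_SCOPE so userspace can tell that the battery powers the tablet rather than the whole system. A trimmed sketch of the property handler with only the new case shown (the function name is illustrative):

#include <linux/errno.h>
#include <linux/power_supply.h>

static int battery_get_property_sketch(enum power_supply_property psp,
					union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_SCOPE:
		/* This battery belongs to a peripheral, not the whole machine. */
		val->intval = POWER_SUPPLY_SCOPE_DEVICE;
		return 0;
	default:
		return -EINVAL;
	}
}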
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b2aa503c16b1..c59b797eeafa 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2054,6 +2054,12 @@ static const struct wacom_features wacom_features_0x101 =
2054static const struct wacom_features wacom_features_0x10D = 2054static const struct wacom_features wacom_features_0x10D =
2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2055 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2056 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x10E =
2058 { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2060static const struct wacom_features wacom_features_0x10F =
2061 { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
2062 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2057static const struct wacom_features wacom_features_0x4001 = 2063static const struct wacom_features wacom_features_0x4001 =
2058 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2064 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2059 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2065 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@ const struct usb_device_id wacom_ids[] = {
2248 { USB_DEVICE_WACOM(0x100) }, 2254 { USB_DEVICE_WACOM(0x100) },
2249 { USB_DEVICE_WACOM(0x101) }, 2255 { USB_DEVICE_WACOM(0x101) },
2250 { USB_DEVICE_WACOM(0x10D) }, 2256 { USB_DEVICE_WACOM(0x10D) },
2257 { USB_DEVICE_WACOM(0x10E) },
2258 { USB_DEVICE_WACOM(0x10F) },
2251 { USB_DEVICE_WACOM(0x300) }, 2259 { USB_DEVICE_WACOM(0x300) },
2252 { USB_DEVICE_WACOM(0x301) }, 2260 { USB_DEVICE_WACOM(0x301) },
2253 { USB_DEVICE_WACOM(0x304) }, 2261 { USB_DEVICE_WACOM(0x304) },
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index b6a74bcbb08f..2a7f0dd6abab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1000,7 +1000,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
1000 1000
1001 if (bio->bi_rw & REQ_FLUSH) { 1001 if (bio->bi_rw & REQ_FLUSH) {
1002 /* Also need to send a flush to the backing device */ 1002 /* Also need to send a flush to the backing device */
1003 struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, 1003 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
1004 dc->disk.bio_split); 1004 dc->disk.bio_split);
1005 1005
1006 flush->bi_rw = WRITE_FLUSH; 1006 flush->bi_rw = WRITE_FLUSH;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 4caa8e6d59d7..2d2b1b7588d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); 269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
270} 270}
271 271
272static void skip_metadata(struct pstore *ps)
273{
274 uint32_t stride = ps->exceptions_per_area + 1;
275 chunk_t next_free = ps->next_free;
276 if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
277 ps->next_free++;
278}
279
272/* 280/*
273 * Read or write a metadata area. Remembering to skip the first 281 * Read or write a metadata area. Remembering to skip the first
274 * chunk which holds the header. 282 * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
502 510
503 ps->current_area--; 511 ps->current_area--;
504 512
513 skip_metadata(ps);
514
505 return 0; 515 return 0;
506} 516}
507 517
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
616 struct dm_exception *e) 626 struct dm_exception *e)
617{ 627{
618 struct pstore *ps = get_info(store); 628 struct pstore *ps = get_info(store);
619 uint32_t stride;
620 chunk_t next_free;
621 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); 629 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
622 630
623 /* Is there enough room ? */ 631 /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
630 * Move onto the next free pending, making sure to take 638 * Move onto the next free pending, making sure to take
631 * into account the location of the metadata chunks. 639 * into account the location of the metadata chunks.
632 */ 640 */
633 stride = (ps->exceptions_per_area + 1); 641 ps->next_free++;
634 next_free = ++ps->next_free; 642 skip_metadata(ps);
635 if (sector_div(next_free, stride) == 1)
636 ps->next_free++;
637 643
638 atomic_inc(&ps->pending_count); 644 atomic_inc(&ps->pending_count);
639 return 0; 645 return 0;
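For reference, the skip_metadata() helper introduced above captures the on-disk layout rule: exception areas repeat every (exceptions_per_area + 1) chunks, and within each stride the chunk at offset NUM_SNAPSHOT_HDR_CHUNKS holds area metadata, so a next_free that lands there must be stepped over. Restated from the patch with comments (types and fields are those of dm-snap-persistent.c):

static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	/* sector_div() divides next_free in place and returns the remainder. */
	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;	/* move on to the first data chunk of the area */
}

Both read_exceptions() and persistent_prepare_exception() now call it, so next_free is not left pointing at a metadata chunk after a snapshot is reloaded.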
diff --git a/drivers/md/md.c b/drivers/md/md.c
index adf4d7e1d5e1..561a65f82e26 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8111 u64 *p; 8111 u64 *p;
8112 int lo, hi; 8112 int lo, hi;
8113 int rv = 1; 8113 int rv = 1;
8114 unsigned long flags;
8114 8115
8115 if (bb->shift < 0) 8116 if (bb->shift < 0)
8116 /* badblocks are disabled */ 8117 /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8125 sectors = next - s; 8126 sectors = next - s;
8126 } 8127 }
8127 8128
8128 write_seqlock_irq(&bb->lock); 8129 write_seqlock_irqsave(&bb->lock, flags);
8129 8130
8130 p = bb->page; 8131 p = bb->page;
8131 lo = 0; 8132 lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8241 bb->changed = 1; 8242 bb->changed = 1;
8242 if (!acknowledged) 8243 if (!acknowledged)
8243 bb->unacked_exist = 1; 8244 bb->unacked_exist = 1;
8244 write_sequnlock_irq(&bb->lock); 8245 write_sequnlock_irqrestore(&bb->lock, flags);
8245 8246
8246 return rv; 8247 return rv;
8247} 8248}
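The md change above matters because md_set_badblocks() can be called with interrupts already disabled; write_sequnlock_irq() would re-enable them unconditionally, whereas the irqsave/irqrestore pair preserves the caller's interrupt state. A generic illustration of the pattern (all names here are placeholders, not taken from md.c):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);
static int protected_value;

/* Safe from any context: the prior interrupt state is saved and restored. */
static void update_protected_value(int v)
{
	unsigned long flags;

	write_seqlock_irqsave(&example_lock, flags);
	protected_value = v;
	write_sequnlock_irqrestore(&example_lock, flags);
}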
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d60412c7f995..aacf6bf352d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
1479 } 1479 }
1480 } 1480 }
1481 if (rdev 1481 if (rdev
1482 && rdev->recovery_offset == MaxSector
1482 && !test_bit(Faulty, &rdev->flags) 1483 && !test_bit(Faulty, &rdev->flags)
1483 && !test_and_set_bit(In_sync, &rdev->flags)) { 1484 && !test_and_set_bit(In_sync, &rdev->flags)) {
1484 count++; 1485 count++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index df7b0a06b0ea..73dc8a377522 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
1782 } 1782 }
1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1783 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1784 } else if (tmp->rdev 1784 } else if (tmp->rdev
1785 && tmp->rdev->recovery_offset == MaxSector
1785 && !test_bit(Faulty, &tmp->rdev->flags) 1786 && !test_bit(Faulty, &tmp->rdev->flags)
1786 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1787 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1787 count++; 1788 count++;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ff4f252ca1a..f8b906843926 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 778 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
779 bi->bi_io_vec[0].bv_offset = 0; 779 bi->bi_io_vec[0].bv_offset = 0;
780 bi->bi_size = STRIPE_SIZE; 780 bi->bi_size = STRIPE_SIZE;
781 /*
 782 * If this is a discard request, set bi_vcnt to 0. We don't
 783 * want to confuse SCSI because SCSI will replace the payload.
784 */
785 if (rw & REQ_DISCARD)
786 bi->bi_vcnt = 0;
781 if (rrdev) 787 if (rrdev)
782 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 788 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
783 789
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
816 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 822 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
817 rbi->bi_io_vec[0].bv_offset = 0; 823 rbi->bi_io_vec[0].bv_offset = 0;
818 rbi->bi_size = STRIPE_SIZE; 824 rbi->bi_size = STRIPE_SIZE;
825 /*
 826 * If this is a discard request, set bi_vcnt to 0. We don't
 827 * want to confuse SCSI because SCSI will replace the payload.
828 */
829 if (rw & REQ_DISCARD)
830 rbi->bi_vcnt = 0;
819 if (conf->mddev->gendisk) 831 if (conf->mddev->gendisk)
820 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 832 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
821 rbi, disk_devt(conf->mddev->gendisk), 833 rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
2910 } 2922 }
2911 /* now that discard is done we can proceed with any sync */ 2923 /* now that discard is done we can proceed with any sync */
2912 clear_bit(STRIPE_DISCARD, &sh->state); 2924 clear_bit(STRIPE_DISCARD, &sh->state);
2925 /*
2926 * SCSI discard will change some bio fields and the stripe has
 2927 * no updated data, so remove it from the hash list; the stripe
 2928 * will be reinitialized.
2929 */
2930 spin_lock_irq(&conf->device_lock);
2931 remove_hash(sh);
2932 spin_unlock_irq(&conf->device_lock);
2913 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 2933 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2914 set_bit(STRIPE_HANDLE, &sh->state); 2934 set_bit(STRIPE_HANDLE, &sh->state);
2915 2935
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 2521f7e23018..e79749cfec81 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
912 { 0xd5, 0x03, 0x03 }, 912 { 0xd5, 0x03, 0x03 },
913 }; 913 };
914 914
915 /* firmware status */ 915 if (priv->warm) {
916 ret = tda10071_rd_reg(priv, 0x51, &tmp);
917 if (ret)
918 goto error;
919
920 if (!tmp) {
921 /* warm state - wake up device from sleep */ 916 /* warm state - wake up device from sleep */
922 priv->warm = 1;
923 917
924 for (i = 0; i < ARRAY_SIZE(tab); i++) { 918 for (i = 0; i < ARRAY_SIZE(tab); i++) {
925 ret = tda10071_wr_reg_mask(priv, tab[i].reg, 919 ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
937 goto error; 931 goto error;
938 } else { 932 } else {
939 /* cold state - try to download firmware */ 933 /* cold state - try to download firmware */
940 priv->warm = 0;
941 934
942 /* request the firmware, this will block and timeout */ 935 /* request the firmware, this will block and timeout */
943 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent); 936 ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index bb0c99d7a4f1..b06a7e54ee0d 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
628 628
629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = { 629static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
630 .type = V4L2_DV_BT_656_1120, 630 .type = V4L2_DV_BT_656_1120,
631 .bt = { 631 /* keep this initialization for compatibility with GCC < 4.4.6 */
632 .max_width = 1920, 632 .reserved = { 0 },
633 .max_height = 1200, 633 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
634 .min_pixelclock = 25000000, 634 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
635 .max_pixelclock = 170000000,
636 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
637 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 635 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
638 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 636 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
639 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 637 V4L2_DV_BT_CAP_CUSTOM)
640 },
641}; 638};
642 639
643static int ad9389b_s_dv_timings(struct v4l2_subdev *sd, 640static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 7a576097471f..7c8d971f1f61 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
119 119
120static const struct v4l2_dv_timings_cap adv7511_timings_cap = { 120static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
121 .type = V4L2_DV_BT_656_1120, 121 .type = V4L2_DV_BT_656_1120,
122 .bt = { 122 /* keep this initialization for compatibility with GCC < 4.4.6 */
123 .max_width = ADV7511_MAX_WIDTH, 123 .reserved = { 0 },
124 .max_height = ADV7511_MAX_HEIGHT, 124 V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
125 .min_pixelclock = ADV7511_MIN_PIXELCLOCK, 125 ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
126 .max_pixelclock = ADV7511_MAX_PIXELCLOCK, 126 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
127 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
128 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 127 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
129 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 128 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
130 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 129 V4L2_DV_BT_CAP_CUSTOM)
131 },
132}; 130};
133 131
134static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd) 132static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1126 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1); 1124 state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
1127 if (state->i2c_edid == NULL) { 1125 if (state->i2c_edid == NULL) {
1128 v4l2_err(sd, "failed to register edid i2c client\n"); 1126 v4l2_err(sd, "failed to register edid i2c client\n");
1127 err = -ENOMEM;
1129 goto err_entity; 1128 goto err_entity;
1130 } 1129 }
1131 1130
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
1133 state->work_queue = create_singlethread_workqueue(sd->name); 1132 state->work_queue = create_singlethread_workqueue(sd->name);
1134 if (state->work_queue == NULL) { 1133 if (state->work_queue == NULL) {
1135 v4l2_err(sd, "could not create workqueue\n"); 1134 v4l2_err(sd, "could not create workqueue\n");
1135 err = -ENOMEM;
1136 goto err_unreg_cec; 1136 goto err_unreg_cec;
1137 } 1137 }
1138 1138
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index d1748901337c..22f729d66a96 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
546 546
547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = { 547static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
548 .type = V4L2_DV_BT_656_1120, 548 .type = V4L2_DV_BT_656_1120,
549 .bt = { 549 /* keep this initialization for compatibility with GCC < 4.4.6 */
550 .max_width = 1920, 550 .reserved = { 0 },
551 .max_height = 1200, 551 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
552 .min_pixelclock = 25000000, 552 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
553 .max_pixelclock = 170000000,
554 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
555 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 553 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
556 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 554 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
557 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 555 V4L2_DV_BT_CAP_CUSTOM)
558 },
559}; 556};
560 557
561static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = { 558static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
562 .type = V4L2_DV_BT_656_1120, 559 .type = V4L2_DV_BT_656_1120,
563 .bt = { 560 /* keep this initialization for compatibility with GCC < 4.4.6 */
564 .max_width = 1920, 561 .reserved = { 0 },
565 .max_height = 1200, 562 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
566 .min_pixelclock = 25000000, 563 V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
567 .max_pixelclock = 225000000,
568 .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
569 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT, 564 V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
570 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | 565 V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
571 V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, 566 V4L2_DV_BT_CAP_CUSTOM)
572 },
573}; 567};
574 568
575static inline const struct v4l2_dv_timings_cap * 569static inline const struct v4l2_dv_timings_cap *
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index a58a8f663ffb..d9f65d7e3e58 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -46,14 +46,10 @@ struct ths8200_state {
46 46
47static const struct v4l2_dv_timings_cap ths8200_timings_cap = { 47static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
48 .type = V4L2_DV_BT_656_1120, 48 .type = V4L2_DV_BT_656_1120,
49 .bt = { 49 /* keep this initialization for compatibility with GCC < 4.4.6 */
50 .max_width = 1920, 50 .reserved = { 0 },
51 .max_height = 1080, 51 V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
52 .min_pixelclock = 25000000, 52 V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
53 .max_pixelclock = 148500000,
54 .standards = V4L2_DV_BT_STD_CEA861,
55 .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
56 },
57}; 53};
58 54
59static inline struct ths8200_state *to_state(struct v4l2_subdev *sd) 55static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index e12bbd8c3f0b..fb60da85bc2c 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
1455 1455
1456 /* stop video capture */ 1456 /* stop video capture */
1457 if (res_check(fh, RESOURCE_VIDEO)) { 1457 if (res_check(fh, RESOURCE_VIDEO)) {
1458 pm_qos_remove_request(&dev->qos_request);
1458 videobuf_streamoff(&fh->cap); 1459 videobuf_streamoff(&fh->cap);
1459 res_free(dev,fh,RESOURCE_VIDEO); 1460 res_free(dev,fh,RESOURCE_VIDEO);
1460 } 1461 }
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 15d23968d1de..9b88a4601007 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
1423 jpeg->vfd_decoder->release = video_device_release; 1423 jpeg->vfd_decoder->release = video_device_release;
1424 jpeg->vfd_decoder->lock = &jpeg->lock; 1424 jpeg->vfd_decoder->lock = &jpeg->lock;
1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev; 1425 jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
1426 jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
1426 1427
1427 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1); 1428 ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
1428 if (ret) { 1429 if (ret) {
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 7a9c5e9329f2..4f30341dc2ab 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1, 776 v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0); 777 &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
778 778
779 for (i = 0; ARRAY_SIZE(vou_fmt); i++) 779 for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
780 if (vou_fmt[i].pfmt == pix->pixelformat) 780 if (vou_fmt[i].pfmt == pix->pixelformat)
781 return 0; 781 return 0;
782 782
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 8f9f6211c52e..f975b7008692 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0]; 266 struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
267 struct idmac_video_param *video = &ichan->params.video; 267 struct idmac_video_param *video = &ichan->params.video;
268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt; 268 const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
269 unsigned long flags;
270 dma_cookie_t cookie; 269 dma_cookie_t cookie;
271 size_t new_size; 270 size_t new_size;
272 271
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
328 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); 327 memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
329#endif 328#endif
330 329
331 spin_lock_irqsave(&mx3_cam->lock, flags); 330 spin_lock_irq(&mx3_cam->lock);
332 list_add_tail(&buf->queue, &mx3_cam->capture); 331 list_add_tail(&buf->queue, &mx3_cam->capture);
333 332
334 if (!mx3_cam->active) 333 if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
351 if (mx3_cam->active == buf) 350 if (mx3_cam->active == buf)
352 mx3_cam->active = NULL; 351 mx3_cam->active = NULL;
353 352
354 spin_unlock_irqrestore(&mx3_cam->lock, flags); 353 spin_unlock_irq(&mx3_cam->lock);
355error: 354error:
356 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); 355 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
357} 356}
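The mx3_camera hunk is the converse case: the change assumes the videobuf2 queue callback is not entered with interrupts disabled, so the plain spin_lock_irq()/spin_unlock_irq() pair is sufficient and the saved-flags local becomes dead weight. Illustrative only, with placeholder names:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(queue_lock);

static void queue_buffer(struct list_head *buf, struct list_head *capture)
{
	spin_lock_irq(&queue_lock);	/* unconditionally disables, then re-enables, irqs */
	list_add_tail(buf, capture);
	spin_unlock_irq(&queue_lock);
}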
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index ad9309da4a91..6c96e4898777 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include "e4000_priv.h" 21#include "e4000_priv.h"
22#include <linux/math64.h>
22 23
23/* write multiple registers */ 24/* write multiple registers */
24static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len) 25static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
233 * or more. 234 * or more.
234 */ 235 */
235 f_vco = c->frequency * e4000_pll_lut[i].mul; 236 f_vco = c->frequency * e4000_pll_lut[i].mul;
236 sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock; 237 sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
237 buf[0] = f_vco / priv->cfg->clock; 238 buf[0] = f_vco / priv->cfg->clock;
238 buf[1] = (sigma_delta >> 0) & 0xff; 239 buf[1] = (sigma_delta >> 0) & 0xff;
239 buf[2] = (sigma_delta >> 8) & 0xff; 240 buf[2] = (sigma_delta >> 8) & 0xff;
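The e4000 change exists because 0x10000 multiplied by the PLL remainder can exceed 32 bits, and an open-coded 64-by-32 division does not link on 32-bit architectures; div_u64() from <linux/math64.h> (now included at the top of the file) performs the division portably. The isolated calculation, as a sketch:

#include <linux/math64.h>
#include <linux/types.h>

static u32 sigma_delta_sketch(unsigned int f_vco, unsigned int clock)
{
	/* The 64-bit product is divided with div_u64() so 32-bit builds still link. */
	return (u32)div_u64(0x10000ULL * (f_vco % clock), clock);
}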
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c43c8d32be40..be77482c3070 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC") 111 DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
112 } 112 }
113 }, 113 },
114 {
115 .ident = "T12Rg-H",
116 .matches = {
117 DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
118 DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
119 }
120 },
114 {} 121 {}
115}; 122};
116 123
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 81695d48c13e..c3bb2502225b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
2090 .bInterfaceSubClass = 1, 2090 .bInterfaceSubClass = 1,
2091 .bInterfaceProtocol = 0, 2091 .bInterfaceProtocol = 0,
2092 .driver_info = UVC_QUIRK_PROBE_MINMAX }, 2092 .driver_info = UVC_QUIRK_PROBE_MINMAX },
2093 /* Microsoft Lifecam NX-3000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO,
2096 .idVendor = 0x045e,
2097 .idProduct = 0x0721,
2098 .bInterfaceClass = USB_CLASS_VIDEO,
2099 .bInterfaceSubClass = 1,
2100 .bInterfaceProtocol = 0,
2101 .driver_info = UVC_QUIRK_PROBE_DEF },
2093 /* Microsoft Lifecam VX-7000 */ 2102 /* Microsoft Lifecam VX-7000 */
2094 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2103 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2095 | USB_DEVICE_ID_MATCH_INT_INFO, 2104 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
2174 .bInterfaceSubClass = 1, 2183 .bInterfaceSubClass = 1,
2175 .bInterfaceProtocol = 0, 2184 .bInterfaceProtocol = 0,
2176 .driver_info = UVC_QUIRK_PROBE_DEF }, 2185 .driver_info = UVC_QUIRK_PROBE_DEF },
2186 /* Dell SP2008WFP Monitor */
2187 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2188 | USB_DEVICE_ID_MATCH_INT_INFO,
2189 .idVendor = 0x05a9,
2190 .idProduct = 0x2641,
2191 .bInterfaceClass = USB_CLASS_VIDEO,
2192 .bInterfaceSubClass = 1,
2193 .bInterfaceProtocol = 0,
2194 .driver_info = UVC_QUIRK_PROBE_DEF },
2177 /* Dell Alienware X51 */ 2195 /* Dell Alienware X51 */
2178 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2196 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2179 | USB_DEVICE_ID_MATCH_INT_INFO, 2197 | USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 594c75eab5a5..de0e87f0b2c3 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
353 353
354 if (b->m.planes[plane].bytesused > length) 354 if (b->m.planes[plane].bytesused > length)
355 return -EINVAL; 355 return -EINVAL;
356 if (b->m.planes[plane].data_offset >= 356
357 if (b->m.planes[plane].data_offset > 0 &&
358 b->m.planes[plane].data_offset >=
357 b->m.planes[plane].bytesused) 359 b->m.planes[plane].bytesused)
358 return -EINVAL; 360 return -EINVAL;
359 } 361 }
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fd56f2563201..646f08f4f504 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); 423 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
424} 424}
425 425
426static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
427 struct vm_area_struct *vma, unsigned long *res)
428{
429 unsigned long pfn, start_pfn, prev_pfn;
430 unsigned int i;
431 int ret;
432
433 if (!vma_is_io(vma))
434 return -EFAULT;
435
436 ret = follow_pfn(vma, start, &pfn);
437 if (ret)
438 return ret;
439
440 start_pfn = pfn;
441 start += PAGE_SIZE;
442
443 for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
444 prev_pfn = pfn;
445 ret = follow_pfn(vma, start, &pfn);
446
447 if (ret) {
448 pr_err("no page for address %lu\n", start);
449 return ret;
450 }
451 if (pfn != prev_pfn + 1)
452 return -EINVAL;
453 }
454
455 *res = start_pfn;
456 return 0;
457}
458
426static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, 459static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
427 int n_pages, struct vm_area_struct *vma, int write) 460 int n_pages, struct vm_area_struct *vma, int write)
428{ 461{
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
433 unsigned long pfn; 466 unsigned long pfn;
434 int ret = follow_pfn(vma, start, &pfn); 467 int ret = follow_pfn(vma, start, &pfn);
435 468
469 if (!pfn_valid(pfn))
470 return -EINVAL;
471
436 if (ret) { 472 if (ret) {
437 pr_err("no page for address %lu\n", start); 473 pr_err("no page for address %lu\n", start);
438 return ret; 474 return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
468 struct vb2_dc_buf *buf = buf_priv; 504 struct vb2_dc_buf *buf = buf_priv;
469 struct sg_table *sgt = buf->dma_sgt; 505 struct sg_table *sgt = buf->dma_sgt;
470 506
471 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir); 507 if (sgt) {
472 if (!vma_is_io(buf->vma)) 508 dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
473 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page); 509 if (!vma_is_io(buf->vma))
510 vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
474 511
475 sg_free_table(sgt); 512 sg_free_table(sgt);
476 kfree(sgt); 513 kfree(sgt);
514 }
477 vb2_put_vma(buf->vma); 515 vb2_put_vma(buf->vma);
478 kfree(buf); 516 kfree(buf);
479} 517}
480 518
519/*
520 * For some kind of reserved memory there might be no struct page available,
521 * so all that can be done to support such 'pages' is to try to convert
522 * pfn to dma address or at the last resort just assume that
523 * dma address == physical address (like it has been assumed in earlier version
524 * of videobuf2-dma-contig
525 */
526
527#ifdef __arch_pfn_to_dma
528static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
529{
530 return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
531}
532#elif defined(__pfn_to_bus)
533static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
534{
535 return (dma_addr_t)__pfn_to_bus(pfn);
536}
537#elif defined(__pfn_to_phys)
538static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
539{
540 return (dma_addr_t)__pfn_to_phys(pfn);
541}
542#else
543static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
544{
545 /* really, we cannot do anything better at this point */
546 return (dma_addr_t)(pfn) << PAGE_SHIFT;
547}
548#endif
549
481static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, 550static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
482 unsigned long size, int write) 551 unsigned long size, int write)
483{ 552{
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
548 /* extract page list from userspace mapping */ 617 /* extract page list from userspace mapping */
549 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); 618 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
550 if (ret) { 619 if (ret) {
620 unsigned long pfn;
621 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
622 buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
623 buf->size = size;
624 kfree(pages);
625 return buf;
626 }
627
551 pr_err("failed to get user pages\n"); 628 pr_err("failed to get user pages\n");
552 goto fail_vma; 629 goto fail_vma;
553 } 630 }
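Taken together, the videobuf2-dma-contig additions give USERPTR buffers a fallback for mappings that have no struct pages (VM_IO/VM_PFNMAP): the pfns are walked, required to be contiguous, and translated straight to a dma address with whatever pfn-to-bus conversion the architecture offers. A condensed sketch of that fallback, reusing the helpers added by the patch (the wrapper function itself is hypothetical):

/* 'buf' fields (dev, dma_addr, size) are those of struct vb2_dc_buf. */
static int userptr_pfn_fallback(struct vb2_dc_buf *buf,
				struct vm_area_struct *vma,
				unsigned long start, int n_pages,
				unsigned long size)
{
	unsigned long pfn;
	int ret;

	/* Fails unless the VMA is VM_IO/VM_PFNMAP and its pfns are contiguous. */
	ret = vb2_dc_get_user_pfn(start, n_pages, vma, &pfn);
	if (ret)
		return ret;

	buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);	/* best-effort pfn -> dma */
	buf->size = size;
	return 0;
}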
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 2a9b100c4825..dbbf8ee3f592 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -158,8 +158,6 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
158{ 158{
159 int ret; 159 int ret;
160 160
161 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
162
163 if (offset > MC13XXX_NUMREGS) 161 if (offset > MC13XXX_NUMREGS)
164 return -EINVAL; 162 return -EINVAL;
165 163
@@ -172,8 +170,6 @@ EXPORT_SYMBOL(mc13xxx_reg_read);
172 170
173int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val) 171int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
174{ 172{
175 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
176
177 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val); 173 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
178 174
179 if (offset > MC13XXX_NUMREGS || val > 0xffffff) 175 if (offset > MC13XXX_NUMREGS || val > 0xffffff)
@@ -186,7 +182,6 @@ EXPORT_SYMBOL(mc13xxx_reg_write);
186int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset, 182int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
187 u32 mask, u32 val) 183 u32 mask, u32 val)
188{ 184{
189 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
190 BUG_ON(val & ~mask); 185 BUG_ON(val & ~mask);
191 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n", 186 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
192 offset, val, mask); 187 offset, val, mask);
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 77189daadf1e..5f14ef6693c2 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -94,10 +94,15 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
94{ 94{
95 struct device *dev = context; 95 struct device *dev = context;
96 struct spi_device *spi = to_spi_device(dev); 96 struct spi_device *spi = to_spi_device(dev);
97 const char *reg = data;
97 98
98 if (count != 4) 99 if (count != 4)
99 return -ENOTSUPP; 100 return -ENOTSUPP;
100 101
 102 /* Errata fix for SPI audio problems: write the audio registers twice */
103 if (*reg == MC13783_AUDIO_CODEC || *reg == MC13783_AUDIO_DAC)
104 spi_write(spi, data, count);
105
101 return spi_write(spi, data, count); 106 return spi_write(spi, data, count);
102} 107}
103 108
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 59ab0692f0b9..a9830ff8e3f3 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -349,7 +349,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
349 349
350int common_nfc_set_geometry(struct gpmi_nand_data *this) 350int common_nfc_set_geometry(struct gpmi_nand_data *this)
351{ 351{
352 return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this); 352 return legacy_set_geometry(this);
353} 353}
354 354
355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 355struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dd03dfdfb0d6..c28d4e29af1a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,7 +1320,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 for (cs = 0; cs < pdata->num_cs; cs++) { 1320 for (cs = 0; cs < pdata->num_cs; cs++) {
1321 struct mtd_info *mtd = info->host[cs]->mtd; 1321 struct mtd_info *mtd = info->host[cs]->mtd;
1322 1322
1323 mtd->name = pdev->name; 1323 /*
 1324 * The mtd name matches the one used in the 'mtdparts' kernel
 1325 * parameter. This name cannot be changed, otherwise the
 1326 * user's mtd partition configuration would break.
1327 */
1328 mtd->name = "pxa3xx_nand-0";
1324 info->cs = cs; 1329 info->cs = cs;
1325 ret = pxa3xx_nand_scan(mtd); 1330 ret = pxa3xx_nand_scan(mtd);
1326 if (ret) { 1331 if (ret) {
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3b1ff6148702..693d8ffe4653 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
1405 1405
1406static const struct platform_device_id at91_can_id_table[] = { 1406static const struct platform_device_id at91_can_id_table[] = {
1407 { 1407 {
1408 .name = "at91_can", 1408 .name = "at91sam9x5_can",
1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, 1409 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1410 }, { 1410 }, {
1411 .name = "at91sam9x5_can", 1411 .name = "at91_can",
1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, 1412 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1413 }, { 1413 }, {
1414 /* sentinel */ 1414 /* sentinel */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
705 size_t size; 705 size_t size;
706 706
707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ 707 size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
708 size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ 708 size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ 709 size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
710 size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ 710 size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
711 size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ 711 size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ 712 if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
713 size += sizeof(struct can_berr_counter); 713 size += nla_total_size(sizeof(struct can_berr_counter));
714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ 714 if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
715 size += sizeof(struct can_bittiming_const); 715 size += nla_total_size(sizeof(struct can_bittiming_const));
716 716
717 return size; 717 return size;
718} 718}
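The can_get_size() fix follows the standard netlink sizing rule: every attribute occupies nla_total_size(payload) bytes, i.e. the payload plus the struct nlattr header plus alignment padding, so summing bare struct sizes under-estimates the skb and the dump can overrun it. A reduced sketch of the corrected accounting (function name illustrative, attribute list trimmed):

#include <net/netlink.h>
#include <linux/can/netlink.h>

static size_t can_ifla_size_sketch(bool have_berr_counter)
{
	size_t size = 0;

	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_STATE */
	size += nla_total_size(sizeof(struct can_ctrlmode));	/* IFLA_CAN_CTRLMODE */
	size += nla_total_size(sizeof(struct can_bittiming));	/* IFLA_CAN_BITTIMING */
	if (have_berr_counter)					/* IFLA_CAN_BERR_COUNTER */
		size += nla_total_size(sizeof(struct can_berr_counter));

	return size;
}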
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 3f21142138b7..8f5ce747feb5 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -62,7 +62,7 @@
62#define FLEXCAN_MCR_BCC BIT(16) 62#define FLEXCAN_MCR_BCC BIT(16)
63#define FLEXCAN_MCR_LPRIO_EN BIT(13) 63#define FLEXCAN_MCR_LPRIO_EN BIT(13)
64#define FLEXCAN_MCR_AEN BIT(12) 64#define FLEXCAN_MCR_AEN BIT(12)
65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf) 65#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
66#define FLEXCAN_MCR_IDAM_A (0 << 8) 66#define FLEXCAN_MCR_IDAM_A (0 << 8)
67#define FLEXCAN_MCR_IDAM_B (1 << 8) 67#define FLEXCAN_MCR_IDAM_B (1 << 8)
68#define FLEXCAN_MCR_IDAM_C (2 << 8) 68#define FLEXCAN_MCR_IDAM_C (2 << 8)
@@ -735,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
735 * 735 *
736 */ 736 */
737 reg_mcr = flexcan_read(&regs->mcr); 737 reg_mcr = flexcan_read(&regs->mcr);
738 reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
738 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT | 739 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
739 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | 740 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
740 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS; 741 FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
742 FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
741 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); 743 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
742 flexcan_write(reg_mcr, &regs->mcr); 744 flexcan_write(reg_mcr, &regs->mcr);
743 745
@@ -771,6 +773,10 @@ static int flexcan_chip_start(struct net_device *dev)
771 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); 773 netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
772 flexcan_write(reg_ctrl, &regs->ctrl); 774 flexcan_write(reg_ctrl, &regs->ctrl);
773 775
776 /* Abort any pending TX, mark Mailbox as INACTIVE */
777 flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
778 &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
779
774 /* acceptance mask/acceptance code (accept everything) */ 780 /* acceptance mask/acceptance code (accept everything) */
775 flexcan_write(0x0, &regs->rxgmask); 781 flexcan_write(0x0, &regs->rxgmask);
776 flexcan_write(0x0, &regs->rx14mask); 782 flexcan_write(0x0, &regs->rx14mask);
@@ -979,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
979} 985}
980 986
981static const struct of_device_id flexcan_of_match[] = { 987static const struct of_device_id flexcan_of_match[] = {
982 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
983 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
984 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 988 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
989 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
990 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
985 { /* sentinel */ }, 991 { /* sentinel */ },
986}; 992};
987MODULE_DEVICE_TABLE(of, flexcan_of_match); 993MODULE_DEVICE_TABLE(of, flexcan_of_match);
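Two details stand out in the flexcan hunk: MAXMB is a five-bit field, hence the mask change from 0xf to 0x1f, and the field is cleared before the new value is or-ed in so that reset leftovers cannot produce a bogus last-mailbox index. A rough sketch of the resulting read-modify-write, with the real FLEXCAN_* names and register access elided and TX_BUF_ID chosen arbitrarily:

#define MCR_MAXMB(x)	((x) & 0x1f)	/* 5-bit "last active mailbox" field */
#define TX_BUF_ID	8		/* illustrative mailbox index */

static unsigned int fixup_mcr(unsigned int mcr)
{
	mcr &= ~MCR_MAXMB(0xff);	/* wipe whatever MAXMB value reset left behind */
	mcr |= MCR_MAXMB(TX_BUF_ID);	/* limit mailbox scanning to what the driver uses */
	return mcr;
}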
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 97b3d32a98bd..c5e375ddd6c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1197,8 +1197,9 @@ union cdu_context {
1197/* TM (timers) host DB constants */ 1197/* TM (timers) host DB constants */
1198#define TM_ILT_PAGE_SZ_HW 0 1198#define TM_ILT_PAGE_SZ_HW 0
1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1199#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
1200/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ 1200#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
1201#define TM_CONN_NUM 1024 1201 BNX2X_VF_CIDS + \
1202 CNIC_ISCSI_CID_MAX)
1202#define TM_ILT_SZ (8 * TM_CONN_NUM) 1203#define TM_ILT_SZ (8 * TM_CONN_NUM)
1203#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) 1204#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
1204 1205
@@ -1527,7 +1528,6 @@ struct bnx2x {
1527#define PCI_32BIT_FLAG (1 << 1) 1528#define PCI_32BIT_FLAG (1 << 1)
1528#define ONE_PORT_FLAG (1 << 2) 1529#define ONE_PORT_FLAG (1 << 2)
1529#define NO_WOL_FLAG (1 << 3) 1530#define NO_WOL_FLAG (1 << 3)
1530#define USING_DAC_FLAG (1 << 4)
1531#define USING_MSIX_FLAG (1 << 5) 1531#define USING_MSIX_FLAG (1 << 5)
1532#define USING_MSI_FLAG (1 << 6) 1532#define USING_MSI_FLAG (1 << 6)
1533#define DISABLE_MSI_FLAG (1 << 7) 1533#define DISABLE_MSI_FLAG (1 << 7)
@@ -1621,7 +1621,7 @@ struct bnx2x {
1621 u16 rx_ticks_int; 1621 u16 rx_ticks_int;
1622 u16 rx_ticks; 1622 u16 rx_ticks;
1623/* Maximal coalescing timeout in us */ 1623/* Maximal coalescing timeout in us */
1624#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) 1624#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
1625 1625
1626 u32 lin_cnt; 1626 u32 lin_cnt;
1627 1627
@@ -2072,7 +2072,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
2072 2072
2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 2073void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2074 u8 src_type, u8 dst_type); 2074 u8 src_type, u8 dst_type);
2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); 2075int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2076 u32 *comp);
2076 2077
2077/* FLR related routines */ 2078/* FLR related routines */
2078u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); 2079u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2498,4 +2499,8 @@ enum bnx2x_pci_bus_speed {
2498}; 2499};
2499 2500
2500void bnx2x_set_local_cmng(struct bnx2x *bp); 2501void bnx2x_set_local_cmng(struct bnx2x *bp);
2502
2503#define MCPR_SCRATCH_BASE(bp) \
2504 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2505
2501#endif /* bnx2x.h */ 2506#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e66beff2704d..4ab4c89c60cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
681 } 681 }
682 } 682 }
683#endif 683#endif
684 skb_record_rx_queue(skb, fp->rx_queue);
684 napi_gro_receive(&fp->napi, skb); 685 napi_gro_receive(&fp->napi, skb);
685} 686}
686 687
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 324de5f05332..e8efa1c93ffe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -891,17 +891,8 @@ static void bnx2x_get_regs(struct net_device *dev,
891 * will re-enable parity attentions right after the dump. 891 * will re-enable parity attentions right after the dump.
892 */ 892 */
893 893
894 /* Disable parity on path 0 */
895 bnx2x_pretend_func(bp, 0);
896 bnx2x_disable_blocks_parity(bp); 894 bnx2x_disable_blocks_parity(bp);
897 895
898 /* Disable parity on path 1 */
899 bnx2x_pretend_func(bp, 1);
900 bnx2x_disable_blocks_parity(bp);
901
902 /* Return to current function */
903 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
904
905 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 896 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
906 dump_hdr.preset = DUMP_ALL_PRESETS; 897 dump_hdr.preset = DUMP_ALL_PRESETS;
907 dump_hdr.version = BNX2X_DUMP_VERSION; 898 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +919,9 @@ static void bnx2x_get_regs(struct net_device *dev,
928 /* Actually read the registers */ 919 /* Actually read the registers */
929 __bnx2x_get_regs(bp, p); 920 __bnx2x_get_regs(bp, p);
930 921
931 /* Re-enable parity attentions on path 0 */ 922 /* Re-enable parity attentions */
932 bnx2x_pretend_func(bp, 0);
933 bnx2x_clear_blocks_parity(bp); 923 bnx2x_clear_blocks_parity(bp);
934 bnx2x_enable_blocks_parity(bp); 924 bnx2x_enable_blocks_parity(bp);
935
936 /* Re-enable parity attentions on path 1 */
937 bnx2x_pretend_func(bp, 1);
938 bnx2x_clear_blocks_parity(bp);
939 bnx2x_enable_blocks_parity(bp);
940
941 /* Return to current function */
942 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
943} 925}
944 926
945static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) 927static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +975,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
993 * will re-enable parity attentions right after the dump. 975 * will re-enable parity attentions right after the dump.
994 */ 976 */
995 977
996 /* Disable parity on path 0 */
997 bnx2x_pretend_func(bp, 0);
998 bnx2x_disable_blocks_parity(bp); 978 bnx2x_disable_blocks_parity(bp);
999 979
1000 /* Disable parity on path 1 */
1001 bnx2x_pretend_func(bp, 1);
1002 bnx2x_disable_blocks_parity(bp);
1003
1004 /* Return to current function */
1005 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1006
1007 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; 980 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
1008 dump_hdr.preset = bp->dump_preset_idx; 981 dump_hdr.preset = bp->dump_preset_idx;
1009 dump_hdr.version = BNX2X_DUMP_VERSION; 982 dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1005,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
1032 /* Actually read the registers */ 1005 /* Actually read the registers */
1033 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); 1006 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
1034 1007
1035 /* Re-enable parity attentions on path 0 */ 1008 /* Re-enable parity attentions */
1036 bnx2x_pretend_func(bp, 0);
1037 bnx2x_clear_blocks_parity(bp); 1009 bnx2x_clear_blocks_parity(bp);
1038 bnx2x_enable_blocks_parity(bp); 1010 bnx2x_enable_blocks_parity(bp);
1039 1011
1040 /* Re-enable parity attentions on path 1 */
1041 bnx2x_pretend_func(bp, 1);
1042 bnx2x_clear_blocks_parity(bp);
1043 bnx2x_enable_blocks_parity(bp);
1044
1045 /* Return to current function */
1046 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1047
1048 return 0; 1012 return 0;
1049} 1013}
1050 1014
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 76df015f486a..c2dfea7968f4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -640,23 +640,35 @@ static const struct {
640 * [30] MCP Latched ump_tx_parity 640 * [30] MCP Latched ump_tx_parity
641 * [31] MCP Latched scpad_parity 641 * [31] MCP Latched scpad_parity
642 */ 642 */
643#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ 643#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \
644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 644 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 645 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 646 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
647
648#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
649 (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
647 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 650 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
648 651
649/* Below registers control the MCP parity attention output. When 652/* Below registers control the MCP parity attention output. When
650 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are 653 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
651 * enabled, when cleared - disabled. 654 * enabled, when cleared - disabled.
652 */ 655 */
653static const u32 mcp_attn_ctl_regs[] = { 656static const struct {
654 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, 657 u32 addr;
655 MISC_REG_AEU_ENABLE4_NIG_0, 658 u32 bits;
656 MISC_REG_AEU_ENABLE4_PXP_0, 659} mcp_attn_ctl_regs[] = {
657 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, 660 { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
658 MISC_REG_AEU_ENABLE4_NIG_1, 661 MISC_AEU_ENABLE_MCP_PRTY_BITS },
659 MISC_REG_AEU_ENABLE4_PXP_1 662 { MISC_REG_AEU_ENABLE4_NIG_0,
663 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
664 { MISC_REG_AEU_ENABLE4_PXP_0,
665 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
666 { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
667 MISC_AEU_ENABLE_MCP_PRTY_BITS },
668 { MISC_REG_AEU_ENABLE4_NIG_1,
669 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
670 { MISC_REG_AEU_ENABLE4_PXP_1,
671 MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
660}; 672};
661 673
662static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) 674static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
665 u32 reg_val; 677 u32 reg_val;
666 678
667 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { 679 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
668 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); 680 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
669 681
670 if (enable) 682 if (enable)
671 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; 683 reg_val |= mcp_attn_ctl_regs[i].bits;
672 else 684 else
673 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; 685 reg_val &= ~mcp_attn_ctl_regs[i].bits;
674 686
675 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); 687 REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
676 } 688 }
677} 689}
678 690
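
The hunk above swaps a flat array of AEU register addresses for a table of {address, bit-mask} pairs, so the loop in bnx2x_set_mcp_parity() can apply a different mask per register (the NIG/PXP entries deliberately exclude the SCPAD bit). A minimal, self-contained sketch of the same table-driven read-modify-write pattern, with made-up register indices and masks rather than the driver's real ones:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for REG_RD/REG_WR against a tiny simulated register file. */
static uint32_t regs[4];
static uint32_t reg_rd(unsigned idx)             { return regs[idx]; }
static void     reg_wr(unsigned idx, uint32_t v) { regs[idx] = v; }

/* Each control register is paired with exactly the bits it owns, so a
 * register that handles only a subset of the sources gets a smaller mask. */
static const struct { unsigned idx; uint32_t bits; } attn_ctl[] = {
        { 0, 0x0000000f },      /* full set */
        { 1, 0x00000007 },      /* subset   */
        { 2, 0x00000007 },      /* subset   */
};

static void set_parity(int enable)
{
        for (unsigned i = 0; i < sizeof(attn_ctl) / sizeof(attn_ctl[0]); i++) {
                uint32_t val = reg_rd(attn_ctl[i].idx);

                if (enable)
                        val |= attn_ctl[i].bits;
                else
                        val &= ~attn_ctl[i].bits;
                reg_wr(attn_ctl[i].idx, val);
        }
}

int main(void)
{
        set_parity(1);
        printf("reg0=%#x reg1=%#x\n", (unsigned)regs[0], (unsigned)regs[1]);
        return 0;
}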
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 82b658d8c04c..b42f89ce02ef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
503} 503}
504 504
505/* issue a dmae command over the init-channel and wait for completion */ 505/* issue a dmae command over the init-channel and wait for completion */
506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 506int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
507 u32 *comp)
507{ 508{
508 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; 509 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
510 int rc = 0; 510 int rc = 0;
511 511
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
518 spin_lock_bh(&bp->dmae_lock); 518 spin_lock_bh(&bp->dmae_lock);
519 519
520 /* reset completion */ 520 /* reset completion */
521 *wb_comp = 0; 521 *comp = 0;
522 522
523 /* post the command on the channel used for initializations */ 523 /* post the command on the channel used for initializations */
524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 524 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
525 525
526 /* wait for completion */ 526 /* wait for completion */
527 udelay(5); 527 udelay(5);
528 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { 528 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
529 529
530 if (!cnt || 530 if (!cnt ||
531 (bp->recovery_state != BNX2X_RECOVERY_DONE && 531 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
537 cnt--; 537 cnt--;
538 udelay(50); 538 udelay(50);
539 } 539 }
540 if (*wb_comp & DMAE_PCI_ERR_FLAG) { 540 if (*comp & DMAE_PCI_ERR_FLAG) {
541 BNX2X_ERR("DMAE PCI error!\n"); 541 BNX2X_ERR("DMAE PCI error!\n");
542 rc = DMAE_PCI_ERROR; 542 rc = DMAE_PCI_ERROR;
543 } 543 }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
574 dmae.len = len32; 574 dmae.len = len32;
575 575
576 /* issue the command and wait for completion */ 576 /* issue the command and wait for completion */
577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 577 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
578 if (rc) { 578 if (rc) {
579 BNX2X_ERR("DMAE returned failure %d\n", rc); 579 BNX2X_ERR("DMAE returned failure %d\n", rc);
580 bnx2x_panic(); 580 bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
611 dmae.len = len32; 611 dmae.len = len32;
612 612
613 /* issue the command and wait for completion */ 613 /* issue the command and wait for completion */
614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae); 614 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
615 if (rc) { 615 if (rc) {
616 BNX2X_ERR("DMAE returned failure %d\n", rc); 616 BNX2X_ERR("DMAE returned failure %d\n", rc);
617 bnx2x_panic(); 617 bnx2x_panic();
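
The hunks above thread the DMA completion word into bnx2x_issue_dmae_with_comp() as a parameter instead of always using the slow-path wb_comp, which is what later lets the statistics code reuse the same wait loop with its own completion word. A rough, self-contained sketch of the out-parameter wait pattern, with the hardware side simulated so it runs on its own:

#include <stdint.h>
#include <stdio.h>

#define COMP_DONE 0xd0ae0001u

/* Poll a caller-supplied completion word instead of one fixed location. */
static int issue_with_comp(volatile uint32_t *comp, int max_polls)
{
        *comp = 0;                      /* reset completion                */
        /* ... post the command to the init channel here ...              */
        *comp = COMP_DONE;              /* simulated: hardware writes this */

        while (*comp != COMP_DONE) {    /* wait for completion             */
                if (!max_polls--)
                        return -1;      /* timed out                       */
        }
        return 0;
}

int main(void)
{
        uint32_t slowpath_comp, stats_comp;

        /* Two independent callers now share the same wait loop. */
        printf("slowpath: %d\n", issue_with_comp(&slowpath_comp, 1000));
        printf("stats:    %d\n", issue_with_comp(&stats_comp, 1000));
        return 0;
}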
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
751 return rc; 751 return rc;
752} 752}
753 753
754#define MCPR_TRACE_BUFFER_SIZE (0x800)
755#define SCRATCH_BUFFER_SIZE(bp) \
756 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
757
754void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) 758void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
755{ 759{
756 u32 addr, val; 760 u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
775 trace_shmem_base = bp->common.shmem_base; 779 trace_shmem_base = bp->common.shmem_base;
776 else 780 else
777 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); 781 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
778 addr = trace_shmem_base - 0x800; 782
783 /* sanity */
784 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
785 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
786 SCRATCH_BUFFER_SIZE(bp)) {
787 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
788 trace_shmem_base);
789 return;
790 }
791
792 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
779 793
780 /* validate TRCB signature */ 794 /* validate TRCB signature */
781 mark = REG_RD(bp, addr); 795 mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
787 /* read cyclic buffer pointer */ 801 /* read cyclic buffer pointer */
788 addr += 4; 802 addr += 4;
789 mark = REG_RD(bp, addr); 803 mark = REG_RD(bp, addr);
790 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 804 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
791 + ((mark + 0x3) & ~0x3) - 0x08000000; 805 if (mark >= trace_shmem_base || mark < addr + 4) {
806 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
807 return;
808 }
792 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); 809 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
793 810
794 printk("%s", lvl); 811 printk("%s", lvl);
795 812
796 /* dump buffer after the mark */ 813 /* dump buffer after the mark */
797 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { 814 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
798 for (word = 0; word < 8; word++) 815 for (word = 0; word < 8; word++)
799 data[word] = htonl(REG_RD(bp, offset + 4*word)); 816 data[word] = htonl(REG_RD(bp, offset + 4*word));
800 data[8] = 0x0; 817 data[8] = 0x0;
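
The new checks above refuse to dump the firmware trace when the base read from shared memory does not fall inside the MCP scratchpad window, instead of blindly dereferencing it. A standalone sketch of that bounds check, using invented base and size constants in place of MCPR_SCRATCH_BASE()/SCRATCH_BUFFER_SIZE():

#include <stdint.h>
#include <stdio.h>

#define SCRATCH_BASE   0x08000000u
#define SCRATCH_SIZE   0x00028000u
#define TRACE_BUF_SIZE 0x800u

/* Reject a shared-memory trace base that cannot hold the trace buffer. */
static int trace_base_ok(uint32_t shmem_base)
{
        if (shmem_base < SCRATCH_BASE + TRACE_BUF_SIZE)
                return 0;       /* buffer would underflow the scratchpad */
        if (shmem_base >= SCRATCH_BASE + SCRATCH_SIZE)
                return 0;       /* base lies past the scratchpad         */
        return 1;
}

int main(void)
{
        printf("%d\n", trace_base_ok(0x08001000));      /* ok       */
        printf("%d\n", trace_base_ok(0x08000100));      /* too low  */
        printf("%d\n", trace_base_ok(0x09000000));      /* too high */
        return 0;
}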
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
4280 pr_cont("%s%s", idx ? ", " : "", blk); 4297 pr_cont("%s%s", idx ? ", " : "", blk);
4281} 4298}
4282 4299
4283static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4300static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4284 int par_num, bool print) 4301 int *par_num, bool print)
4285{ 4302{
4286 int i = 0; 4303 u32 cur_bit;
4287 u32 cur_bit = 0; 4304 bool res;
4305 int i;
4306
4307 res = false;
4308
4288 for (i = 0; sig; i++) { 4309 for (i = 0; sig; i++) {
4289 cur_bit = ((u32)0x1 << i); 4310 cur_bit = (0x1UL << i);
4290 if (sig & cur_bit) { 4311 if (sig & cur_bit) {
4291 switch (cur_bit) { 4312 res |= true; /* Each bit is a real error! */
4292 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4313
4293 if (print) { 4314 if (print) {
4294 _print_next_block(par_num++, "BRB"); 4315 switch (cur_bit) {
4316 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4317 _print_next_block((*par_num)++, "BRB");
4295 _print_parity(bp, 4318 _print_parity(bp,
4296 BRB1_REG_BRB1_PRTY_STS); 4319 BRB1_REG_BRB1_PRTY_STS);
4297 } 4320 break;
4298 break; 4321 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4299 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4322 _print_next_block((*par_num)++,
4300 if (print) { 4323 "PARSER");
4301 _print_next_block(par_num++, "PARSER");
4302 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4324 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4303 } 4325 break;
4304 break; 4326 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4305 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4327 _print_next_block((*par_num)++, "TSDM");
4306 if (print) {
4307 _print_next_block(par_num++, "TSDM");
4308 _print_parity(bp, 4328 _print_parity(bp,
4309 TSDM_REG_TSDM_PRTY_STS); 4329 TSDM_REG_TSDM_PRTY_STS);
4310 } 4330 break;
4311 break; 4331 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4312 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4332 _print_next_block((*par_num)++,
4313 if (print) {
4314 _print_next_block(par_num++,
4315 "SEARCHER"); 4333 "SEARCHER");
4316 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4334 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4317 } 4335 break;
4318 break; 4336 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4319 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4337 _print_next_block((*par_num)++, "TCM");
4320 if (print) { 4338 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4321 _print_next_block(par_num++, "TCM"); 4339 break;
4322 _print_parity(bp, 4340 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4323 TCM_REG_TCM_PRTY_STS); 4341 _print_next_block((*par_num)++,
4324 } 4342 "TSEMI");
4325 break;
4326 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4327 if (print) {
4328 _print_next_block(par_num++, "TSEMI");
4329 _print_parity(bp, 4343 _print_parity(bp,
4330 TSEM_REG_TSEM_PRTY_STS_0); 4344 TSEM_REG_TSEM_PRTY_STS_0);
4331 _print_parity(bp, 4345 _print_parity(bp,
4332 TSEM_REG_TSEM_PRTY_STS_1); 4346 TSEM_REG_TSEM_PRTY_STS_1);
4333 } 4347 break;
4334 break; 4348 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4335 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4349 _print_next_block((*par_num)++, "XPB");
4336 if (print) {
4337 _print_next_block(par_num++, "XPB");
4338 _print_parity(bp, GRCBASE_XPB + 4350 _print_parity(bp, GRCBASE_XPB +
4339 PB_REG_PB_PRTY_STS); 4351 PB_REG_PB_PRTY_STS);
4352 break;
4340 } 4353 }
4341 break;
4342 } 4354 }
4343 4355
4344 /* Clear the bit */ 4356 /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4346 } 4358 }
4347 } 4359 }
4348 4360
4349 return par_num; 4361 return res;
4350} 4362}
4351 4363
4352static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4364static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4353 int par_num, bool *global, 4365 int *par_num, bool *global,
4354 bool print) 4366 bool print)
4355{ 4367{
4356 int i = 0; 4368 u32 cur_bit;
4357 u32 cur_bit = 0; 4369 bool res;
4370 int i;
4371
4372 res = false;
4373
4358 for (i = 0; sig; i++) { 4374 for (i = 0; sig; i++) {
4359 cur_bit = ((u32)0x1 << i); 4375 cur_bit = (0x1UL << i);
4360 if (sig & cur_bit) { 4376 if (sig & cur_bit) {
4377 res |= true; /* Each bit is a real error! */
4361 switch (cur_bit) { 4378 switch (cur_bit) {
4362 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4379 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4363 if (print) { 4380 if (print) {
4364 _print_next_block(par_num++, "PBF"); 4381 _print_next_block((*par_num)++, "PBF");
4365 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4382 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4366 } 4383 }
4367 break; 4384 break;
4368 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4385 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4369 if (print) { 4386 if (print) {
4370 _print_next_block(par_num++, "QM"); 4387 _print_next_block((*par_num)++, "QM");
4371 _print_parity(bp, QM_REG_QM_PRTY_STS); 4388 _print_parity(bp, QM_REG_QM_PRTY_STS);
4372 } 4389 }
4373 break; 4390 break;
4374 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4391 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4375 if (print) { 4392 if (print) {
4376 _print_next_block(par_num++, "TM"); 4393 _print_next_block((*par_num)++, "TM");
4377 _print_parity(bp, TM_REG_TM_PRTY_STS); 4394 _print_parity(bp, TM_REG_TM_PRTY_STS);
4378 } 4395 }
4379 break; 4396 break;
4380 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4397 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4381 if (print) { 4398 if (print) {
4382 _print_next_block(par_num++, "XSDM"); 4399 _print_next_block((*par_num)++, "XSDM");
4383 _print_parity(bp, 4400 _print_parity(bp,
4384 XSDM_REG_XSDM_PRTY_STS); 4401 XSDM_REG_XSDM_PRTY_STS);
4385 } 4402 }
4386 break; 4403 break;
4387 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4404 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4388 if (print) { 4405 if (print) {
4389 _print_next_block(par_num++, "XCM"); 4406 _print_next_block((*par_num)++, "XCM");
4390 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4407 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4391 } 4408 }
4392 break; 4409 break;
4393 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4410 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4394 if (print) { 4411 if (print) {
4395 _print_next_block(par_num++, "XSEMI"); 4412 _print_next_block((*par_num)++,
4413 "XSEMI");
4396 _print_parity(bp, 4414 _print_parity(bp,
4397 XSEM_REG_XSEM_PRTY_STS_0); 4415 XSEM_REG_XSEM_PRTY_STS_0);
4398 _print_parity(bp, 4416 _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4401 break; 4419 break;
4402 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4420 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4403 if (print) { 4421 if (print) {
4404 _print_next_block(par_num++, 4422 _print_next_block((*par_num)++,
4405 "DOORBELLQ"); 4423 "DOORBELLQ");
4406 _print_parity(bp, 4424 _print_parity(bp,
4407 DORQ_REG_DORQ_PRTY_STS); 4425 DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4409 break; 4427 break;
4410 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4428 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4411 if (print) { 4429 if (print) {
4412 _print_next_block(par_num++, "NIG"); 4430 _print_next_block((*par_num)++, "NIG");
4413 if (CHIP_IS_E1x(bp)) { 4431 if (CHIP_IS_E1x(bp)) {
4414 _print_parity(bp, 4432 _print_parity(bp,
4415 NIG_REG_NIG_PRTY_STS); 4433 NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4423 break; 4441 break;
4424 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4442 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4425 if (print) 4443 if (print)
4426 _print_next_block(par_num++, 4444 _print_next_block((*par_num)++,
4427 "VAUX PCI CORE"); 4445 "VAUX PCI CORE");
4428 *global = true; 4446 *global = true;
4429 break; 4447 break;
4430 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4448 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4431 if (print) { 4449 if (print) {
4432 _print_next_block(par_num++, "DEBUG"); 4450 _print_next_block((*par_num)++,
4451 "DEBUG");
4433 _print_parity(bp, DBG_REG_DBG_PRTY_STS); 4452 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4434 } 4453 }
4435 break; 4454 break;
4436 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4455 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4437 if (print) { 4456 if (print) {
4438 _print_next_block(par_num++, "USDM"); 4457 _print_next_block((*par_num)++, "USDM");
4439 _print_parity(bp, 4458 _print_parity(bp,
4440 USDM_REG_USDM_PRTY_STS); 4459 USDM_REG_USDM_PRTY_STS);
4441 } 4460 }
4442 break; 4461 break;
4443 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4462 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4444 if (print) { 4463 if (print) {
4445 _print_next_block(par_num++, "UCM"); 4464 _print_next_block((*par_num)++, "UCM");
4446 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4465 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4447 } 4466 }
4448 break; 4467 break;
4449 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4468 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4450 if (print) { 4469 if (print) {
4451 _print_next_block(par_num++, "USEMI"); 4470 _print_next_block((*par_num)++,
4471 "USEMI");
4452 _print_parity(bp, 4472 _print_parity(bp,
4453 USEM_REG_USEM_PRTY_STS_0); 4473 USEM_REG_USEM_PRTY_STS_0);
4454 _print_parity(bp, 4474 _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4457 break; 4477 break;
4458 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4478 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4459 if (print) { 4479 if (print) {
4460 _print_next_block(par_num++, "UPB"); 4480 _print_next_block((*par_num)++, "UPB");
4461 _print_parity(bp, GRCBASE_UPB + 4481 _print_parity(bp, GRCBASE_UPB +
4462 PB_REG_PB_PRTY_STS); 4482 PB_REG_PB_PRTY_STS);
4463 } 4483 }
4464 break; 4484 break;
4465 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4485 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4466 if (print) { 4486 if (print) {
4467 _print_next_block(par_num++, "CSDM"); 4487 _print_next_block((*par_num)++, "CSDM");
4468 _print_parity(bp, 4488 _print_parity(bp,
4469 CSDM_REG_CSDM_PRTY_STS); 4489 CSDM_REG_CSDM_PRTY_STS);
4470 } 4490 }
4471 break; 4491 break;
4472 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4492 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4473 if (print) { 4493 if (print) {
4474 _print_next_block(par_num++, "CCM"); 4494 _print_next_block((*par_num)++, "CCM");
4475 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4495 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4476 } 4496 }
4477 break; 4497 break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4482 } 4502 }
4483 } 4503 }
4484 4504
4485 return par_num; 4505 return res;
4486} 4506}
4487 4507
4488static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4508static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4489 int par_num, bool print) 4509 int *par_num, bool print)
4490{ 4510{
4491 int i = 0; 4511 u32 cur_bit;
4492 u32 cur_bit = 0; 4512 bool res;
4513 int i;
4514
4515 res = false;
4516
4493 for (i = 0; sig; i++) { 4517 for (i = 0; sig; i++) {
4494 cur_bit = ((u32)0x1 << i); 4518 cur_bit = (0x1UL << i);
4495 if (sig & cur_bit) { 4519 if (sig & cur_bit) {
4496 switch (cur_bit) { 4520 res |= true; /* Each bit is real error! */
4497 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4521 if (print) {
4498 if (print) { 4522 switch (cur_bit) {
4499 _print_next_block(par_num++, "CSEMI"); 4523 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4524 _print_next_block((*par_num)++,
4525 "CSEMI");
4500 _print_parity(bp, 4526 _print_parity(bp,
4501 CSEM_REG_CSEM_PRTY_STS_0); 4527 CSEM_REG_CSEM_PRTY_STS_0);
4502 _print_parity(bp, 4528 _print_parity(bp,
4503 CSEM_REG_CSEM_PRTY_STS_1); 4529 CSEM_REG_CSEM_PRTY_STS_1);
4504 } 4530 break;
4505 break; 4531 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4506 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4532 _print_next_block((*par_num)++, "PXP");
4507 if (print) {
4508 _print_next_block(par_num++, "PXP");
4509 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4533 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4510 _print_parity(bp, 4534 _print_parity(bp,
4511 PXP2_REG_PXP2_PRTY_STS_0); 4535 PXP2_REG_PXP2_PRTY_STS_0);
4512 _print_parity(bp, 4536 _print_parity(bp,
4513 PXP2_REG_PXP2_PRTY_STS_1); 4537 PXP2_REG_PXP2_PRTY_STS_1);
4514 } 4538 break;
4515 break; 4539 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4516 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4540 _print_next_block((*par_num)++,
4517 if (print) 4541 "PXPPCICLOCKCLIENT");
4518 _print_next_block(par_num++, 4542 break;
4519 "PXPPCICLOCKCLIENT"); 4543 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4520 break; 4544 _print_next_block((*par_num)++, "CFC");
4521 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4522 if (print) {
4523 _print_next_block(par_num++, "CFC");
4524 _print_parity(bp, 4545 _print_parity(bp,
4525 CFC_REG_CFC_PRTY_STS); 4546 CFC_REG_CFC_PRTY_STS);
4526 } 4547 break;
4527 break; 4548 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4528 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4549 _print_next_block((*par_num)++, "CDU");
4529 if (print) {
4530 _print_next_block(par_num++, "CDU");
4531 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4550 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4532 } 4551 break;
4533 break; 4552 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4534 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4553 _print_next_block((*par_num)++, "DMAE");
4535 if (print) {
4536 _print_next_block(par_num++, "DMAE");
4537 _print_parity(bp, 4554 _print_parity(bp,
4538 DMAE_REG_DMAE_PRTY_STS); 4555 DMAE_REG_DMAE_PRTY_STS);
4539 } 4556 break;
4540 break; 4557 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4541 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4558 _print_next_block((*par_num)++, "IGU");
4542 if (print) {
4543 _print_next_block(par_num++, "IGU");
4544 if (CHIP_IS_E1x(bp)) 4559 if (CHIP_IS_E1x(bp))
4545 _print_parity(bp, 4560 _print_parity(bp,
4546 HC_REG_HC_PRTY_STS); 4561 HC_REG_HC_PRTY_STS);
4547 else 4562 else
4548 _print_parity(bp, 4563 _print_parity(bp,
4549 IGU_REG_IGU_PRTY_STS); 4564 IGU_REG_IGU_PRTY_STS);
4550 } 4565 break;
4551 break; 4566 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4552 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4567 _print_next_block((*par_num)++, "MISC");
4553 if (print) {
4554 _print_next_block(par_num++, "MISC");
4555 _print_parity(bp, 4568 _print_parity(bp,
4556 MISC_REG_MISC_PRTY_STS); 4569 MISC_REG_MISC_PRTY_STS);
4570 break;
4557 } 4571 }
4558 break;
4559 } 4572 }
4560 4573
4561 /* Clear the bit */ 4574 /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4563 } 4576 }
4564 } 4577 }
4565 4578
4566 return par_num; 4579 return res;
4567} 4580}
4568 4581
4569static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4582static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4570 bool *global, bool print) 4583 int *par_num, bool *global,
4584 bool print)
4571{ 4585{
4572 int i = 0; 4586 bool res = false;
4573 u32 cur_bit = 0; 4587 u32 cur_bit;
4588 int i;
4589
4574 for (i = 0; sig; i++) { 4590 for (i = 0; sig; i++) {
4575 cur_bit = ((u32)0x1 << i); 4591 cur_bit = (0x1UL << i);
4576 if (sig & cur_bit) { 4592 if (sig & cur_bit) {
4577 switch (cur_bit) { 4593 switch (cur_bit) {
4578 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4594 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4579 if (print) 4595 if (print)
4580 _print_next_block(par_num++, "MCP ROM"); 4596 _print_next_block((*par_num)++,
4597 "MCP ROM");
4581 *global = true; 4598 *global = true;
4599 res |= true;
4582 break; 4600 break;
4583 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4601 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4584 if (print) 4602 if (print)
4585 _print_next_block(par_num++, 4603 _print_next_block((*par_num)++,
4586 "MCP UMP RX"); 4604 "MCP UMP RX");
4587 *global = true; 4605 *global = true;
4606 res |= true;
4588 break; 4607 break;
4589 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4608 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4590 if (print) 4609 if (print)
4591 _print_next_block(par_num++, 4610 _print_next_block((*par_num)++,
4592 "MCP UMP TX"); 4611 "MCP UMP TX");
4593 *global = true; 4612 *global = true;
4613 res |= true;
4594 break; 4614 break;
4595 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4615 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4596 if (print) 4616 if (print)
4597 _print_next_block(par_num++, 4617 _print_next_block((*par_num)++,
4598 "MCP SCPAD"); 4618 "MCP SCPAD");
4599 *global = true; 4619 /* clear latched SCPAD PARITY from MCP */
4620 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4621 1UL << 10);
4600 break; 4622 break;
4601 } 4623 }
4602 4624
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4605 } 4627 }
4606 } 4628 }
4607 4629
4608 return par_num; 4630 return res;
4609} 4631}
4610 4632
4611static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4633static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4612 int par_num, bool print) 4634 int *par_num, bool print)
4613{ 4635{
4614 int i = 0; 4636 u32 cur_bit;
4615 u32 cur_bit = 0; 4637 bool res;
4638 int i;
4639
4640 res = false;
4641
4616 for (i = 0; sig; i++) { 4642 for (i = 0; sig; i++) {
4617 cur_bit = ((u32)0x1 << i); 4643 cur_bit = (0x1UL << i);
4618 if (sig & cur_bit) { 4644 if (sig & cur_bit) {
4619 switch (cur_bit) { 4645 res |= true; /* Each bit is real error! */
4620 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4646 if (print) {
4621 if (print) { 4647 switch (cur_bit) {
4622 _print_next_block(par_num++, "PGLUE_B"); 4648 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4649 _print_next_block((*par_num)++,
4650 "PGLUE_B");
4623 _print_parity(bp, 4651 _print_parity(bp,
4624 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4652 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4625 } 4653 break;
4626 break; 4654 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4627 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4655 _print_next_block((*par_num)++, "ATC");
4628 if (print) {
4629 _print_next_block(par_num++, "ATC");
4630 _print_parity(bp, 4656 _print_parity(bp,
4631 ATC_REG_ATC_PRTY_STS); 4657 ATC_REG_ATC_PRTY_STS);
4658 break;
4632 } 4659 }
4633 break;
4634 } 4660 }
4635
4636 /* Clear the bit */ 4661 /* Clear the bit */
4637 sig &= ~cur_bit; 4662 sig &= ~cur_bit;
4638 } 4663 }
4639 } 4664 }
4640 4665
4641 return par_num; 4666 return res;
4642} 4667}
4643 4668
4644static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4669static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4645 u32 *sig) 4670 u32 *sig)
4646{ 4671{
4672 bool res = false;
4673
4647 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4674 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4648 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4675 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4649 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4676 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4660 if (print) 4687 if (print)
4661 netdev_err(bp->dev, 4688 netdev_err(bp->dev,
4662 "Parity errors detected in blocks: "); 4689 "Parity errors detected in blocks: ");
4663 par_num = bnx2x_check_blocks_with_parity0(bp, 4690 res |= bnx2x_check_blocks_with_parity0(bp,
4664 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); 4691 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4665 par_num = bnx2x_check_blocks_with_parity1(bp, 4692 res |= bnx2x_check_blocks_with_parity1(bp,
4666 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); 4693 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4667 par_num = bnx2x_check_blocks_with_parity2(bp, 4694 res |= bnx2x_check_blocks_with_parity2(bp,
4668 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); 4695 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4669 par_num = bnx2x_check_blocks_with_parity3( 4696 res |= bnx2x_check_blocks_with_parity3(bp,
4670 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); 4697 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4671 par_num = bnx2x_check_blocks_with_parity4(bp, 4698 res |= bnx2x_check_blocks_with_parity4(bp,
4672 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); 4699 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4673 4700
4674 if (print) 4701 if (print)
4675 pr_cont("\n"); 4702 pr_cont("\n");
4703 }
4676 4704
4677 return true; 4705 return res;
4678 } else
4679 return false;
4680} 4706}
4681 4707
4682/** 4708/**
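
Across the hunks above the per-group parity checkers change from returning the running block count to returning a bool ("did this group contain a real error") and updating the count through a pointer, so bnx2x_parity_attn() can OR the results together and skip printing without losing the error indication. A compact, self-contained sketch of one such bit-scan loop, with two invented block names standing in for the AEU definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLK_A_PARITY (1u << 0)
#define BLK_B_PARITY (1u << 3)

/* Scan a parity signature: report whether any bit was set, and count
 * (plus optionally name) the affected blocks via the caller's counter. */
static bool check_parity(uint32_t sig, int *par_num, bool print)
{
        bool res = false;

        for (int i = 0; sig; i++) {
                uint32_t cur_bit = 1u << i;

                if (!(sig & cur_bit))
                        continue;
                res = true;             /* each set bit is a real error */
                if (print) {
                        switch (cur_bit) {
                        case BLK_A_PARITY:
                                printf("%sBLK_A", (*par_num)++ ? ", " : "");
                                break;
                        case BLK_B_PARITY:
                                printf("%sBLK_B", (*par_num)++ ? ", " : "");
                                break;
                        }
                }
                sig &= ~cur_bit;        /* clear the bit */
        }
        return res;
}

int main(void)
{
        int par_num = 0;
        bool err = check_parity(BLK_A_PARITY | BLK_B_PARITY, &par_num, true);

        printf("\nblocks=%d seen=%d\n", par_num, err);
        return 0;
}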
@@ -7126,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7126 int port = BP_PORT(bp); 7152 int port = BP_PORT(bp);
7127 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7153 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7128 u32 low, high; 7154 u32 low, high;
7129 u32 val; 7155 u32 val, reg;
7130 7156
7131 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7157 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7132 7158
@@ -7271,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
7271 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7297 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7272 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7298 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7273 7299
7300 /* SCPAD_PARITY should NOT trigger close the gates */
7301 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7302 REG_WR(bp, reg,
7303 REG_RD(bp, reg) &
7304 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7305
7306 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7307 REG_WR(bp, reg,
7308 REG_RD(bp, reg) &
7309 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7310
7274 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7311 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7275 7312
7276 if (!CHIP_IS_E1x(bp)) { 7313 if (!CHIP_IS_E1x(bp)) {
@@ -11685,9 +11722,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11685static int bnx2x_open(struct net_device *dev) 11722static int bnx2x_open(struct net_device *dev)
11686{ 11723{
11687 struct bnx2x *bp = netdev_priv(dev); 11724 struct bnx2x *bp = netdev_priv(dev);
11688 bool global = false;
11689 int other_engine = BP_PATH(bp) ? 0 : 1;
11690 bool other_load_status, load_status;
11691 int rc; 11725 int rc;
11692 11726
11693 bp->stats_init = true; 11727 bp->stats_init = true;
@@ -11703,6 +11737,10 @@ static int bnx2x_open(struct net_device *dev)
11703 * Parity recovery is only relevant for PF driver. 11737 * Parity recovery is only relevant for PF driver.
11704 */ 11738 */
11705 if (IS_PF(bp)) { 11739 if (IS_PF(bp)) {
11740 int other_engine = BP_PATH(bp) ? 0 : 1;
11741 bool other_load_status, load_status;
11742 bool global = false;
11743
11706 other_load_status = bnx2x_get_load_status(bp, other_engine); 11744 other_load_status = bnx2x_get_load_status(bp, other_engine);
11707 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 11745 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11708 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 11746 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -12080,7 +12118,6 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12080 struct device *dev = &bp->pdev->dev; 12118 struct device *dev = &bp->pdev->dev;
12081 12119
12082 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 12120 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
12083 bp->flags |= USING_DAC_FLAG;
12084 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 12121 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
12085 dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); 12122 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
12086 return -EIO; 12123 return -EIO;
@@ -12248,8 +12285,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12248 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12285 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12249 12286
12250 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; 12287 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12251 if (bp->flags & USING_DAC_FLAG) 12288 dev->features |= NETIF_F_HIGHDMA;
12252 dev->features |= NETIF_F_HIGHDMA;
12253 12289
12254 /* Add Loopback capability to the device */ 12290 /* Add Loopback capability to the device */
12255 dev->hw_features |= NETIF_F_LOOPBACK; 12291 dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12612,24 +12648,24 @@ static int set_max_cos_est(int chip_id)
12612 return BNX2X_MULTI_TX_COS_E1X; 12648 return BNX2X_MULTI_TX_COS_E1X;
12613 case BCM57712: 12649 case BCM57712:
12614 case BCM57712_MF: 12650 case BCM57712_MF:
12615 case BCM57712_VF:
12616 return BNX2X_MULTI_TX_COS_E2_E3A0; 12651 return BNX2X_MULTI_TX_COS_E2_E3A0;
12617 case BCM57800: 12652 case BCM57800:
12618 case BCM57800_MF: 12653 case BCM57800_MF:
12619 case BCM57800_VF:
12620 case BCM57810: 12654 case BCM57810:
12621 case BCM57810_MF: 12655 case BCM57810_MF:
12622 case BCM57840_4_10: 12656 case BCM57840_4_10:
12623 case BCM57840_2_20: 12657 case BCM57840_2_20:
12624 case BCM57840_O: 12658 case BCM57840_O:
12625 case BCM57840_MFO: 12659 case BCM57840_MFO:
12626 case BCM57810_VF:
12627 case BCM57840_MF: 12660 case BCM57840_MF:
12628 case BCM57840_VF:
12629 case BCM57811: 12661 case BCM57811:
12630 case BCM57811_MF: 12662 case BCM57811_MF:
12631 case BCM57811_VF:
12632 return BNX2X_MULTI_TX_COS_E3B0; 12663 return BNX2X_MULTI_TX_COS_E3B0;
12664 case BCM57712_VF:
12665 case BCM57800_VF:
12666 case BCM57810_VF:
12667 case BCM57840_VF:
12668 case BCM57811_VF:
12633 return 1; 12669 return 1;
12634 default: 12670 default:
12635 pr_err("Unknown board_type (%d), aborting\n", chip_id); 12671 pr_err("Unknown board_type (%d), aborting\n", chip_id);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ad012bdd915..bf08ad68b405 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
470 bnx2x_vfop_qdtor, cmd->done); 470 bnx2x_vfop_qdtor, cmd->done);
471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 471 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
472 cmd->block); 472 cmd->block);
473 } else {
474 BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
475 return -ENOMEM;
473 } 476 }
474 DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
475 vf->abs_vfid, vfop->rc);
476 return -ENOMEM;
477} 477}
478 478
479static void 479static void
@@ -3390,14 +3390,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3390 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3390 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3391 if (rc) { 3391 if (rc) {
3392 BNX2X_ERR("failed to delete eth macs\n"); 3392 BNX2X_ERR("failed to delete eth macs\n");
3393 return -EINVAL; 3393 rc = -EINVAL;
3394 goto out;
3394 } 3395 }
3395 3396
3396 /* remove existing uc list macs */ 3397 /* remove existing uc list macs */
3397 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3398 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3398 if (rc) { 3399 if (rc) {
3399 BNX2X_ERR("failed to delete uc_list macs\n"); 3400 BNX2X_ERR("failed to delete uc_list macs\n");
3400 return -EINVAL; 3401 rc = -EINVAL;
3402 goto out;
3401 } 3403 }
3402 3404
3403 /* configure the new mac to device */ 3405 /* configure the new mac to device */
@@ -3405,6 +3407,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3405 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3407 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3406 BNX2X_ETH_MAC, &ramrod_flags); 3408 BNX2X_ETH_MAC, &ramrod_flags);
3407 3409
3410out:
3408 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3411 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3409 } 3412 }
3410 3413
@@ -3467,7 +3470,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3467 &ramrod_flags); 3470 &ramrod_flags);
3468 if (rc) { 3471 if (rc) {
3469 BNX2X_ERR("failed to delete vlans\n"); 3472 BNX2X_ERR("failed to delete vlans\n");
3470 return -EINVAL; 3473 rc = -EINVAL;
3474 goto out;
3471 } 3475 }
3472 3476
3473 /* send queue update ramrod to configure default vlan and silent 3477 /* send queue update ramrod to configure default vlan and silent
@@ -3501,7 +3505,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3501 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3505 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3502 if (rc) { 3506 if (rc) {
3503 BNX2X_ERR("failed to configure vlan\n"); 3507 BNX2X_ERR("failed to configure vlan\n");
3504 return -EINVAL; 3508 rc = -EINVAL;
3509 goto out;
3505 } 3510 }
3506 3511
3507 /* configure default vlan to vf queue and set silent 3512 /* configure default vlan to vf queue and set silent
@@ -3519,18 +3524,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3519 rc = bnx2x_queue_state_change(bp, &q_params); 3524 rc = bnx2x_queue_state_change(bp, &q_params);
3520 if (rc) { 3525 if (rc) {
3521 BNX2X_ERR("Failed to configure default VLAN\n"); 3526 BNX2X_ERR("Failed to configure default VLAN\n");
3522 return rc; 3527 goto out;
3523 } 3528 }
3524 3529
3525 /* clear the flag indicating that this VF needs its vlan 3530 /* clear the flag indicating that this VF needs its vlan
3526 * (will only be set if the HV configured th Vlan before vf was 3531 * (will only be set if the HV configured the Vlan before vf was
3527 * and we were called because the VF came up later 3532 * up and we were called because the VF came up later
3528 */ 3533 */
3534out:
3529 vf->cfg_flags &= ~VF_CFG_VLAN; 3535 vf->cfg_flags &= ~VF_CFG_VLAN;
3530
3531 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3536 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3532 } 3537 }
3533 return 0; 3538 return rc;
3534} 3539}
3535 3540
3536/* crc is the first field in the bulletin board. Compute the crc over the 3541/* crc is the first field in the bulletin board. Compute the crc over the
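
The changes above convert the early returns in the ndo_set_vf_mac/ndo_set_vf_vlan handlers into jumps to a common out label, so the VF-PF channel is always unlocked on the error paths instead of being left held. A minimal sketch of the goto-based cleanup pattern, with a pthread mutex standing in for the channel lock and a hypothetical delete_macs() helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t channel_lock = PTHREAD_MUTEX_INITIALIZER;

static int delete_macs(int fail) { return fail ? -1 : 0; }

/* All failure paths fall through to "out" so the unlock always runs. */
static int set_vf_mac(int fail_step)
{
        int rc = 0;

        pthread_mutex_lock(&channel_lock);

        if (delete_macs(fail_step == 1)) {
                rc = -22;               /* -EINVAL */
                goto out;
        }
        if (delete_macs(fail_step == 2)) {
                rc = -22;
                goto out;
        }
        /* ... program the new MAC here ... */
out:
        pthread_mutex_unlock(&channel_lock);
        return rc;
}

int main(void)
{
        printf("%d %d\n", set_vf_mac(0), set_vf_mac(2));
        return 0;
}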
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 86436c77af03..3b75070411aa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
196 196
197 } else if (bp->func_stx) { 197 } else if (bp->func_stx) {
198 *stats_comp = 0; 198 *stats_comp = 0;
199 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); 199 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
200 } 200 }
201} 201}
202 202
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index da16953eb2ec..28757dfacf0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -980,7 +980,7 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
980 dmae.len = len32; 980 dmae.len = len32;
981 981
982 /* issue the command and wait for completion */ 982 /* issue the command and wait for completion */
983 return bnx2x_issue_dmae_with_comp(bp, &dmae); 983 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
984} 984}
985 985
986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) 986static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e1..48f52882a22b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ 106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
107 107
108#define XGMAC_ADDR_AE 0x80000000 108#define XGMAC_ADDR_AE 0x80000000
109#define XGMAC_MAX_FILTER_ADDR 31
110 109
111/* PMT Control and Status */ 110/* PMT Control and Status */
112#define XGMAC_PMT_POINTER_RESET 0x80000000 111#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
384 struct device *device; 383 struct device *device;
385 struct napi_struct napi; 384 struct napi_struct napi;
386 385
386 int max_macs;
387 struct xgmac_extra_stats xstats; 387 struct xgmac_extra_stats xstats;
388 388
389 spinlock_t stats_lock; 389 spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", 1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1292 netdev_mc_count(dev), netdev_uc_count(dev)); 1292 netdev_mc_count(dev), netdev_uc_count(dev));
1293 1293
1294 if (dev->flags & IFF_PROMISC) { 1294 if (dev->flags & IFF_PROMISC)
1295 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER); 1295 value |= XGMAC_FRAME_FILTER_PR;
1296 return;
1297 }
1298 1296
1299 memset(hash_filter, 0, sizeof(hash_filter)); 1297 memset(hash_filter, 0, sizeof(hash_filter));
1300 1298
1301 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) { 1299 if (netdev_uc_count(dev) > priv->max_macs) {
1302 use_hash = true; 1300 use_hash = true;
1303 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; 1301 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1304 } 1302 }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1321 goto out; 1319 goto out;
1322 } 1320 }
1323 1321
1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { 1322 if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
1325 use_hash = true; 1323 use_hash = true;
1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; 1324 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else { 1325 } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
1342 } 1340 }
1343 1341
1344out: 1342out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) 1343 for (i = reg; i <= priv->max_macs; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, reg); 1344 xgmac_set_mac_addr(ioaddr, NULL, i);
1347 for (i = 0; i < XGMAC_NUM_HASH; i++) 1345 for (i = 0; i < XGMAC_NUM_HASH; i++)
1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); 1346 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1349 1347
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
1761 uid = readl(priv->base + XGMAC_VERSION); 1759 uid = readl(priv->base + XGMAC_VERSION);
1762 netdev_info(ndev, "h/w version is 0x%x\n", uid); 1760 netdev_info(ndev, "h/w version is 0x%x\n", uid);
1763 1761
1762 /* Figure out how many valid mac address filter registers we have */
1763 writel(1, priv->base + XGMAC_ADDR_HIGH(31));
1764 if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
1765 priv->max_macs = 31;
1766 else
1767 priv->max_macs = 7;
1768
1764 writel(0, priv->base + XGMAC_DMA_INTR_ENA); 1769 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1765 ndev->irq = platform_get_irq(pdev, 0); 1770 ndev->irq = platform_get_irq(pdev, 0);
1766 if (ndev->irq == -ENXIO) { 1771 if (ndev->irq == -ENXIO) {
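
The probe above discovers how many MAC address filter registers the block really has by writing a marker to the highest candidate slot and reading it back, instead of hard-coding 31. A self-contained sketch of that write/read-back capability probe, with a small array standing in for the MMIO registers (here only 8 slots exist, so the probe falls back to 7):

#include <stdint.h>
#include <stdio.h>

/* Simulated register file: only the first 8 "address high" slots exist. */
static uint32_t mmio[8];

static void     wr(unsigned slot, uint32_t v) { if (slot < 8) mmio[slot] = v; }
static uint32_t rd(unsigned slot)             { return slot < 8 ? mmio[slot] : 0; }

/* Write a marker into slot 31; if it sticks, this part has 31 filter
 * registers, otherwise fall back to the smaller register file. */
static int detect_max_macs(void)
{
        wr(31, 1);
        return rd(31) == 1 ? 31 : 7;
}

int main(void)
{
        printf("max_macs = %d\n", detect_max_macs());
        return 0;
}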
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5f5896e522d2..a7a941b1a655 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
158 158
159/* DM9000 network board routine ---------------------------- */ 159/* DM9000 network board routine ---------------------------- */
160 160
161static void
162dm9000_reset(board_info_t * db)
163{
164 dev_dbg(db->dev, "resetting device\n");
165
166 /* RESET device */
167 writeb(DM9000_NCR, db->io_addr);
168 udelay(200);
169 writeb(NCR_RST, db->io_data);
170 udelay(200);
171}
172
173/* 161/*
174 * Read a byte from I/O port 162 * Read a byte from I/O port
175 */ 163 */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
191 writeb(value, db->io_data); 179 writeb(value, db->io_data);
192} 180}
193 181
182static void
183dm9000_reset(board_info_t *db)
184{
185 dev_dbg(db->dev, "resetting device\n");
186
187 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
188 * The essential point is that we have to do a double reset, and the
189 * instruction is to set LBK into MAC internal loopback mode.
190 */
191 iow(db, DM9000_NCR, 0x03);
192 udelay(100); /* Application note says at least 20 us */
193 if (ior(db, DM9000_NCR) & 1)
194 dev_err(db->dev, "dm9000 did not respond to first reset\n");
195
196 iow(db, DM9000_NCR, 0);
197 iow(db, DM9000_NCR, 0x03);
198 udelay(100);
199 if (ior(db, DM9000_NCR) & 1)
200 dev_err(db->dev, "dm9000 did not respond to second reset\n");
201}
202
194/* routines for sending block to chip */ 203/* routines for sending block to chip */
195 204
196static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) 205static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
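
The rewritten dm9000_reset() above is moved below the iow()/ior() helpers so it can use the indexed register path, performs the double reset with loopback mode set as the application note asks, and reads NCR back after each attempt to report a chip that never came out of reset. A rough sketch of that retry-and-verify shape, with trivial stand-ins for the register I/O and delays:

#include <stdio.h>

/* Trivial stand-ins for the indexed register I/O of the real driver. */
static unsigned char ncr_reg;
static void io_write(int reg, int val) { (void)reg; (void)val; ncr_reg = 0; /* reset bit self-clears */ }
static int  io_read(int reg)           { (void)reg; return ncr_reg; }
static void delay_us(int us)           { (void)us; }

#define REG_NCR        0x00
#define NCR_RESET_LOOP 0x03     /* software reset + MAC internal loopback */

static int chip_reset(void)
{
        for (int attempt = 1; attempt <= 2; attempt++) {
                io_write(REG_NCR, NCR_RESET_LOOP);
                delay_us(100);          /* app note asks for at least 20 us */
                if (io_read(REG_NCR) & 1) {
                        fprintf(stderr, "no response to reset attempt %d\n", attempt);
                        return -1;
                }
                io_write(REG_NCR, 0);
        }
        return 0;
}

int main(void)
{
        printf("reset %s\n", chip_reset() ? "failed" : "ok");
        return 0;
}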
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
744static void dm9000_show_carrier(board_info_t *db, 753static void dm9000_show_carrier(board_info_t *db,
745 unsigned carrier, unsigned nsr) 754 unsigned carrier, unsigned nsr)
746{ 755{
756 int lpa;
747 struct net_device *ndev = db->ndev; 757 struct net_device *ndev = db->ndev;
758 struct mii_if_info *mii = &db->mii;
748 unsigned ncr = dm9000_read_locked(db, DM9000_NCR); 759 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
749 760
750 if (carrier) 761 if (carrier) {
751 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n", 762 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
763 dev_info(db->dev,
764 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
752 ndev->name, (nsr & NSR_SPEED) ? 10 : 100, 765 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
753 (ncr & NCR_FDX) ? "full" : "half"); 766 (ncr & NCR_FDX) ? "full" : "half", lpa);
754 else 767 } else {
755 dev_info(db->dev, "%s: link down\n", ndev->name); 768 dev_info(db->dev, "%s: link down\n", ndev->name);
769 }
756} 770}
757 771
758static void 772static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
890 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); 904 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
891 905
892 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 906 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
907 iow(db, DM9000_GPR, 0);
893 908
894 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 909 /* If we are dealing with DM9000B, some extra steps are required: a
895 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ 910 * manual phy reset, and setting init params.
911 */
912 if (db->type == TYPE_DM9000B) {
913 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
914 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
915 }
896 916
897 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 917 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
898 918
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index bd0e0c0bbcd8..c08fd32bb8e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1198,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1198 1198
1199 if (lancer_chip(adapter)) { 1199 if (lancer_chip(adapter)) {
1200 req->hdr.version = 1; 1200 req->hdr.version = 1;
1201 req->if_id = cpu_to_le16(adapter->if_handle);
1202 } else if (BEx_chip(adapter)) { 1201 } else if (BEx_chip(adapter)) {
1203 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1202 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1204 req->hdr.version = 2; 1203 req->hdr.version = 2;
@@ -1206,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1206 req->hdr.version = 2; 1205 req->hdr.version = 2;
1207 } 1206 }
1208 1207
1208 if (req->hdr.version > 0)
1209 req->if_id = cpu_to_le16(adapter->if_handle);
1209 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1210 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1210 req->ulp_num = BE_ULP1_NUM; 1211 req->ulp_num = BE_ULP1_NUM;
1211 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1212 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c4eaadeb572f..9fbe4dda7a0e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,6 +88,7 @@
88 88
89#include <asm/io.h> 89#include <asm/io.h>
90#include <asm/reg.h> 90#include <asm/reg.h>
91#include <asm/mpc85xx.h>
91#include <asm/irq.h> 92#include <asm/irq.h>
92#include <asm/uaccess.h> 93#include <asm/uaccess.h>
93#include <linux/module.h> 94#include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
939 } 940 }
940} 941}
941 942
942static void gfar_detect_errata(struct gfar_private *priv) 943static void __gfar_detect_errata_83xx(struct gfar_private *priv)
943{ 944{
944 struct device *dev = &priv->ofdev->dev;
945 unsigned int pvr = mfspr(SPRN_PVR); 945 unsigned int pvr = mfspr(SPRN_PVR);
946 unsigned int svr = mfspr(SPRN_SVR); 946 unsigned int svr = mfspr(SPRN_SVR);
947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ 947 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 957 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
958 priv->errata |= GFAR_ERRATA_76; 958 priv->errata |= GFAR_ERRATA_76;
959 959
960 /* MPC8313 and MPC837x all rev */ 960 /* MPC8313 Rev < 2.0 */
961 if ((pvr == 0x80850010 && mod == 0x80b0) || 961 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
962 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 962 priv->errata |= GFAR_ERRATA_12;
963 priv->errata |= GFAR_ERRATA_A002; 963}
964 964
965 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ 965static void __gfar_detect_errata_85xx(struct gfar_private *priv)
966 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || 966{
967 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) 967 unsigned int svr = mfspr(SPRN_SVR);
968
969 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
968 priv->errata |= GFAR_ERRATA_12; 970 priv->errata |= GFAR_ERRATA_12;
971 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
972 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
973 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
974}
975
976static void gfar_detect_errata(struct gfar_private *priv)
977{
978 struct device *dev = &priv->ofdev->dev;
979
980 /* no plans to fix */
981 priv->errata |= GFAR_ERRATA_A002;
982
983 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
984 __gfar_detect_errata_85xx(priv);
985 else /* non-mpc85xx parts, i.e. e300 core based */
986 __gfar_detect_errata_83xx(priv);
969 987
970 if (priv->errata) 988 if (priv->errata)
971 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 989 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
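
The split above separates errata detection per core family and dispatches on the processor version, so the e500 (85xx) path can use the SVR SoC/revision macros while e300-based parts keep the old PVR/SVR compares, and the A002 workaround is applied unconditionally. A small sketch of that dispatch shape, using made-up version values and flag bits instead of the real SPR reads:

#include <stdint.h>
#include <stdio.h>

#define ERRATA_A002 0x1u        /* applies everywhere, no fix planned */
#define ERRATA_12   0x2u

/* Made-up values standing in for mfspr(SPRN_PVR) / mfspr(SPRN_SVR). */
static const uint32_t fake_pvr = 0x80210000;    /* pretend e500 core */
static const uint32_t fake_svr = 0x80390020;

static uint32_t detect_85xx(void)
{
        /* SVR-based checks for e500 parts go here. */
        return (fake_svr & 0xff) == 0x20 ? ERRATA_12 : 0;
}

static uint32_t detect_83xx(void)
{
        /* PVR/SVR compares for e300-based parts go here. */
        return 0;
}

static uint32_t detect_errata(void)
{
        uint32_t errata = ERRATA_A002;

        if ((fake_pvr & 0xffff0000) == 0x80210000)      /* e500-family core? */
                errata |= detect_85xx();
        else
                errata |= detect_83xx();
        return errata;
}

int main(void)
{
        printf("errata flags: 0x%x\n", (unsigned)detect_errata());
        return 0;
}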
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1599 /* Normally TSEC should not hang on GRS commands, so we should 1617 /* Normally TSEC should not hang on GRS commands, so we should
1600 * actually wait for IEVENT_GRSC flag. 1618 * actually wait for IEVENT_GRSC flag.
1601 */ 1619 */
1602 if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) 1620 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1603 return 0; 1621 return 0;
1604 1622
1605 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are 1623 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 86d51429a189..151e00cad113 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2655,6 +2655,8 @@ static int igb_set_eee(struct net_device *netdev,
2655 (hw->phy.media_type != e1000_media_type_copper)) 2655 (hw->phy.media_type != e1000_media_type_copper))
2656 return -EOPNOTSUPP; 2656 return -EOPNOTSUPP;
2657 2657
2658 memset(&eee_curr, 0, sizeof(struct ethtool_eee));
2659
2658 ret_val = igb_get_eee(netdev, &eee_curr); 2660 ret_val = igb_get_eee(netdev, &eee_curr);
2659 if (ret_val) 2661 if (ret_val)
2660 return ret_val; 2662 return ret_val;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 7fb5677451f9..2c210ec35d59 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1131 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1132 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1133 spin_unlock_bh(&mp->mib_counters_lock); 1133 spin_unlock_bh(&mp->mib_counters_lock);
1134
1135 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1136} 1134}
1137 1135
1138static void mib_counters_timer_wrapper(unsigned long _mp) 1136static void mib_counters_timer_wrapper(unsigned long _mp)
1139{ 1137{
1140 struct mv643xx_eth_private *mp = (void *)_mp; 1138 struct mv643xx_eth_private *mp = (void *)_mp;
1141
1142 mib_counters_update(mp); 1139 mib_counters_update(mp);
1140 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1143} 1141}
1144 1142
1145 1143
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2237 mp->int_mask |= INT_TX_END_0 << i; 2235 mp->int_mask |= INT_TX_END_0 << i;
2238 } 2236 }
2239 2237
2238 add_timer(&mp->mib_counters_timer);
2240 port_start(mp); 2239 port_start(mp);
2241 2240
2242 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2241 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2534 if (!ppdev) 2533 if (!ppdev)
2535 return -ENOMEM; 2534 return -ENOMEM;
2536 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 2535 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2536 ppdev->dev.of_node = pnp;
2537 2537
2538 ret = platform_device_add_resources(ppdev, &res, 1); 2538 ret = platform_device_add_resources(ppdev, &res, 1);
2539 if (ret) 2539 if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2916 mp->mib_counters_timer.data = (unsigned long)mp; 2916 mp->mib_counters_timer.data = (unsigned long)mp;
2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2917 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2918 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2919 add_timer(&mp->mib_counters_timer);
2920 2919
2921 spin_lock_init(&mp->mib_counters_lock); 2920 spin_lock_init(&mp->mib_counters_lock);
2922 2921
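
The mv643xx_eth change moves mod_timer() out of mib_counters_update() and into the timer wrapper, and arms the MIB timer only when the port is opened, so a plain statistics read no longer re-arms the timer on a closed port. A rough user-space analogue of that "re-arm only from the callback" shape, using a POSIX timer (interval and names are illustrative, not from the driver):

#include <stdio.h>
#include <time.h>
#include <signal.h>
#include <unistd.h>

static timer_t mib_timer;

static void counters_update(void)
{
	/* read/accumulate counters here; no re-arming in this helper */
	printf("counters updated\n");
}

/* Timer callback: do the update, then re-arm explicitly. */
static void timer_cb(union sigval sv)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };

	(void)sv;
	counters_update();
	timer_settime(mib_timer, 0, &its, NULL);	/* re-arm one-shot */
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = timer_cb,
	};
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };

	timer_create(CLOCK_MONOTONIC, &sev, &mib_timer);

	/* "open": the timer is armed only here, never by the helper */
	timer_settime(mib_timer, 0, &its, NULL);

	/* a direct update (e.g. a stats query) does not re-arm anything */
	counters_update();

	sleep(4);	/* let the callback fire a few times */
	return 0;
}
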
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
70 put_page(page); 70 put_page(page);
71 return -ENOMEM; 71 return -ENOMEM;
72 } 72 }
73 page_alloc->size = PAGE_SIZE << order; 73 page_alloc->page_size = PAGE_SIZE << order;
74 page_alloc->page = page; 74 page_alloc->page = page;
75 page_alloc->dma = dma; 75 page_alloc->dma = dma;
76 page_alloc->offset = frag_info->frag_align; 76 page_alloc->page_offset = frag_info->frag_align;
77 /* Not doing get_page() for each frag is a big win 77 /* Not doing get_page() for each frag is a big win
78 * on asymetric workloads. 78 * on asymetric workloads.
79 */ 79 */
80 atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); 80 atomic_set(&page->_count,
81 page_alloc->page_size / frag_info->frag_stride);
81 return 0; 82 return 0;
82} 83}
83 84
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
96 for (i = 0; i < priv->num_frags; i++) { 97 for (i = 0; i < priv->num_frags; i++) {
97 frag_info = &priv->frag_info[i]; 98 frag_info = &priv->frag_info[i];
98 page_alloc[i] = ring_alloc[i]; 99 page_alloc[i] = ring_alloc[i];
99 page_alloc[i].offset += frag_info->frag_stride; 100 page_alloc[i].page_offset += frag_info->frag_stride;
100 if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) 101
102 if (page_alloc[i].page_offset + frag_info->frag_stride <=
103 ring_alloc[i].page_size)
101 continue; 104 continue;
105
102 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) 106 if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
103 goto out; 107 goto out;
104 } 108 }
105 109
106 for (i = 0; i < priv->num_frags; i++) { 110 for (i = 0; i < priv->num_frags; i++) {
107 frags[i] = ring_alloc[i]; 111 frags[i] = ring_alloc[i];
108 dma = ring_alloc[i].dma + ring_alloc[i].offset; 112 dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
109 ring_alloc[i] = page_alloc[i]; 113 ring_alloc[i] = page_alloc[i];
110 rx_desc->data[i].addr = cpu_to_be64(dma); 114 rx_desc->data[i].addr = cpu_to_be64(dma);
111 } 115 }
@@ -117,7 +121,7 @@ out:
117 frag_info = &priv->frag_info[i]; 121 frag_info = &priv->frag_info[i];
118 if (page_alloc[i].page != ring_alloc[i].page) { 122 if (page_alloc[i].page != ring_alloc[i].page) {
119 dma_unmap_page(priv->ddev, page_alloc[i].dma, 123 dma_unmap_page(priv->ddev, page_alloc[i].dma,
120 page_alloc[i].size, PCI_DMA_FROMDEVICE); 124 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
121 page = page_alloc[i].page; 125 page = page_alloc[i].page;
122 atomic_set(&page->_count, 1); 126 atomic_set(&page->_count, 1);
123 put_page(page); 127 put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
131 int i) 135 int i)
132{ 136{
133 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; 137 const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
138 u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
139
134 140
135 if (frags[i].offset + frag_info->frag_stride > frags[i].size) 141 if (next_frag_end > frags[i].page_size)
136 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, 142 dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
137 PCI_DMA_FROMDEVICE); 143 PCI_DMA_FROMDEVICE);
138 144
139 if (frags[i].page) 145 if (frags[i].page)
140 put_page(frags[i].page); 146 put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
161 167
162 page_alloc = &ring->page_alloc[i]; 168 page_alloc = &ring->page_alloc[i];
163 dma_unmap_page(priv->ddev, page_alloc->dma, 169 dma_unmap_page(priv->ddev, page_alloc->dma,
164 page_alloc->size, PCI_DMA_FROMDEVICE); 170 page_alloc->page_size, PCI_DMA_FROMDEVICE);
165 page = page_alloc->page; 171 page = page_alloc->page;
166 atomic_set(&page->_count, 1); 172 atomic_set(&page->_count, 1);
167 put_page(page); 173 put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
184 i, page_count(page_alloc->page)); 190 i, page_count(page_alloc->page));
185 191
186 dma_unmap_page(priv->ddev, page_alloc->dma, 192 dma_unmap_page(priv->ddev, page_alloc->dma,
187 page_alloc->size, PCI_DMA_FROMDEVICE); 193 page_alloc->page_size, PCI_DMA_FROMDEVICE);
188 while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { 194 while (page_alloc->page_offset + frag_info->frag_stride <
195 page_alloc->page_size) {
189 put_page(page_alloc->page); 196 put_page(page_alloc->page);
190 page_alloc->offset += frag_info->frag_stride; 197 page_alloc->page_offset += frag_info->frag_stride;
191 } 198 }
192 page_alloc->page = NULL; 199 page_alloc->page = NULL;
193 } 200 }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
478 /* Save page reference in skb */ 485 /* Save page reference in skb */
479 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); 486 __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
480 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); 487 skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
481 skb_frags_rx[nr].page_offset = frags[nr].offset; 488 skb_frags_rx[nr].page_offset = frags[nr].page_offset;
482 skb->truesize += frag_info->frag_stride; 489 skb->truesize += frag_info->frag_stride;
483 frags[nr].page = NULL; 490 frags[nr].page = NULL;
484 } 491 }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
517 524
518 /* Get pointer to first fragment so we could copy the headers into the 525 /* Get pointer to first fragment so we could copy the headers into the
519 * (linear part of the) skb */ 526 * (linear part of the) skb */
520 va = page_address(frags[0].page) + frags[0].offset; 527 va = page_address(frags[0].page) + frags[0].page_offset;
521 528
522 if (length <= SMALL_PACKET_SIZE) { 529 if (length <= SMALL_PACKET_SIZE) {
523 /* We are copying all relevant data to the skb - temporarily 530 /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
645 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), 652 dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
646 DMA_FROM_DEVICE); 653 DMA_FROM_DEVICE);
647 ethh = (struct ethhdr *)(page_address(frags[0].page) + 654 ethh = (struct ethhdr *)(page_address(frags[0].page) +
648 frags[0].offset); 655 frags[0].page_offset);
649 656
650 if (is_multicast_ether_addr(ethh->h_dest)) { 657 if (is_multicast_ether_addr(ethh->h_dest)) {
651 struct mlx4_mac_entry *entry; 658 struct mlx4_mac_entry *entry;
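
The en_rx.c rework renames offset/size to page_offset/page_size and sets the page reference count up front to page_size / frag_stride, so each fragment handed out later accounts for exactly one reference. A standalone sketch of that bookkeeping, with an ordinary malloc'd buffer and arbitrary sizes standing in for the DMA page:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

#define PAGE_SIZE	4096U

struct rx_alloc {
	void *page;
	atomic_uint refs;		/* stands in for the page refcount */
	unsigned int page_offset;
	unsigned int page_size;
};

int main(void)
{
	unsigned int frag_stride = 1536;
	struct rx_alloc a = {
		.page = malloc(PAGE_SIZE),
		.page_offset = 0,
		.page_size = PAGE_SIZE,
	};

	/* one reference per fragment that fits in the allocation */
	atomic_init(&a.refs, a.page_size / frag_stride);

	/* consume fragments until the next one would not fit */
	while (a.page_offset + frag_stride <= a.page_size) {
		printf("frag at offset %u\n", a.page_offset);
		a.page_offset += frag_stride;
		if (atomic_fetch_sub(&a.refs, 1) == 1) {
			free(a.page);		/* last reference dropped */
			a.page = NULL;
		}
	}

	if (a.page)	/* nothing else holds the page in this sketch */
		free(a.page);
	return 0;
}
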
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
237struct mlx4_en_rx_alloc { 237struct mlx4_en_rx_alloc {
238 struct page *page; 238 struct page *page;
239 dma_addr_t dma; 239 dma_addr_t dma;
240 u32 offset; 240 u32 page_offset;
241 u32 size; 241 u32 page_size;
242}; 242};
243 243
244struct mlx4_en_tx_ring { 244struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5472cbd34028..6ca30739625f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
180 return 0; 180 return 0;
181} 181}
182 182
183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) 183static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
184 int csum)
184{ 185{
185 block->token = token; 186 block->token = token;
186 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); 187 if (csum) {
187 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 188 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
189 sizeof(block->data) - 2);
190 block->sig = ~xor8_buf(block, sizeof(*block) - 1);
191 }
188} 192}
189 193
190static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) 194static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
191{ 195{
192 struct mlx5_cmd_mailbox *next = msg->next; 196 struct mlx5_cmd_mailbox *next = msg->next;
193 197
194 while (next) { 198 while (next) {
195 calc_block_sig(next->buf, token); 199 calc_block_sig(next->buf, token, csum);
196 next = next->next; 200 next = next->next;
197 } 201 }
198} 202}
199 203
200static void set_signature(struct mlx5_cmd_work_ent *ent) 204static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
201{ 205{
202 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 206 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
203 calc_chain_sig(ent->in, ent->token); 207 calc_chain_sig(ent->in, ent->token, csum);
204 calc_chain_sig(ent->out, ent->token); 208 calc_chain_sig(ent->out, ent->token, csum);
205} 209}
206 210
207static void poll_timeout(struct mlx5_cmd_work_ent *ent) 211static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
539 lay->type = MLX5_PCI_CMD_XPORT; 543 lay->type = MLX5_PCI_CMD_XPORT;
540 lay->token = ent->token; 544 lay->token = ent->token;
541 lay->status_own = CMD_OWNER_HW; 545 lay->status_own = CMD_OWNER_HW;
542 if (!cmd->checksum_disabled) 546 set_signature(ent, !cmd->checksum_disabled);
543 set_signature(ent);
544 dump_command(dev, ent, 1); 547 dump_command(dev, ent, 1);
545 ktime_get_ts(&ent->ts1); 548 ktime_get_ts(&ent->ts1);
546 549
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
773 776
774 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 777 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
775 block = next->buf; 778 block = next->buf;
776 if (xor8_buf(block, sizeof(*block)) != 0xff)
777 return -EINVAL;
778 779
779 memcpy(to, block->data, copy); 780 memcpy(to, block->data, copy);
780 to += copy; 781 to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1361 goto err_map; 1362 goto err_map;
1362 } 1363 }
1363 1364
1365 cmd->checksum_disabled = 1;
1364 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1366 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1365 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1367 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1366 1368
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1510 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1512 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
1511 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1513 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
1512 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1514 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
1513 case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; 1515 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
1514 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1516 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
1515 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1517 case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
1516 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1518 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
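
The cmd.c changes make set_signature() take a csum argument (and mlx5_cmd_init() now starts with checksumming disabled), but the signature scheme itself is unchanged: an XOR-8 over the block whose stored complement makes the whole buffer XOR to 0xff. A small self-contained sketch of that checksum idea, independent of the real mailbox layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* XOR all bytes of a buffer together. */
static uint8_t xor8_buf(const void *buf, int len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= p[i];
	return sum;
}

struct blk {
	uint8_t data[14];
	uint8_t sig;		/* last byte covers the whole block */
};

static void set_sig(struct blk *b)
{
	/* XOR everything except the signature byte, store the complement:
	 * the XOR-8 over the full block (sig included) then equals 0xff. */
	b->sig = ~xor8_buf(b, sizeof(*b) - 1);
}

static int verify_sig(const struct blk *b)
{
	return xor8_buf(b, sizeof(*b)) == 0xff ? 0 : -1;
}

int main(void)
{
	struct blk b;

	memset(&b, 0, sizeof(b));
	strcpy((char *)b.data, "hello");
	set_sig(&b);
	printf("verify: %d\n", verify_sig(&b));	/* prints 0 */
	return 0;
}
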
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 443cc4d7b024..2231d93cc7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
366 goto err_in; 366 goto err_in;
367 } 367 }
368 368
369 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
370 name, pci_name(dev->pdev));
369 eq->eqn = out.eq_number; 371 eq->eqn = out.eq_number;
370 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 372 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
371 name, eq); 373 eq->name, eq);
372 if (err) 374 if (err)
373 goto err_eq; 375 goto err_eq;
374 376
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b47739b0b5f6..bc0f5fb66e24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; 165 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; 166 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
167 struct mlx5_cmd_set_hca_cap_mbox_out set_out; 167 struct mlx5_cmd_set_hca_cap_mbox_out set_out;
168 struct mlx5_profile *prof = dev->profile;
169 u64 flags; 168 u64 flags;
170 int csum = 1;
171 int err; 169 int err;
172 170
173 memset(&query_ctx, 0, sizeof(query_ctx)); 171 memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
197 memcpy(&set_ctx->hca_cap, &query_out->hca_cap, 195 memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
198 sizeof(set_ctx->hca_cap)); 196 sizeof(set_ctx->hca_cap));
199 197
200 if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
201 csum = !!prof->cmdif_csum;
202 flags = be64_to_cpu(set_ctx->hca_cap.flags);
203 if (csum)
204 flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
205 else
206 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
207
208 set_ctx->hca_cap.flags = cpu_to_be64(flags);
209 }
210
211 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 198 if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
212 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; 199 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
213 200
201 flags = be64_to_cpu(query_out->hca_cap.flags);
202 /* disable checksum */
203 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
204
205 set_ctx->hca_cap.flags = cpu_to_be64(flags);
214 memset(&set_out, 0, sizeof(set_out)); 206 memset(&set_out, 0, sizeof(set_out));
215 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); 207 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
216 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); 208 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
225 if (err) 217 if (err)
226 goto query_ex; 218 goto query_ex;
227 219
228 if (!csum)
229 dev->cmd.checksum_disabled = 1;
230
231query_ex: 220query_ex:
232 kfree(query_out); 221 kfree(query_out);
233 kfree(set_ctx); 222 kfree(set_ctx);
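
handle_hca_cap() now unconditionally clears MLX5_DEV_CAP_FLAG_CMDIF_CSUM in the capability flags, which sit in the mailbox as a big-endian 64-bit word. A minimal sketch of clearing one bit in such a field; the bit position and constant name below are made up for illustration:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define CAP_FLAG_CMDIF_CSUM	(1ULL << 46)	/* illustrative position */

int main(void)
{
	/* flags as they would sit in a device mailbox: big endian */
	uint64_t wire_flags = htobe64(CAP_FLAG_CMDIF_CSUM | 0x1);
	uint64_t flags;

	flags = be64toh(wire_flags);	/* to CPU order */
	flags &= ~CAP_FLAG_CMDIF_CSUM;	/* disable command checksumming */
	wire_flags = htobe64(flags);	/* back to wire order */

	printf("flags now %#llx\n", (unsigned long long)be64toh(wire_flags));
	return 0;
}
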
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 3a2408d44820..7b12acf210f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
90 __be64 pas[0]; 90 __be64 pas[0];
91}; 91};
92 92
93enum {
94 MAX_RECLAIM_TIME_MSECS = 5000,
95};
96
93static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) 97static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
94{ 98{
95 struct rb_root *root = &dev->priv.page_root; 99 struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
279 int err; 283 int err;
280 int i; 284 int i;
281 285
286 if (nclaimed)
287 *nclaimed = 0;
288
282 memset(&in, 0, sizeof(in)); 289 memset(&in, 0, sizeof(in));
283 outlen = sizeof(*out) + npages * sizeof(out->pas[0]); 290 outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
284 out = mlx5_vzalloc(outlen); 291 out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
388 395
389int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) 396int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
390{ 397{
391 unsigned long end = jiffies + msecs_to_jiffies(5000); 398 unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
392 struct fw_page *fwp; 399 struct fw_page *fwp;
393 struct rb_node *p; 400 struct rb_node *p;
401 int nclaimed = 0;
394 int err; 402 int err;
395 403
396 do { 404 do {
397 p = rb_first(&dev->priv.page_root); 405 p = rb_first(&dev->priv.page_root);
398 if (p) { 406 if (p) {
399 fwp = rb_entry(p, struct fw_page, rb_node); 407 fwp = rb_entry(p, struct fw_page, rb_node);
400 err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); 408 err = reclaim_pages(dev, fwp->func_id,
409 optimal_reclaimed_pages(),
410 &nclaimed);
401 if (err) { 411 if (err) {
402 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); 412 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
403 return err; 413 return err;
404 } 414 }
415 if (nclaimed)
416 end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
405 } 417 }
406 if (time_after(jiffies, end)) { 418 if (time_after(jiffies, end)) {
407 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); 419 mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
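
The reclaim loop above now zeroes *nclaimed on entry and pushes the deadline out whenever a call actually reclaimed pages, so it only gives up after a full quiet interval rather than after a fixed wall-clock budget. A user-space sketch of that "extend the deadline on progress" loop; the work function is a stand-in:

#include <stdio.h>
#include <time.h>

#define MAX_RECLAIM_TIME_SECS	5

/* Stand-in for reclaim_pages(): returns how many items it freed. */
static int reclaim_some(int *remaining)
{
	int n = *remaining > 3 ? 3 : *remaining;

	*remaining -= n;
	return n;
}

int main(void)
{
	int remaining = 10;
	time_t end = time(NULL) + MAX_RECLAIM_TIME_SECS;

	while (remaining > 0) {
		int nclaimed = reclaim_some(&remaining);

		if (nclaimed)		/* progress: push the deadline out */
			end = time(NULL) + MAX_RECLAIM_TIME_SECS;

		if (time(NULL) > end) {
			fprintf(stderr, "giving up, %d left\n", remaining);
			return 1;
		}
	}
	printf("all items reclaimed\n");
	return 0;
}
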
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index bd1a2d2bc2ae..ea54d95e5b9f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
448 irq = irq_of_parse_and_map(node, 0); 448 irq = irq_of_parse_and_map(node, 0);
449 if (irq <= 0) { 449 if (irq <= 0) {
450 netdev_err(ndev, "irq_of_parse_and_map failed\n"); 450 netdev_err(ndev, "irq_of_parse_and_map failed\n");
451 return -EINVAL; 451 ret = -EINVAL;
452 goto irq_map_fail;
452 } 453 }
453 454
454 priv = netdev_priv(ndev); 455 priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
472 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * 473 priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
473 TX_DESC_NUM, &priv->tx_base, 474 TX_DESC_NUM, &priv->tx_base,
474 GFP_DMA | GFP_KERNEL); 475 GFP_DMA | GFP_KERNEL);
475 if (priv->tx_desc_base == NULL) 476 if (priv->tx_desc_base == NULL) {
477 ret = -ENOMEM;
476 goto init_fail; 478 goto init_fail;
479 }
477 480
478 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * 481 priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
479 RX_DESC_NUM, &priv->rx_base, 482 RX_DESC_NUM, &priv->rx_base,
480 GFP_DMA | GFP_KERNEL); 483 GFP_DMA | GFP_KERNEL);
481 if (priv->rx_desc_base == NULL) 484 if (priv->rx_desc_base == NULL) {
485 ret = -ENOMEM;
482 goto init_fail; 486 goto init_fail;
487 }
483 488
484 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, 489 priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
485 GFP_ATOMIC); 490 GFP_ATOMIC);
486 if (!priv->tx_buf_base) 491 if (!priv->tx_buf_base) {
492 ret = -ENOMEM;
487 goto init_fail; 493 goto init_fail;
494 }
488 495
489 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, 496 priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
490 GFP_ATOMIC); 497 GFP_ATOMIC);
491 if (!priv->rx_buf_base) 498 if (!priv->rx_buf_base) {
499 ret = -ENOMEM;
492 goto init_fail; 500 goto init_fail;
501 }
493 502
494 platform_set_drvdata(pdev, ndev); 503 platform_set_drvdata(pdev, ndev);
495 504
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
522init_fail: 531init_fail:
523 netdev_err(ndev, "init failed\n"); 532 netdev_err(ndev, "init failed\n");
524 moxart_mac_free_memory(ndev); 533 moxart_mac_free_memory(ndev);
525 534irq_map_fail:
535 free_netdev(ndev);
526 return ret; 536 return ret;
527} 537}
528 538
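
The moxart probe fix gives every allocation failure an explicit ret value and adds an irq_map_fail label so the netdev is freed on early errors as well. The usual shape of that goto-unwind error handling, sketched with plain allocations (labels and names are illustrative, and the sketch collapses the two labels into one):

#include <stdio.h>
#include <stdlib.h>

int probe(void)
{
	void *dev = NULL, *tx_buf = NULL, *rx_buf = NULL;
	int ret;

	dev = malloc(64);
	if (!dev)
		return -1;

	tx_buf = malloc(4096);
	if (!tx_buf) {
		ret = -1;		/* set the error code explicitly */
		goto init_fail;
	}

	rx_buf = malloc(4096);
	if (!rx_buf) {
		ret = -1;
		goto init_fail;
	}

	printf("probe ok\n");
	return 0;

init_fail:				/* unwind in reverse order */
	free(rx_buf);			/* free(NULL) is a safe no-op */
	free(tx_buf);
	free(dev);			/* the resource the old path leaked */
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
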
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index ebe4c86e5230..ff83a9fcd4c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -665,7 +665,7 @@ static int qlcnic_set_channels(struct net_device *dev,
665 return err; 665 return err;
666 } 666 }
667 667
668 if (channel->tx_count) { 668 if (qlcnic_82xx_check(adapter) && channel->tx_count) {
669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count); 669 err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
670 if (err) 670 if (err)
671 return err; 671 return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 21d00a0449a1..9e61eb867452 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2257 2257
2258 err = qlcnic_alloc_adapter_resources(adapter); 2258 err = qlcnic_alloc_adapter_resources(adapter);
2259 if (err) 2259 if (err)
2260 goto err_out_free_netdev; 2260 goto err_out_free_wq;
2261 2261
2262 adapter->dev_rst_time = jiffies; 2262 adapter->dev_rst_time = jiffies;
2263 adapter->ahw->revision_id = pdev->revision; 2263 adapter->ahw->revision_id = pdev->revision;
@@ -2396,6 +2396,9 @@ err_out_disable_msi:
2396err_out_free_hw: 2396err_out_free_hw:
2397 qlcnic_free_adapter_resources(adapter); 2397 qlcnic_free_adapter_resources(adapter);
2398 2398
2399err_out_free_wq:
2400 destroy_workqueue(adapter->qlcnic_wq);
2401
2399err_out_free_netdev: 2402err_out_free_netdev:
2400 free_netdev(netdev); 2403 free_netdev(netdev);
2401 2404
@@ -3648,11 +3651,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
3648 u8 max_hw = QLCNIC_MAX_TX_RINGS; 3651 u8 max_hw = QLCNIC_MAX_TX_RINGS;
3649 u32 max_allowed; 3652 u32 max_allowed;
3650 3653
3651 if (!qlcnic_82xx_check(adapter)) {
3652 netdev_err(netdev, "No Multi TX-Q support\n");
3653 return -EINVAL;
3654 }
3655
3656 if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3654 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3657 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n"); 3655 netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
3658 return -EINVAL; 3656 return -EINVAL;
@@ -3692,8 +3690,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
3692 u8 max_hw = adapter->ahw->max_rx_ques; 3690 u8 max_hw = adapter->ahw->max_rx_ques;
3693 u32 max_allowed; 3691 u32 max_allowed;
3694 3692
3695 if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x && 3693 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3696 !qlcnic_use_msi) {
3697 netdev_err(netdev, "No RSS support in INT-x mode\n"); 3694 netdev_err(netdev, "No RSS support in INT-x mode\n");
3698 return -EINVAL; 3695 return -EINVAL;
3699 } 3696 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5cd831ebfa83..b57c278d3b46 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 688 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | 689 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
690 EESR_TDE | EESR_ECI, 690 EESR_TDE | EESR_ECI,
691 .fdr_value = 0x0000070f,
692 .rmcr_value = 0x00000001,
691 693
692 .apr = 1, 694 .apr = 1,
693 .mpr = 1, 695 .mpr = 1,
694 .tpauser = 1, 696 .tpauser = 1,
695 .bculr = 1, 697 .bculr = 1,
696 .hw_swap = 1, 698 .hw_swap = 1,
699 .rpadir = 1,
700 .rpadir_value = 2 << 16,
697 .no_trimd = 1, 701 .no_trimd = 1,
698 .no_ade = 1, 702 .no_ade = 1,
699 .tsu = 1, 703 .tsu = 1,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 9f18ae984f9e..21f9ad6392e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), 444 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), 445 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), 446 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
447 EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
448 EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
449 EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
450 EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
451 EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
452 EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
453 EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
454 EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
455 EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
456 EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
457 EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
458 EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
447}; 459};
448 460
449#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ 461#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -498,44 +510,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
498#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ 510#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
499 (1ULL << EF10_STAT_rx_length_error)) 511 (1ULL << EF10_STAT_rx_length_error))
500 512
501#if BITS_PER_LONG == 64 513/* These statistics are only provided if the firmware supports the
502#define STAT_MASK_BITMAP(bits) (bits) 514 * capability PM_AND_RXDP_COUNTERS.
503#else 515 */
504#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32 516#define HUNT_PM_AND_RXDP_STAT_MASK ( \
505#endif 517 (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
506 518 (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
507static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx) 519 (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
508{ 520 (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
509 static const unsigned long hunt_40g_stat_mask[] = { 521 (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
510 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 522 (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
511 HUNT_40G_EXTRA_STAT_MASK) 523 (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
512 }; 524 (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
513 static const unsigned long hunt_10g_only_stat_mask[] = { 525 (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
514 STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK | 526 (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
515 HUNT_10G_ONLY_STAT_MASK) 527 (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
516 }; 528 (1ULL << EF10_STAT_rx_dp_emerg_wait))
529
530static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
531{
532 u64 raw_mask = HUNT_COMMON_STAT_MASK;
517 u32 port_caps = efx_mcdi_phy_get_caps(efx); 533 u32 port_caps = efx_mcdi_phy_get_caps(efx);
534 struct efx_ef10_nic_data *nic_data = efx->nic_data;
518 535
519 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 536 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
520 return hunt_40g_stat_mask; 537 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
521 else 538 else
522 return hunt_10g_only_stat_mask; 539 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
540
541 if (nic_data->datapath_caps &
542 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
543 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
544
545 return raw_mask;
546}
547
548static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
549{
550 u64 raw_mask = efx_ef10_raw_stat_mask(efx);
551
552#if BITS_PER_LONG == 64
553 mask[0] = raw_mask;
554#else
555 mask[0] = raw_mask & 0xffffffff;
556 mask[1] = raw_mask >> 32;
557#endif
523} 558}
524 559
525static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 560static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
526{ 561{
562 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
563
564 efx_ef10_get_stat_mask(efx, mask);
527 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 565 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
528 efx_ef10_stat_mask(efx), names); 566 mask, names);
529} 567}
530 568
531static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) 569static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
532{ 570{
533 struct efx_ef10_nic_data *nic_data = efx->nic_data; 571 struct efx_ef10_nic_data *nic_data = efx->nic_data;
534 const unsigned long *stats_mask = efx_ef10_stat_mask(efx); 572 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
535 __le64 generation_start, generation_end; 573 __le64 generation_start, generation_end;
536 u64 *stats = nic_data->stats; 574 u64 *stats = nic_data->stats;
537 __le64 *dma_stats; 575 __le64 *dma_stats;
538 576
577 efx_ef10_get_stat_mask(efx, mask);
578
539 dma_stats = efx->stats_buffer.addr; 579 dma_stats = efx->stats_buffer.addr;
540 nic_data = efx->nic_data; 580 nic_data = efx->nic_data;
541 581
@@ -543,8 +583,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
543 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 583 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
544 return 0; 584 return 0;
545 rmb(); 585 rmb();
546 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask, 586 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
547 stats, efx->stats_buffer.addr, false); 587 stats, efx->stats_buffer.addr, false);
588 rmb();
548 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 589 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
549 if (generation_end != generation_start) 590 if (generation_end != generation_start)
550 return -EAGAIN; 591 return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
563static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, 604static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
564 struct rtnl_link_stats64 *core_stats) 605 struct rtnl_link_stats64 *core_stats)
565{ 606{
566 const unsigned long *mask = efx_ef10_stat_mask(efx); 607 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
567 struct efx_ef10_nic_data *nic_data = efx->nic_data; 608 struct efx_ef10_nic_data *nic_data = efx->nic_data;
568 u64 *stats = nic_data->stats; 609 u64 *stats = nic_data->stats;
569 size_t stats_count = 0, index; 610 size_t stats_count = 0, index;
570 int retry; 611 int retry;
571 612
613 efx_ef10_get_stat_mask(efx, mask);
614
572 /* If we're unlucky enough to read statistics during the DMA, wait 615 /* If we're unlucky enough to read statistics during the DMA, wait
573 * up to 10ms for it to finish (typically takes <500us) 616 * up to 10ms for it to finish (typically takes <500us)
574 */ 617 */
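
The ef10 rework builds the statistics mask as a plain u64 and then expands it into an unsigned long bitmap, so 32-bit and 64-bit builds share one code path and only the final expansion differs. A sketch of that split; the bit values are arbitrary, and the preprocessor test on ULONG_MAX stands in for the kernel's BITS_PER_LONG check:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define STAT_COUNT	48
#define MASK_LONGS	((STAT_COUNT + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void get_stat_mask(unsigned long *mask)
{
	uint64_t raw_mask = (1ULL << 3) | (1ULL << 40);	/* example bits */

#if ULONG_MAX > 0xffffffffUL			/* 64-bit longs */
	mask[0] = (unsigned long)raw_mask;
#else						/* 32-bit longs */
	mask[0] = (unsigned long)(raw_mask & 0xffffffff);
	mask[1] = (unsigned long)(raw_mask >> 32);
#endif
}

int main(void)
{
	unsigned long mask[MASK_LONGS] = { 0 };
	size_t i;

	get_stat_mask(mask);
	for (i = 0; i < MASK_LONGS; i++)
		printf("mask[%zu] = %#lx\n", i, mask[i]);
	return 0;
}
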
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index c082562dbf4e..366c8e3e3784 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
963 bool *was_attached) 963 bool *was_attached)
964{ 964{
965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); 965 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN); 966 MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
967 size_t outlen; 967 size_t outlen;
968 int rc; 968 int rc;
969 969
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
981 goto fail; 981 goto fail;
982 } 982 }
983 983
984 /* We currently assume we have control of the external link
985 * and are completely trusted by firmware. Abort probing
986 * if that's not true for this function.
987 */
988 if (driver_operating &&
989 outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
990 (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
991 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
992 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
993 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
994 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
995 netif_err(efx, probe, efx->net_dev,
996 "This driver version only supports one function per port\n");
997 return -ENODEV;
998 }
999
984 if (was_attached != NULL) 1000 if (was_attached != NULL)
985 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 1001 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
986 return 0; 1002 return 0;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8e..e0a63ddb7a6c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ 2574#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ 2575#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ 2576#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
2577#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */ 2577/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2578#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */ 2578 * capability only.
2579 */
2580#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
2581/* enum: PM discard_bb_overflow counter. Valid for EF10 with
2582 * PM_AND_RXDP_COUNTERS capability only.
2583 */
2584#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
2585/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2586 * capability only.
2587 */
2588#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
2589/* enum: PM discard_vfifo_full counter. Valid for EF10 with
2590 * PM_AND_RXDP_COUNTERS capability only.
2591 */
2592#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
2593/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2594 * capability only.
2595 */
2596#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
2597/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2598 * capability only.
2599 */
2600#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
2601/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
2602 * capability only.
2603 */
2604#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
2605/* enum: RXDP counter: Number of packets dropped due to the queue being
2606 * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2607 */
2608#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
2609/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
2610 * with PM_AND_RXDP_COUNTERS capability only.
2611 */
2612#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
2613/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
2614 * PM_AND_RXDP_COUNTERS capability only.
2615 */
2616#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
2617/* enum: RXDP counter: Number of times an emergency descriptor fetch was
2618 * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2619 */
2620#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
2621/* enum: RXDP counter: Number of times the DPCPU waited for an existing
2622 * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
2623 */
2624#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
2625/* enum: Start of GMAC stats buffer space, for Siena only. */
2626#define MC_CMD_GMAC_DMABUF_START 0x40
2627/* enum: End of GMAC stats buffer space, for Siena only. */
2628#define MC_CMD_GMAC_DMABUF_END 0x5f
2579#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ 2629#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
2580#define MC_CMD_MAC_NSTATS 0x61 /* enum */ 2630#define MC_CMD_MAC_NSTATS 0x61 /* enum */
2581 2631
@@ -5065,6 +5115,8 @@
5065#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 5115#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
5066#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 5116#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
5067#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 5117#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
5118#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
5119#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
5068/* RxDPCPU firmware id. */ 5120/* RxDPCPU firmware id. */
5069#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 5121#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
5070#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 5122#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index e7dbd2dd202e..9826594c8a48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -469,8 +469,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
469 * @count: Length of the @desc array 469 * @count: Length of the @desc array
470 * @mask: Bitmask of which elements of @desc are enabled 470 * @mask: Bitmask of which elements of @desc are enabled
471 * @stats: Buffer to update with the converted statistics. The length 471 * @stats: Buffer to update with the converted statistics. The length
472 * of this array must be at least the number of set bits in the 472 * of this array must be at least @count.
473 * first @count bits of @mask.
474 * @dma_buf: DMA buffer containing hardware statistics 473 * @dma_buf: DMA buffer containing hardware statistics
475 * @accumulate: If set, the converted values will be added rather than 474 * @accumulate: If set, the converted values will be added rather than
476 * directly stored to the corresponding elements of @stats 475 * directly stored to the corresponding elements of @stats
@@ -503,11 +502,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
503 } 502 }
504 503
505 if (accumulate) 504 if (accumulate)
506 *stats += val; 505 stats[index] += val;
507 else 506 else
508 *stats = val; 507 stats[index] = val;
509 } 508 }
510
511 ++stats;
512 } 509 }
513} 510}
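
The nic.c fix writes each converted value to stats[index] instead of advancing the output pointer, so masked-out entries keep their slots and the output array stays index-aligned with the descriptor table. A compact sketch of that difference, with invented sizes and values:

#include <stdio.h>
#include <stdint.h>

#define N_STATS	6

int main(void)
{
	/* bit set = statistic enabled for this NIC variant */
	unsigned long mask = (1UL << 0) | (1UL << 2) | (1UL << 5);
	uint64_t dma_buf[N_STATS] = { 10, 11, 12, 13, 14, 15 };
	uint64_t stats[N_STATS] = { 0 };
	int index;

	for (index = 0; index < N_STATS; index++) {
		if (!(mask & (1UL << index)))
			continue;		/* slot keeps its old value */
		stats[index] = dma_buf[index];	/* not *stats++ */
	}

	for (index = 0; index < N_STATS; index++)
		printf("stats[%d] = %llu\n", index,
		       (unsigned long long)stats[index]);
	return 0;
}
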
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index fda29d39032f..890bbbe8320e 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -386,6 +386,18 @@ enum {
386 EF10_STAT_rx_align_error, 386 EF10_STAT_rx_align_error,
387 EF10_STAT_rx_length_error, 387 EF10_STAT_rx_length_error,
388 EF10_STAT_rx_nodesc_drops, 388 EF10_STAT_rx_nodesc_drops,
389 EF10_STAT_rx_pm_trunc_bb_overflow,
390 EF10_STAT_rx_pm_discard_bb_overflow,
391 EF10_STAT_rx_pm_trunc_vfifo_full,
392 EF10_STAT_rx_pm_discard_vfifo_full,
393 EF10_STAT_rx_pm_trunc_qbb,
394 EF10_STAT_rx_pm_discard_qbb,
395 EF10_STAT_rx_pm_discard_mapping,
396 EF10_STAT_rx_dp_q_disabled_packets,
397 EF10_STAT_rx_dp_di_dropped_packets,
398 EF10_STAT_rx_dp_streaming_packets,
399 EF10_STAT_rx_dp_emerg_fetch,
400 EF10_STAT_rx_dp_emerg_wait,
389 EF10_STAT_COUNT 401 EF10_STAT_COUNT
390}; 402};
391 403
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5730fe2445a6..98eedb90cdc3 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] = {
1124 void __iomem *__ioaddr = ioaddr; \ 1124 void __iomem *__ioaddr = ioaddr; \
1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1125 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1126 __len -= 2; \ 1126 __len -= 2; \
1127 SMC_outw(*(u16 *)__ptr, ioaddr, \ 1127 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1128 DATA_REG(lp)); \
1129 __ptr += 2; \ 1128 __ptr += 2; \
1130 } \ 1129 } \
1131 if (SMC_CAN_USE_DATACS && lp->datacs) \ 1130 if (SMC_CAN_USE_DATACS && lp->datacs) \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] = {
1133 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ 1132 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
1134 if (__len & 2) { \ 1133 if (__len & 2) { \
1135 __ptr += (__len & ~3); \ 1134 __ptr += (__len & ~3); \
1136 SMC_outw(*((u16 *)__ptr), ioaddr, \ 1135 SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
1137 DATA_REG(lp)); \
1138 } \ 1136 } \
1139 } else if (SMC_16BIT(lp)) \ 1137 } else if (SMC_16BIT(lp)) \
1140 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ 1138 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 79974e31187a..cc3ce557e4aa 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -639,13 +639,6 @@ void cpsw_rx_handler(void *token, int len, int status)
639static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 639static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
640{ 640{
641 struct cpsw_priv *priv = dev_id; 641 struct cpsw_priv *priv = dev_id;
642 u32 rx, tx, rx_thresh;
643
644 rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
645 rx = __raw_readl(&priv->wr_regs->rx_stat);
646 tx = __raw_readl(&priv->wr_regs->tx_stat);
647 if (!rx_thresh && !rx && !tx)
648 return IRQ_NONE;
649 642
650 cpsw_intr_disable(priv); 643 cpsw_intr_disable(priv);
651 if (priv->irq_enabled == true) { 644 if (priv->irq_enabled == true) {
@@ -1169,9 +1162,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
1169 } 1162 }
1170 } 1163 }
1171 1164
1165 napi_enable(&priv->napi);
1172 cpdma_ctlr_start(priv->dma); 1166 cpdma_ctlr_start(priv->dma);
1173 cpsw_intr_enable(priv); 1167 cpsw_intr_enable(priv);
1174 napi_enable(&priv->napi);
1175 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1168 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1176 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1169 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1177 1170
@@ -1771,8 +1764,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1771 } 1764 }
1772 data->mac_control = prop; 1765 data->mac_control = prop;
1773 1766
1774 if (!of_property_read_u32(node, "dual_emac", &prop)) 1767 if (of_property_read_bool(node, "dual_emac"))
1775 data->dual_emac = prop; 1768 data->dual_emac = 1;
1776 1769
1777 /* 1770 /*
1778 * Populate all the child nodes here... 1771 * Populate all the child nodes here...
@@ -1782,7 +1775,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1782 if (ret) 1775 if (ret)
1783 pr_warn("Doesn't have any child node\n"); 1776 pr_warn("Doesn't have any child node\n");
1784 1777
1785 for_each_node_by_name(slave_node, "slave") { 1778 for_each_child_of_node(node, slave_node) {
1786 struct cpsw_slave_data *slave_data = data->slave_data + i; 1779 struct cpsw_slave_data *slave_data = data->slave_data + i;
1787 const void *mac_addr = NULL; 1780 const void *mac_addr = NULL;
1788 u32 phyid; 1781 u32 phyid;
@@ -1791,6 +1784,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1791 struct device_node *mdio_node; 1784 struct device_node *mdio_node;
1792 struct platform_device *mdio; 1785 struct platform_device *mdio;
1793 1786
1787 /* This is no slave child node, continue */
1788 if (strcmp(slave_node->name, "slave"))
1789 continue;
1790
1794 parp = of_get_property(slave_node, "phy_id", &lenp); 1791 parp = of_get_property(slave_node, "phy_id", &lenp);
1795 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1792 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1796 pr_err("Missing slave[%d] phy_id property\n", i); 1793 pr_err("Missing slave[%d] phy_id property\n", i);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 67df09ea9d04..6a32ef9d63ae 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { 876 netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 877 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 878 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
879 } 879 } else if (!netdev_mc_empty(ndev)) {
880 if (!netdev_mc_empty(ndev)) {
881 struct netdev_hw_addr *ha; 880 struct netdev_hw_addr *ha;
882 881
883 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 882 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 0721e72f9299..5af1c3e5032a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
975 return -EINVAL; /* Cannot change this parameter when up */ 975 return -EINVAL; /* Cannot change this parameter when up */
976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL) 976 if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
977 return -ENOBUFS; 977 return -ENOBUFS;
978 ym->bitrate = 9600;
979 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) { 978 if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
980 kfree(ym); 979 kfree(ym);
981 return -EFAULT; 980 return -EFAULT;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
82 82
83 struct mutex buffer_mutex; /* only used to protect buf */ 83 struct mutex buffer_mutex; /* only used to protect buf */
84 struct completion tx_complete; 84 struct completion tx_complete;
85 struct work_struct irqwork;
86 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ 85 u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
87}; 86};
88 87
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
344 if (ret) 343 if (ret)
345 goto err; 344 goto err;
346 345
346 INIT_COMPLETION(devrec->tx_complete);
347
347 /* Set TXNTRIG bit of TXNCON to send packet */ 348 /* Set TXNTRIG bit of TXNCON to send packet */
348 ret = read_short_reg(devrec, REG_TXNCON, &val); 349 ret = read_short_reg(devrec, REG_TXNCON, &val);
349 if (ret) 350 if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
354 val |= 0x4; 355 val |= 0x4;
355 write_short_reg(devrec, REG_TXNCON, val); 356 write_short_reg(devrec, REG_TXNCON, val);
356 357
357 INIT_COMPLETION(devrec->tx_complete);
358
359 /* Wait for the device to send the TX complete interrupt. */ 358 /* Wait for the device to send the TX complete interrupt. */
360 ret = wait_for_completion_interruptible_timeout( 359 ret = wait_for_completion_interruptible_timeout(
361 &devrec->tx_complete, 360 &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
590static irqreturn_t mrf24j40_isr(int irq, void *data) 589static irqreturn_t mrf24j40_isr(int irq, void *data)
591{ 590{
592 struct mrf24j40 *devrec = data; 591 struct mrf24j40 *devrec = data;
593
594 disable_irq_nosync(irq);
595
596 schedule_work(&devrec->irqwork);
597
598 return IRQ_HANDLED;
599}
600
601static void mrf24j40_isrwork(struct work_struct *work)
602{
603 struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
604 u8 intstat; 592 u8 intstat;
605 int ret; 593 int ret;
606 594
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
618 mrf24j40_handle_rx(devrec); 606 mrf24j40_handle_rx(devrec);
619 607
620out: 608out:
621 enable_irq(devrec->spi->irq); 609 return IRQ_HANDLED;
622} 610}
623 611
624static int mrf24j40_probe(struct spi_device *spi) 612static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
642 630
643 mutex_init(&devrec->buffer_mutex); 631 mutex_init(&devrec->buffer_mutex);
644 init_completion(&devrec->tx_complete); 632 init_completion(&devrec->tx_complete);
645 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
646 devrec->spi = spi; 633 devrec->spi = spi;
647 spi_set_drvdata(spi, devrec); 634 spi_set_drvdata(spi, devrec);
648 635
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
688 val &= ~0x3; /* Clear RX mode (normal) */ 675 val &= ~0x3; /* Clear RX mode (normal) */
689 write_short_reg(devrec, REG_RXMCR, val); 676 write_short_reg(devrec, REG_RXMCR, val);
690 677
691 ret = request_irq(spi->irq, 678 ret = request_threaded_irq(spi->irq,
692 mrf24j40_isr, 679 NULL,
693 IRQF_TRIGGER_FALLING, 680 mrf24j40_isr,
694 dev_name(&spi->dev), 681 IRQF_TRIGGER_LOW|IRQF_ONESHOT,
695 devrec); 682 dev_name(&spi->dev),
683 devrec);
696 684
697 if (ret) { 685 if (ret) {
698 dev_err(printdev(devrec), "Unable to get IRQ"); 686 dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
721 dev_dbg(printdev(devrec), "remove\n"); 709 dev_dbg(printdev(devrec), "remove\n");
722 710
723 free_irq(spi->irq, devrec); 711 free_irq(spi->irq, devrec);
724 flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
725 ieee802154_unregister_device(devrec->dev); 712 ieee802154_unregister_device(devrec->dev);
726 ieee802154_free_device(devrec->dev); 713 ieee802154_free_device(devrec->dev);
727 /* TODO: Will ieee802154_free_device() wait until ->xmit() is 714 /* TODO: Will ieee802154_free_device() wait until ->xmit() is
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1293 if (unlikely(!noblock)) 1293 if (unlikely(!noblock))
1294 add_wait_queue(&tfile->wq.wait, &wait); 1294 add_wait_queue(&tfile->wq.wait, &wait);
1295 while (len) { 1295 while (len) {
1296 current->state = TASK_INTERRUPTIBLE; 1296 if (unlikely(!noblock))
1297 current->state = TASK_INTERRUPTIBLE;
1297 1298
1298 /* Read frames from the queue */ 1299 /* Read frames from the queue */
1299 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { 1300 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1320 break; 1321 break;
1321 } 1322 }
1322 1323
1323 current->state = TASK_RUNNING; 1324 if (unlikely(!noblock)) {
1324 if (unlikely(!noblock)) 1325 current->state = TASK_RUNNING;
1325 remove_wait_queue(&tfile->wq.wait, &wait); 1326 remove_wait_queue(&tfile->wq.wait, &wait);
1327 }
1326 1328
1327 return ret; 1329 return ret;
1328} 1330}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 3569293df872..846cc19c04f2 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -36,8 +36,8 @@
36#define AX_RXHDR_L4_TYPE_TCP 16 36#define AX_RXHDR_L4_TYPE_TCP 16
37#define AX_RXHDR_L3CSUM_ERR 2 37#define AX_RXHDR_L3CSUM_ERR 2
38#define AX_RXHDR_L4CSUM_ERR 1 38#define AX_RXHDR_L4CSUM_ERR 1
39#define AX_RXHDR_CRC_ERR ((u32)BIT(31)) 39#define AX_RXHDR_CRC_ERR ((u32)BIT(29))
40#define AX_RXHDR_DROP_ERR ((u32)BIT(30)) 40#define AX_RXHDR_DROP_ERR ((u32)BIT(31))
41#define AX_ACCESS_MAC 0x01 41#define AX_ACCESS_MAC 0x01
42#define AX_ACCESS_PHY 0x02 42#define AX_ACCESS_PHY 0x02
43#define AX_ACCESS_EEPROM 0x04 43#define AX_ACCESS_EEPROM 0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
1406 .tx_fixup = ax88179_tx_fixup, 1406 .tx_fixup = ax88179_tx_fixup,
1407}; 1407};
1408 1408
1409static const struct driver_info samsung_info = {
1410 .description = "Samsung USB Ethernet Adapter",
1411 .bind = ax88179_bind,
1412 .unbind = ax88179_unbind,
1413 .status = ax88179_status,
1414 .link_reset = ax88179_link_reset,
1415 .reset = ax88179_reset,
1416 .stop = ax88179_stop,
1417 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1418 .rx_fixup = ax88179_rx_fixup,
1419 .tx_fixup = ax88179_tx_fixup,
1420};
1421
1409static const struct usb_device_id products[] = { 1422static const struct usb_device_id products[] = {
1410{ 1423{
1411 /* ASIX AX88179 10/100/1000 */ 1424 /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
1418}, { 1431}, {
1419 /* Sitecom USB 3.0 to Gigabit Adapter */ 1432 /* Sitecom USB 3.0 to Gigabit Adapter */
1420 USB_DEVICE(0x0df6, 0x0072), 1433 USB_DEVICE(0x0df6, 0x0072),
1421 .driver_info = (unsigned long) &sitecom_info, 1434 .driver_info = (unsigned long)&sitecom_info,
1435}, {
1436 /* Samsung USB Ethernet Adapter */
1437 USB_DEVICE(0x04e8, 0xa100),
1438 .driver_info = (unsigned long)&samsung_info,
1422}, 1439},
1423 { }, 1440 { },
1424}; 1441};
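
The ax88179 fix above swaps the receive-header error bits to BIT(29) and BIT(31). A trivial sketch of defining and testing such header flags; the macro and positions below merely mirror the diff for illustration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1U << (n))
#define RXHDR_CRC_ERR		((uint32_t)BIT(29))
#define RXHDR_DROP_ERR		((uint32_t)BIT(31))

static void check_rx_header(uint32_t hdr)
{
	if (hdr & RXHDR_CRC_ERR)
		printf("CRC error\n");
	if (hdr & RXHDR_DROP_ERR)
		printf("dropped frame\n");
}

int main(void)
{
	check_rx_header(RXHDR_DROP_ERR);	/* prints "dropped frame" */
	return 0;
}
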
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3d6aaf79d8b2..818ce90185b5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -714,6 +714,7 @@ static const struct usb_device_id products[] = {
714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 714 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 715 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 716 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
717 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
717 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 718 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
718 719
719 /* 4. Gobi 1000 devices */ 720 /* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index bf94e10a37c8..90a429b7ebad 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1688,8 +1688,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && 1688 if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
1689 !(info->flags & FLAG_MULTI_PACKET)) { 1689 !(info->flags & FLAG_MULTI_PACKET)) {
1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL); 1690 dev->padding_pkt = kzalloc(1, GFP_KERNEL);
1691 if (!dev->padding_pkt) 1691 if (!dev->padding_pkt) {
1692 status = -ENOMEM;
1692 goto out4; 1693 goto out4;
1694 }
1693 } 1695 }
1694 1696
1695 status = register_netdev (net); 1697 status = register_netdev (net);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index defec2b3c5a4..9fbdfcd1e1a0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -938,7 +938,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
938 return -EINVAL; 938 return -EINVAL;
939 } else { 939 } else {
940 vi->curr_queue_pairs = queue_pairs; 940 vi->curr_queue_pairs = queue_pairs;
941 schedule_delayed_work(&vi->refill, 0); 941 /* virtnet_open() will refill when device is going to up. */
942 if (dev->flags & IFF_UP)
943 schedule_delayed_work(&vi->refill, 0);
942 } 944 }
943 945
944 return 0; 946 return 0;
@@ -1116,6 +1118,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1116{ 1118{
1117 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1119 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1118 1120
1121 mutex_lock(&vi->config_lock);
1122
1123 if (!vi->config_enable)
1124 goto done;
1125
1119 switch(action & ~CPU_TASKS_FROZEN) { 1126 switch(action & ~CPU_TASKS_FROZEN) {
1120 case CPU_ONLINE: 1127 case CPU_ONLINE:
1121 case CPU_DOWN_FAILED: 1128 case CPU_DOWN_FAILED:
@@ -1128,6 +1135,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
1128 default: 1135 default:
1129 break; 1136 break;
1130 } 1137 }
1138
1139done:
1140 mutex_unlock(&vi->config_lock);
1131 return NOTIFY_OK; 1141 return NOTIFY_OK;
1132} 1142}
1133 1143
@@ -1733,7 +1743,9 @@ static int virtnet_restore(struct virtio_device *vdev)
1733 vi->config_enable = true; 1743 vi->config_enable = true;
1734 mutex_unlock(&vi->config_lock); 1744 mutex_unlock(&vi->config_lock);
1735 1745
1746 rtnl_lock();
1736 virtnet_set_queues(vi, vi->curr_queue_pairs); 1747 virtnet_set_queues(vi, vi->curr_queue_pairs);
1748 rtnl_unlock();
1737 1749
1738 return 0; 1750 return 0;
1739} 1751}
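
The virtio_net changes take config_lock in the CPU hotplug callback and skip the work when config_enable is false, and wrap virtnet_set_queues() in rtnl_lock() on restore. A user-space sketch of the first pattern, checking an enable flag under the same lock that suspend flips it (names are illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static bool config_enable = true;

/* Callback that may race with suspend: bail out if config is frozen. */
static void cpu_callback(void)
{
	pthread_mutex_lock(&config_lock);
	if (!config_enable)
		goto done;

	printf("re-distributing queues across CPUs\n");
done:
	pthread_mutex_unlock(&config_lock);
}

static void freeze(void)
{
	pthread_mutex_lock(&config_lock);
	config_enable = false;		/* what suspend would do */
	pthread_mutex_unlock(&config_lock);
}

int main(void)
{
	cpu_callback();			/* does the work */
	freeze();
	cpu_callback();			/* skips it safely */
	return 0;
}
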
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f268751..bcfff0d62de4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
1972 } 1972 }
1973 1973
1974 i = port->index; 1974 i = port->index;
1975 memset(&sync, 0, sizeof(sync));
1975 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); 1976 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
1976 /* Lucky card and linux use same encoding here */ 1977 /* Lucky card and linux use same encoding here */
1977 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) == 1978 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a70cc7..4c0a69779b89 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
355 ifr->ifr_settings.size = size; /* data size wanted */ 355 ifr->ifr_settings.size = size; /* data size wanted */
356 return -ENOBUFS; 356 return -ENOBUFS;
357 } 357 }
358 memset(&line, 0, sizeof(line));
358 line.clock_type = get_status(port)->clocking; 359 line.clock_type = get_status(port)->clocking;
359 line.clock_rate = 0; 360 line.clock_rate = 0;
360 line.loopback = 0; 361 line.loopback = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e4f65900132d..709301f88dcd 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
208 struct ath_hw *ah = sc->sc_ah; 208 struct ath_hw *ah = sc->sc_ah;
209 struct ath_common *common = ath9k_hw_common(ah); 209 struct ath_common *common = ath9k_hw_common(ah);
210 unsigned long flags; 210 unsigned long flags;
211 int i;
211 212
212 if (ath_startrecv(sc) != 0) { 213 if (ath_startrecv(sc) != 0) {
213 ath_err(common, "Unable to restart recv logic\n"); 214 ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
235 } 236 }
236 work: 237 work:
237 ath_restart_work(sc); 238 ath_restart_work(sc);
239
240 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
241 if (!ATH_TXQ_SETUP(sc, i))
242 continue;
243
244 spin_lock_bh(&sc->tx.txq[i].axq_lock);
245 ath_txq_schedule(sc, &sc->tx.txq[i]);
246 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
247 }
238 } 248 }
239 249
240 ieee80211_wake_queues(sc->hw); 250 ieee80211_wake_queues(sc->hw);
@@ -539,21 +549,10 @@ chip_reset:
539 549
540static int ath_reset(struct ath_softc *sc) 550static int ath_reset(struct ath_softc *sc)
541{ 551{
542 int i, r; 552 int r;
543 553
544 ath9k_ps_wakeup(sc); 554 ath9k_ps_wakeup(sc);
545
546 r = ath_reset_internal(sc, NULL); 555 r = ath_reset_internal(sc, NULL);
547
548 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
549 if (!ATH_TXQ_SETUP(sc, i))
550 continue;
551
552 spin_lock_bh(&sc->tx.txq[i].axq_lock);
553 ath_txq_schedule(sc, &sc->tx.txq[i]);
554 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
555 }
556
557 ath9k_ps_restore(sc); 556 ath9k_ps_restore(sc);
558 557
559 return r; 558 return r;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5ac713d2ff5d..dd30452df966 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1969,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1969static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1969static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1970 struct ath_atx_tid *tid, struct sk_buff *skb) 1970 struct ath_atx_tid *tid, struct sk_buff *skb)
1971{ 1971{
1972 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1972 struct ath_frame_info *fi = get_frame_info(skb); 1973 struct ath_frame_info *fi = get_frame_info(skb);
1973 struct list_head bf_head; 1974 struct list_head bf_head;
1974 struct ath_buf *bf; 1975 struct ath_buf *bf = fi->bf;
1975
1976 bf = fi->bf;
1977 1976
1978 INIT_LIST_HEAD(&bf_head); 1977 INIT_LIST_HEAD(&bf_head);
1979 list_add_tail(&bf->list, &bf_head); 1978 list_add_tail(&bf->list, &bf_head);
1980 bf->bf_state.bf_type = 0; 1979 bf->bf_state.bf_type = 0;
1980 if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
1981 bf->bf_state.bf_type = BUF_AMPDU;
1982 ath_tx_addto_baw(sc, tid, bf);
1983 }
1981 1984
1982 bf->bf_next = NULL; 1985 bf->bf_next = NULL;
1983 bf->bf_lastbf = bf; 1986 bf->bf_lastbf = bf;
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 899cad34ccd3..755a0c8edfe1 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -237,7 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
237 struct hwbus_priv *self = dev_id; 237 struct hwbus_priv *self = dev_id;
238 238
239 if (self->core) { 239 if (self->core) {
240 cw1200_spi_lock(self);
240 cw1200_irq_handler(self->core); 241 cw1200_irq_handler(self->core);
242 cw1200_spi_unlock(self);
241 return IRQ_HANDLED; 243 return IRQ_HANDLED;
242 } else { 244 } else {
243 return IRQ_NONE; 245 return IRQ_NONE;
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 30d45e2fc193..8ac305be68f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
240 .ht_params = &iwl6000_ht_params, 240 .ht_params = &iwl6000_ht_params,
241}; 241};
242 242
243const struct iwl_cfg iwl6035_2agn_sff_cfg = {
244 .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
245 IWL_DEVICE_6035,
246 .ht_params = &iwl6000_ht_params,
247};
248
243const struct iwl_cfg iwl1030_bgn_cfg = { 249const struct iwl_cfg iwl1030_bgn_cfg = {
244 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", 250 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
245 IWL_DEVICE_6030, 251 IWL_DEVICE_6030,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index e4d370bff306..b03c25e14903 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
280extern const struct iwl_cfg iwl2000_2bgn_d_cfg; 280extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
281extern const struct iwl_cfg iwl2030_2bgn_cfg; 281extern const struct iwl_cfg iwl2030_2bgn_cfg;
282extern const struct iwl_cfg iwl6035_2agn_cfg; 282extern const struct iwl_cfg iwl6035_2agn_cfg;
283extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
283extern const struct iwl_cfg iwl105_bgn_cfg; 284extern const struct iwl_cfg iwl105_bgn_cfg;
284extern const struct iwl_cfg iwl105_bgn_d_cfg; 285extern const struct iwl_cfg iwl105_bgn_d_cfg;
285extern const struct iwl_cfg iwl135_bgn_cfg; 286extern const struct iwl_cfg iwl135_bgn_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index dd57a36ecb10..80b47508647c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
601{ 601{
602 int ret; 602 int ret;
603 603
604 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 604 if (trans->state != IWL_TRANS_FW_ALIVE) {
605 "%s bad state = %d", __func__, trans->state); 605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
606 return -EIO;
607 }
606 608
607 if (!(cmd->flags & CMD_ASYNC)) 609 if (!(cmd->flags & CMD_ASYNC))
608 lock_map_acquire_read(&trans->sync_cmd_lockdep_map); 610 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 21407a353a3b..d58e393324ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
273 if (!mvmvif->queue_params[ac].uapsd) 273 if (!mvmvif->queue_params[ac].uapsd)
274 continue; 274 continue;
275 275
276 cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); 276 if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
277 cmd->flags |=
278 cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
279
277 cmd->uapsd_ac_flags |= BIT(ac); 280 cmd->uapsd_ac_flags |= BIT(ac);
278 281
279 /* QNDP TID - the highest TID with no admission control */ 282 /* QNDP TID - the highest TID with no admission control */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9a7ab8495300..621fb71f282a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -394,6 +394,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
394 return false; 394 return false;
395 } 395 }
396 396
397 /*
398 * If scan cannot be aborted, it means that we had a
399 * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
400 * ieee80211_scan_completed already.
401 */
397 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n", 402 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
398 *resp); 403 *resp);
399 return true; 404 return true;
@@ -417,14 +422,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
417 SCAN_COMPLETE_NOTIFICATION }; 422 SCAN_COMPLETE_NOTIFICATION };
418 int ret; 423 int ret;
419 424
425 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
426 return;
427
420 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 428 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
421 scan_abort_notif, 429 scan_abort_notif,
422 ARRAY_SIZE(scan_abort_notif), 430 ARRAY_SIZE(scan_abort_notif),
423 iwl_mvm_scan_abort_notif, NULL); 431 iwl_mvm_scan_abort_notif, NULL);
424 432
425 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL); 433 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
434 CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
426 if (ret) { 435 if (ret) {
427 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret); 436 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
437 /* mac80211's state will be cleaned in the fw_restart flow */
428 goto out_remove_notif; 438 goto out_remove_notif;
429 } 439 }
430 440
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dc02cb9792af..26108a1a29fa 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
139 139
140/* 6x00 Series */ 140/* 6x00 Series */
141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, 141 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
142 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, 143 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
144 {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
143 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, 145 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
144 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, 146 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
145 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, 147 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
146 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, 148 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
147 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, 149 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
148 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 150 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
151 {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
149 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 152 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
150 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 153 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
151 154
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
153 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, 156 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
154 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, 157 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
155 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, 158 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
159 {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
156 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, 160 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
157 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, 161 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
158 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, 163 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
164 {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
159 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, 165 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
160 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, 166 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
161 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, 167 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
168 {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
162 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, 169 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
163 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ 170 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
164 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ 171 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
240 247
241/* 6x35 Series */ 248/* 6x35 Series */
242 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, 249 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
250 {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
243 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, 251 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
252 {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
244 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, 253 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
254 {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
245 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, 255 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
246 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, 256 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
247 257
@@ -260,54 +270,86 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
260#if IS_ENABLED(CONFIG_IWLMVM) 270#if IS_ENABLED(CONFIG_IWLMVM)
261/* 7000 Series */ 271/* 7000 Series */
262 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, 272 {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
263 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, 274 {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
264 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, 275 {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
265 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, 277 {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
266 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, 278 {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
267 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, 279 {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
268 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, 280 {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
281 {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
269 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, 282 {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
283 {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
270 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, 284 {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
271 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, 285 {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
272 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, 287 {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
273 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, 289 {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
274 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, 290 {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
275 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, 291 {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
276 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, 292 {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
277 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, 293 {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
278 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, 294 {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
295 {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
297 {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
298 {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
299 {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
279 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, 300 {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
301 {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
280 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, 302 {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
281 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, 303 {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
282 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, 304 {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
283 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, 306 {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
284 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, 307 {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
285 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, 309 {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
286 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, 310 {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
287 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, 311 {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
312 {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
313 {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
288 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, 314 {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
315 {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
289 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, 316 {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
317 {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
290 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, 318 {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
291 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, 319 {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
320 {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
292 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, 321 {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
293 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, 322 {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
323 {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
324 {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
325 {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
326 {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
294 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, 327 {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
328 {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
295 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, 329 {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
296 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, 330 {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
297 331
298/* 3160 Series */ 332/* 3160 Series */
299 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, 333 {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
334 {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
300 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, 335 {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
336 {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
301 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, 337 {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
302 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, 338 {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
303 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, 339 {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
340 {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
304 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, 341 {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
342 {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
343 {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
305 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, 344 {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
345 {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
306 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, 346 {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
347 {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
307 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, 348 {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
308 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, 349 {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
309 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, 350 {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
310 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, 351 {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
352 {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
311#endif /* CONFIG_IWLMVM */ 353#endif /* CONFIG_IWLMVM */
312 354
313 {0} 355 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index bad95d28d50d..c3f904d422b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1401,6 +1401,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1401 spin_lock_init(&trans_pcie->reg_lock); 1401 spin_lock_init(&trans_pcie->reg_lock);
1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1402 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1403 1403
1404 err = pci_enable_device(pdev);
1405 if (err)
1406 goto out_no_pci;
1407
1404 if (!cfg->base_params->pcie_l1_allowed) { 1408 if (!cfg->base_params->pcie_l1_allowed) {
1405 /* 1409 /*
1406 * W/A - seems to solve weird behavior. We need to remove this 1410 * W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1416,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1412 PCIE_LINK_STATE_CLKPM); 1416 PCIE_LINK_STATE_CLKPM);
1413 } 1417 }
1414 1418
1415 err = pci_enable_device(pdev);
1416 if (err)
1417 goto out_no_pci;
1418
1419 pci_set_master(pdev); 1419 pci_set_master(pdev);
1420 1420
1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 1421 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index f45eb29c2ede..1424335163b9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1102 * non-AGG queue. 1102 * non-AGG queue.
1103 */ 1103 */
1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1104 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
1105
1106 ssn = trans_pcie->txq[txq_id].q.read_ptr;
1105 } 1107 }
1106 1108
1107 /* Place first TFD at index corresponding to start sequence number. 1109 /* Place first TFD at index corresponding to start sequence number.
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 9d7c0e6c4fc7..37f873bb342f 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1422 */ 1422 */
1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) 1423int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1424{ 1424{
1425 int ret = 0;
1426
1425 if (!priv->media_connected) 1427 if (!priv->media_connected)
1426 return 0; 1428 return 0;
1427 1429
1428 switch (priv->bss_mode) { 1430 switch (priv->bss_mode) {
1429 case NL80211_IFTYPE_STATION: 1431 case NL80211_IFTYPE_STATION:
1430 case NL80211_IFTYPE_P2P_CLIENT: 1432 case NL80211_IFTYPE_P2P_CLIENT:
1431 return mwifiex_deauthenticate_infra(priv, mac); 1433 ret = mwifiex_deauthenticate_infra(priv, mac);
1434 if (ret)
1435 cfg80211_disconnected(priv->netdev, 0, NULL, 0,
1436 GFP_KERNEL);
1437 break;
1432 case NL80211_IFTYPE_ADHOC: 1438 case NL80211_IFTYPE_ADHOC:
1433 return mwifiex_send_cmd_sync(priv, 1439 return mwifiex_send_cmd_sync(priv,
1434 HostCmd_CMD_802_11_AD_HOC_STOP, 1440 HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1440 break; 1446 break;
1441 } 1447 }
1442 1448
1443 return 0; 1449 return ret;
1444} 1450}
1445EXPORT_SYMBOL_GPL(mwifiex_deauthenticate); 1451EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
1446 1452
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd778337deee..c2b91f566e05 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -358,10 +358,12 @@ process_start:
358 } 358 }
359 } while (true); 359 } while (true);
360 360
361 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) 361 spin_lock_irqsave(&adapter->main_proc_lock, flags);
362 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
363 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
362 goto process_start; 364 goto process_start;
365 }
363 366
364 spin_lock_irqsave(&adapter->main_proc_lock, flags);
365 adapter->mwifiex_processing = false; 367 adapter->mwifiex_processing = false;
366 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 368 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
367 369
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8b057524b252..8c351f71f72f 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
118 dev_dbg(adapter->dev, 118 dev_dbg(adapter->dev,
119 "info: successfully disconnected from %pM: reason code %d\n", 119 "info: successfully disconnected from %pM: reason code %d\n",
120 priv->cfg_bssid, reason_code); 120 priv->cfg_bssid, reason_code);
121 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 121 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
122 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
122 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, 123 cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
123 GFP_KERNEL); 124 GFP_KERNEL);
124 } 125 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95deb274b..dc49e525ae5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
105 goto exit_release_regions; 105 goto exit_release_regions;
106 } 106 }
107 107
108 pci_enable_msi(pci_dev);
109
110 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); 108 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
111 if (!hw) { 109 if (!hw) {
112 rt2x00_probe_err("Failed to allocate hardware\n"); 110 rt2x00_probe_err("Failed to allocate hardware\n");
113 retval = -ENOMEM; 111 retval = -ENOMEM;
114 goto exit_disable_msi; 112 goto exit_release_regions;
115 } 113 }
116 114
117 pci_set_drvdata(pci_dev, hw); 115 pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
152exit_free_device: 150exit_free_device:
153 ieee80211_free_hw(hw); 151 ieee80211_free_hw(hw);
154 152
155exit_disable_msi:
156 pci_disable_msi(pci_dev);
157
158exit_release_regions: 153exit_release_regions:
159 pci_release_regions(pci_dev); 154 pci_release_regions(pci_dev);
160 155
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
179 rt2x00pci_free_reg(rt2x00dev); 174 rt2x00pci_free_reg(rt2x00dev);
180 ieee80211_free_hw(hw); 175 ieee80211_free_hw(hw);
181 176
182 pci_disable_msi(pci_dev);
183
184 /* 177 /*
185 * Free the PCI device data. 178 * Free the PCI device data.
186 */ 179 */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 763cf1defab5..5a060e537fbe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
343 (bool)GET_RX_DESC_PAGGR(pdesc)); 343 (bool)GET_RX_DESC_PAGGR(pdesc));
344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
345 if (phystatus) { 345 if (phystatus) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE); 346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
347 stats->rx_bufshift);
347 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, 348 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
348 p_drvinfo); 349 p_drvinfo);
349 } 350 }
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index b45bce20ad76..1b08d8798372 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
39static void connect(struct backend_info *); 39static void connect(struct backend_info *);
40static void backend_create_xenvif(struct backend_info *be); 40static void backend_create_xenvif(struct backend_info *be);
41static void unregister_hotplug_status_watch(struct backend_info *be); 41static void unregister_hotplug_status_watch(struct backend_info *be);
42static void set_backend_state(struct backend_info *be,
43 enum xenbus_state state);
42 44
43static int netback_remove(struct xenbus_device *dev) 45static int netback_remove(struct xenbus_device *dev)
44{ 46{
45 struct backend_info *be = dev_get_drvdata(&dev->dev); 47 struct backend_info *be = dev_get_drvdata(&dev->dev);
46 48
49 set_backend_state(be, XenbusStateClosed);
50
47 unregister_hotplug_status_watch(be); 51 unregister_hotplug_status_watch(be);
48 if (be->vif) { 52 if (be->vif) {
49 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 53 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 9d2009a9004d..78cc76053328 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,10 +74,4 @@ config OF_MTD
74 depends on MTD 74 depends on MTD
75 def_bool y 75 def_bool y
76 76
77config OF_RESERVED_MEM
78 depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
79 def_bool y
80 help
81 Initialization code for DMA reserved memory
82
83endmenu # OF 77endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index ed9660adad77..efd05102c405 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o
9obj-$(CONFIG_OF_PCI) += of_pci.o 9obj-$(CONFIG_OF_PCI) += of_pci.o
10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o 10obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
11obj-$(CONFIG_OF_MTD) += of_mtd.o 11obj-$(CONFIG_OF_MTD) += of_mtd.o
12obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 865d3f66c86b..7d4c70f859e3 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
303 struct device_node *cpun, *cpus; 303 struct device_node *cpun, *cpus;
304 304
305 cpus = of_find_node_by_path("/cpus"); 305 cpus = of_find_node_by_path("/cpus");
306 if (!cpus) { 306 if (!cpus)
307 pr_warn("Missing cpus node, bailing out\n");
308 return NULL; 307 return NULL;
309 }
310 308
311 for_each_child_of_node(cpus, cpun) { 309 for_each_child_of_node(cpus, cpun) {
312 if (of_node_cmp(cpun->type, "cpu")) 310 if (of_node_cmp(cpun->type, "cpu"))
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 229dd9d69e18..a4fa9ad31b8f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/random.h>
22 21
23#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 22#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
24#ifdef CONFIG_PPC 23#ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
803} 802}
804 803
805#endif /* CONFIG_OF_EARLY_FLATTREE */ 804#endif /* CONFIG_OF_EARLY_FLATTREE */
806
807/* Feed entire flattened device tree into the random pool */
808static int __init add_fdt_randomness(void)
809{
810 if (initial_boot_params)
811 add_device_randomness(initial_boot_params,
812 be32_to_cpu(initial_boot_params->totalsize));
813
814 return 0;
815}
816core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644
index 0fe40c7d6904..000000000000
--- a/drivers/of/of_reserved_mem.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/*
2 * Device tree based initialization code for reserved memory.
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
 11 * License or (at your option) any later version of the license.
12 */
13
14#include <linux/memblock.h>
15#include <linux/err.h>
16#include <linux/of.h>
17#include <linux/of_fdt.h>
18#include <linux/of_platform.h>
19#include <linux/mm.h>
20#include <linux/sizes.h>
21#include <linux/mm_types.h>
22#include <linux/dma-contiguous.h>
23#include <linux/dma-mapping.h>
24#include <linux/of_reserved_mem.h>
25
26#define MAX_RESERVED_REGIONS 16
27struct reserved_mem {
28 phys_addr_t base;
29 unsigned long size;
30 struct cma *cma;
31 char name[32];
32};
33static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
34static int reserved_mem_count;
35
36static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
37 int depth, void *data)
38{
39 struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
40 phys_addr_t base, size;
41 int is_cma, is_reserved;
42 unsigned long len;
43 const char *status;
44 __be32 *prop;
45
46 is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
47 of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
48 is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
49
50 if (!is_reserved && !is_cma) {
51 /* ignore node and scan next one */
52 return 0;
53 }
54
55 status = of_get_flat_dt_prop(node, "status", &len);
56 if (status && strcmp(status, "okay") != 0) {
 57 /* ignore disabled node and scan next one */
58 return 0;
59 }
60
61 prop = of_get_flat_dt_prop(node, "reg", &len);
62 if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
63 sizeof(__be32))) {
64 pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
65 uname);
66 /* ignore node and scan next one */
67 return 0;
68 }
69 base = dt_mem_next_cell(dt_root_addr_cells, &prop);
70 size = dt_mem_next_cell(dt_root_size_cells, &prop);
71
72 if (!size) {
73 /* ignore node and scan next one */
74 return 0;
75 }
76
77 pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
78 uname, (unsigned long)base, (unsigned long)size / SZ_1M);
79
80 if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
81 return -ENOSPC;
82
83 rmem->base = base;
84 rmem->size = size;
85 strlcpy(rmem->name, uname, sizeof(rmem->name));
86
87 if (is_cma) {
88 struct cma *cma;
89 if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
90 rmem->cma = cma;
91 reserved_mem_count++;
92 if (of_get_flat_dt_prop(node,
93 "linux,default-contiguous-region",
94 NULL))
95 dma_contiguous_set_default(cma);
96 }
97 } else if (is_reserved) {
98 if (memblock_remove(base, size) == 0)
99 reserved_mem_count++;
100 else
101 pr_err("Failed to reserve memory for %s\n", uname);
102 }
103
104 return 0;
105}
106
107static struct reserved_mem *get_dma_memory_region(struct device *dev)
108{
109 struct device_node *node;
110 const char *name;
111 int i;
112
113 node = of_parse_phandle(dev->of_node, "memory-region", 0);
114 if (!node)
115 return NULL;
116
117 name = kbasename(node->full_name);
118 for (i = 0; i < reserved_mem_count; i++)
119 if (strcmp(name, reserved_mem[i].name) == 0)
120 return &reserved_mem[i];
121 return NULL;
122}
123
124/**
125 * of_reserved_mem_device_init() - assign reserved memory region to given device
126 *
127 * This function assign memory region pointed by "memory-region" device tree
128 * property to the given device.
129 */
130void of_reserved_mem_device_init(struct device *dev)
131{
132 struct reserved_mem *region = get_dma_memory_region(dev);
133 if (!region)
134 return;
135
136 if (region->cma) {
137 dev_set_cma_area(dev, region->cma);
138 pr_info("Assigned CMA %s to %s device\n", region->name,
139 dev_name(dev));
140 } else {
141 if (dma_declare_coherent_memory(dev, region->base, region->base,
142 region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
143 pr_info("Declared reserved memory %s to %s device\n",
144 region->name, dev_name(dev));
145 }
146}
147
148/**
149 * of_reserved_mem_device_release() - release reserved memory device structures
150 *
151 * This function releases structures allocated for memory region handling for
152 * the given device.
153 */
154void of_reserved_mem_device_release(struct device *dev)
155{
156 struct reserved_mem *region = get_dma_memory_region(dev);
157 if (!region && !region->cma)
158 dma_release_declared_memory(dev);
159}
160
161/**
162 * early_init_dt_scan_reserved_mem() - create reserved memory regions
163 *
164 * This function grabs memory from early allocator for device exclusive use
165 * defined in device tree structures. It should be called by arch specific code
166 * once the early allocator (memblock) has been activated and all other
167 * subsystems have already allocated/reserved memory.
168 */
169void __init early_init_dt_scan_reserved_mem(void)
170{
171 of_scan_flat_dt_by_path("/memory/reserved-memory",
172 fdt_scan_reserved_mem, NULL);
173}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 9b439ac63d8e..f6dcde220821 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,7 +21,6 @@
21#include <linux/of_device.h> 21#include <linux/of_device.h>
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_reserved_mem.h>
25#include <linux/platform_device.h> 24#include <linux/platform_device.h>
26 25
27const struct of_device_id of_default_bus_match_table[] = { 26const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
219 dev->dev.bus = &platform_bus_type; 218 dev->dev.bus = &platform_bus_type;
220 dev->dev.platform_data = platform_data; 219 dev->dev.platform_data = platform_data;
221 220
222 of_reserved_mem_device_init(&dev->dev);
223
224 /* We do not fill the DMA ops for platform devices by default. 221 /* We do not fill the DMA ops for platform devices by default.
225 * This is currently the responsibility of the platform code 222 * This is currently the responsibility of the platform code
226 * to do such, possibly using a device notifier 223 * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
228 225
229 if (of_device_add(dev) != 0) { 226 if (of_device_add(dev) != 0) {
230 platform_device_put(dev); 227 platform_device_put(dev);
231 of_reserved_mem_device_release(&dev->dev);
232 return NULL; 228 return NULL;
233 } 229 }
234 230
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0b7d23b4ad95..1ea75236a15f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -552,9 +552,8 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
552 struct acpiphp_func *func; 552 struct acpiphp_func *func;
553 int max, pass; 553 int max, pass;
554 LIST_HEAD(add_list); 554 LIST_HEAD(add_list);
555 int nr_found;
556 555
557 nr_found = acpiphp_rescan_slot(slot); 556 acpiphp_rescan_slot(slot);
558 max = acpiphp_max_busnr(bus); 557 max = acpiphp_max_busnr(bus);
559 for (pass = 0; pass < 2; pass++) { 558 for (pass = 0; pass < 2; pass++) {
560 list_for_each_entry(dev, &bus->devices, bus_list) { 559 list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
574 } 573 }
575 } 574 }
576 __pci_bus_assign_resources(bus, &add_list, NULL); 575 __pci_bus_assign_resources(bus, &add_list, NULL);
577 /* Nothing more to do here if there are no new devices on this bus. */
578 if (!nr_found && (slot->flags & SLOT_ENABLED))
579 return;
580 576
581 acpiphp_sanitize_bus(bus); 577 acpiphp_sanitize_bus(bus);
582 acpiphp_set_hpp_values(bus); 578 acpiphp_set_hpp_values(bus);
@@ -994,14 +990,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
994 990
995 /* 991 /*
996 * This bridge should have been registered as a hotplug function 992 * This bridge should have been registered as a hotplug function
997 * under its parent, so the context has to be there. If not, we 993 * under its parent, so the context should be there, unless the
998 * are in deep goo. 994 * parent is going to be handled by pciehp, in which case this
995 * bridge is not interesting to us either.
999 */ 996 */
1000 mutex_lock(&acpiphp_context_lock); 997 mutex_lock(&acpiphp_context_lock);
1001 context = acpiphp_get_context(handle); 998 context = acpiphp_get_context(handle);
1002 if (WARN_ON(!context)) { 999 if (!context) {
1003 mutex_unlock(&acpiphp_context_lock); 1000 mutex_unlock(&acpiphp_context_lock);
1004 put_device(&bus->dev); 1001 put_device(&bus->dev);
1002 pci_dev_put(bridge->pci_dev);
1005 kfree(bridge); 1003 kfree(bridge);
1006 return; 1004 return;
1007 } 1005 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 96d6b2eef4f2..b51a7460cc49 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -504,6 +504,7 @@ config ASUS_WMI
504 depends on BACKLIGHT_CLASS_DEVICE 504 depends on BACKLIGHT_CLASS_DEVICE
505 depends on RFKILL || RFKILL = n 505 depends on RFKILL || RFKILL = n
506 depends on HOTPLUG_PCI 506 depends on HOTPLUG_PCI
507 depends on ACPI_VIDEO || ACPI_VIDEO = n
507 select INPUT_SPARSEKMAP 508 select INPUT_SPARSEKMAP
508 select LEDS_CLASS 509 select LEDS_CLASS
509 select NEW_LEDS 510 select NEW_LEDS
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d3fd52036fd6..13ec195f0ca6 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
127 "default is -1 (automatic)"); 127 "default is -1 (automatic)");
128#endif 128#endif
129 129
130static int kbd_backlight = 1; 130static int kbd_backlight = -1;
131module_param(kbd_backlight, int, 0444); 131module_param(kbd_backlight, int, 0444);
132MODULE_PARM_DESC(kbd_backlight, 132MODULE_PARM_DESC(kbd_backlight,
133 "set this to 0 to disable keyboard backlight, " 133 "set this to 0 to disable keyboard backlight, "
134 "1 to enable it (default: 0)"); 134 "1 to enable it (default: no change from current value)");
135 135
136static int kbd_backlight_timeout; /* = 0 */ 136static int kbd_backlight_timeout = -1;
137module_param(kbd_backlight_timeout, int, 0444); 137module_param(kbd_backlight_timeout, int, 0444);
138MODULE_PARM_DESC(kbd_backlight_timeout, 138MODULE_PARM_DESC(kbd_backlight_timeout,
139 "set this to 0 to set the default 10 seconds timeout, " 139 "meaningful values vary from 0 to 3 and their meaning depends "
140 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 140 "on the model (default: no change from current value)");
141 "(default: 0)");
142 141
143#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
144static void sony_nc_kbd_backlight_resume(void); 143static void sony_nc_kbd_backlight_resume(void);
@@ -1844,6 +1843,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1844 if (!kbdbl_ctl) 1843 if (!kbdbl_ctl)
1845 return -ENOMEM; 1844 return -ENOMEM;
1846 1845
1846 kbdbl_ctl->mode = kbd_backlight;
1847 kbdbl_ctl->timeout = kbd_backlight_timeout;
1847 kbdbl_ctl->handle = handle; 1848 kbdbl_ctl->handle = handle;
1848 if (handle == 0x0137) 1849 if (handle == 0x0137)
1849 kbdbl_ctl->base = 0x0C00; 1850 kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1871,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1870 if (ret) 1871 if (ret)
1871 goto outmode; 1872 goto outmode;
1872 1873
1873 __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1874 __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
1874 __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout); 1875 __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
1875 1876
1876 return 0; 1877 return 0;
1877 1878
@@ -1886,17 +1887,8 @@ outkzalloc:
1886static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1887static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1887{ 1888{
1888 if (kbdbl_ctl) { 1889 if (kbdbl_ctl) {
1889 int result;
1890
1891 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); 1890 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1892 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); 1891 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1893
1894 /* restore the default hw behaviour */
1895 sony_call_snc_handle(kbdbl_ctl->handle,
1896 kbdbl_ctl->base | 0x10000, &result);
1897 sony_call_snc_handle(kbdbl_ctl->handle,
1898 kbdbl_ctl->base + 0x200, &result);
1899
1900 kfree(kbdbl_ctl); 1892 kfree(kbdbl_ctl);
1901 kbdbl_ctl = NULL; 1893 kbdbl_ctl = NULL;
1902 } 1894 }
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5adb2042e824..cee7e2708a1f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
2077 int intensity = 0; 2077 int intensity = 0;
2078 int r0_perm; 2078 int r0_perm;
2079 int nr_tracks; 2079 int nr_tracks;
2080 int use_prefix;
2080 2081
2081 startdev = dasd_alias_get_start_dev(base); 2082 startdev = dasd_alias_get_start_dev(base);
2082 if (!startdev) 2083 if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
2106 intensity = fdata->intensity; 2107 intensity = fdata->intensity;
2107 } 2108 }
2108 2109
2110 use_prefix = base_priv->features.feature[8] & 0x01;
2111
2109 switch (intensity) { 2112 switch (intensity) {
2110 case 0x00: /* Normal format */ 2113 case 0x00: /* Normal format */
2111 case 0x08: /* Normal format, use cdl. */ 2114 case 0x08: /* Normal format, use cdl. */
2112 cplength = 2 + (rpt*nr_tracks); 2115 cplength = 2 + (rpt*nr_tracks);
2113 datasize = sizeof(struct PFX_eckd_data) + 2116 if (use_prefix)
2114 sizeof(struct LO_eckd_data) + 2117 datasize = sizeof(struct PFX_eckd_data) +
2115 rpt * nr_tracks * sizeof(struct eckd_count); 2118 sizeof(struct LO_eckd_data) +
2119 rpt * nr_tracks * sizeof(struct eckd_count);
2120 else
2121 datasize = sizeof(struct DE_eckd_data) +
2122 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count);
2116 break; 2124 break;
2117 case 0x01: /* Write record zero and format track. */ 2125 case 0x01: /* Write record zero and format track. */
2118 case 0x09: /* Write record zero and format track, use cdl. */ 2126 case 0x09: /* Write record zero and format track, use cdl. */
2119 cplength = 2 + rpt * nr_tracks; 2127 cplength = 2 + rpt * nr_tracks;
2120 datasize = sizeof(struct PFX_eckd_data) + 2128 if (use_prefix)
2121 sizeof(struct LO_eckd_data) + 2129 datasize = sizeof(struct PFX_eckd_data) +
2122 sizeof(struct eckd_count) + 2130 sizeof(struct LO_eckd_data) +
2123 rpt * nr_tracks * sizeof(struct eckd_count); 2131 sizeof(struct eckd_count) +
2132 rpt * nr_tracks * sizeof(struct eckd_count);
2133 else
2134 datasize = sizeof(struct DE_eckd_data) +
2135 sizeof(struct LO_eckd_data) +
2136 sizeof(struct eckd_count) +
2137 rpt * nr_tracks * sizeof(struct eckd_count);
2124 break; 2138 break;
2125 case 0x04: /* Invalidate track. */ 2139 case 0x04: /* Invalidate track. */
2126 case 0x0c: /* Invalidate track, use cdl. */ 2140 case 0x0c: /* Invalidate track, use cdl. */
2127 cplength = 3; 2141 cplength = 3;
2128 datasize = sizeof(struct PFX_eckd_data) + 2142 if (use_prefix)
2129 sizeof(struct LO_eckd_data) + 2143 datasize = sizeof(struct PFX_eckd_data) +
2130 sizeof(struct eckd_count); 2144 sizeof(struct LO_eckd_data) +
2145 sizeof(struct eckd_count);
2146 else
2147 datasize = sizeof(struct DE_eckd_data) +
2148 sizeof(struct LO_eckd_data) +
2149 sizeof(struct eckd_count);
2131 break; 2150 break;
2132 default: 2151 default:
2133 dev_warn(&startdev->cdev->dev, 2152 dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
2147 2166
2148 switch (intensity & ~0x08) { 2167 switch (intensity & ~0x08) {
2149 case 0x00: /* Normal format. */ 2168 case 0x00: /* Normal format. */
2150 prefix(ccw++, (struct PFX_eckd_data *) data, 2169 if (use_prefix) {
2151 fdata->start_unit, fdata->stop_unit, 2170 prefix(ccw++, (struct PFX_eckd_data *) data,
2152 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2171 fdata->start_unit, fdata->stop_unit,
2153 /* grant subsystem permission to format R0 */ 2172 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2154 if (r0_perm) 2173 /* grant subsystem permission to format R0 */
2155 ((struct PFX_eckd_data *)data) 2174 if (r0_perm)
2156 ->define_extent.ga_extended |= 0x04; 2175 ((struct PFX_eckd_data *)data)
2157 data += sizeof(struct PFX_eckd_data); 2176 ->define_extent.ga_extended |= 0x04;
2177 data += sizeof(struct PFX_eckd_data);
2178 } else {
2179 define_extent(ccw++, (struct DE_eckd_data *) data,
2180 fdata->start_unit, fdata->stop_unit,
2181 DASD_ECKD_CCW_WRITE_CKD, startdev);
2182 /* grant subsystem permission to format R0 */
2183 if (r0_perm)
2184 ((struct DE_eckd_data *) data)
2185 ->ga_extended |= 0x04;
2186 data += sizeof(struct DE_eckd_data);
2187 }
2158 ccw[-1].flags |= CCW_FLAG_CC; 2188 ccw[-1].flags |= CCW_FLAG_CC;
2159 locate_record(ccw++, (struct LO_eckd_data *) data, 2189 locate_record(ccw++, (struct LO_eckd_data *) data,
2160 fdata->start_unit, 0, rpt*nr_tracks, 2190 fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
2163 data += sizeof(struct LO_eckd_data); 2193 data += sizeof(struct LO_eckd_data);
2164 break; 2194 break;
2165 case 0x01: /* Write record zero + format track. */ 2195 case 0x01: /* Write record zero + format track. */
2166 prefix(ccw++, (struct PFX_eckd_data *) data, 2196 if (use_prefix) {
2167 fdata->start_unit, fdata->stop_unit, 2197 prefix(ccw++, (struct PFX_eckd_data *) data,
2168 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2198 fdata->start_unit, fdata->stop_unit,
2169 base, startdev); 2199 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2170 data += sizeof(struct PFX_eckd_data); 2200 base, startdev);
2201 data += sizeof(struct PFX_eckd_data);
2202 } else {
2203 define_extent(ccw++, (struct DE_eckd_data *) data,
2204 fdata->start_unit, fdata->stop_unit,
2205 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
2206 data += sizeof(struct DE_eckd_data);
2207 }
2171 ccw[-1].flags |= CCW_FLAG_CC; 2208 ccw[-1].flags |= CCW_FLAG_CC;
2172 locate_record(ccw++, (struct LO_eckd_data *) data, 2209 locate_record(ccw++, (struct LO_eckd_data *) data,
2173 fdata->start_unit, 0, rpt * nr_tracks + 1, 2210 fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
2176 data += sizeof(struct LO_eckd_data); 2213 data += sizeof(struct LO_eckd_data);
2177 break; 2214 break;
2178 case 0x04: /* Invalidate track. */ 2215 case 0x04: /* Invalidate track. */
2179 prefix(ccw++, (struct PFX_eckd_data *) data, 2216 if (use_prefix) {
2180 fdata->start_unit, fdata->stop_unit, 2217 prefix(ccw++, (struct PFX_eckd_data *) data,
2181 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2218 fdata->start_unit, fdata->stop_unit,
2182 data += sizeof(struct PFX_eckd_data); 2219 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2220 data += sizeof(struct PFX_eckd_data);
2221 } else {
2222 define_extent(ccw++, (struct DE_eckd_data *) data,
2223 fdata->start_unit, fdata->stop_unit,
2224 DASD_ECKD_CCW_WRITE_CKD, startdev);
2225 data += sizeof(struct DE_eckd_data);
2226 }
2183 ccw[-1].flags |= CCW_FLAG_CC; 2227 ccw[-1].flags |= CCW_FLAG_CC;
2184 locate_record(ccw++, (struct LO_eckd_data *) data, 2228 locate_record(ccw++, (struct LO_eckd_data *) data,
2185 fdata->start_unit, 0, 1, 2229 fdata->start_unit, 0, 1,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a3aa374799dc..1fe264379e0d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
486 timeout = 0; 486 timeout = 0;
487 if (timer_pending(&sclp_request_timer)) { 487 if (timer_pending(&sclp_request_timer)) {
488 /* Get timeout TOD value */ 488 /* Get timeout TOD value */
489 timeout = get_tod_clock() + 489 timeout = get_tod_clock_fast() +
490 sclp_tod_from_jiffies(sclp_request_timer.expires - 490 sclp_tod_from_jiffies(sclp_request_timer.expires -
491 jiffies); 491 jiffies);
492 } 492 }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
508 while (sclp_running_state != sclp_running_state_idle) { 508 while (sclp_running_state != sclp_running_state_idle) {
509 /* Check for expired request timer */ 509 /* Check for expired request timer */
510 if (timer_pending(&sclp_request_timer) && 510 if (timer_pending(&sclp_request_timer) &&
511 get_tod_clock() > timeout && 511 get_tod_clock_fast() > timeout &&
512 del_timer(&sclp_request_timer)) 512 del_timer(&sclp_request_timer))
513 sclp_request_timer.function(sclp_request_timer.data); 513 sclp_request_timer.function(sclp_request_timer.data);
514 cpu_relax(); 514 cpu_relax();
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a0..cf31d3321dab 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
313 int ret; 313 int ret;
314 314
315 dev_num = iminor(inode); 315 dev_num = iminor(inode);
316 if (dev_num > MAXMINOR) 316 if (dev_num >= MAXMINOR)
317 return -ENODEV; 317 return -ENODEV;
318 logptr = &sys_ser[dev_num]; 318 logptr = &sys_ser[dev_num];
319 319
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d7da67a31c77..88e35d85d205 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -878,9 +878,9 @@ static void css_reset(void)
878 atomic_inc(&chpid_reset_count); 878 atomic_inc(&chpid_reset_count);
879 } 879 }
880 /* Wait for machine check for all channel paths. */ 880 /* Wait for machine check for all channel paths. */
881 timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); 881 timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
882 while (atomic_read(&chpid_reset_count) != 0) { 882 while (atomic_read(&chpid_reset_count) != 0) {
883 if (get_tod_clock() > timeout) 883 if (get_tod_clock_fast() > timeout)
884 break; 884 break;
885 cpu_relax(); 885 cpu_relax();
886 } 886 }
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 8ed52aa49122..bbd3e511c771 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
338 retries++; 338 retries++;
339 339
340 if (!start_time) { 340 if (!start_time) {
341 start_time = get_tod_clock(); 341 start_time = get_tod_clock_fast();
342 goto again; 342 goto again;
343 } 343 }
344 if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
345 goto again; 345 goto again;
346 } 346 }
347 if (retries) { 347 if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
504 int count, stop; 504 int count, stop;
505 unsigned char state = 0; 505 unsigned char state = 0;
506 506
507 q->timestamp = get_tod_clock(); 507 q->timestamp = get_tod_clock_fast();
508 508
509 /* 509 /*
510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
595 * At this point we know, that inbound first_to_check 595 * At this point we know, that inbound first_to_check
596 * has (probably) not moved (see qdio_inbound_processing). 596 * has (probably) not moved (see qdio_inbound_processing).
597 */ 597 */
598 if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
600 q->first_to_check); 600 q->first_to_check);
601 return 1; 601 return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
728 int count, stop; 728 int count, stop;
729 unsigned char state = 0; 729 unsigned char state = 0;
730 730
731 q->timestamp = get_tod_clock(); 731 q->timestamp = get_tod_clock_fast();
732 732
733 if (need_siga_sync(q)) 733 if (need_siga_sync(q))
734 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 734 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index feab3a5e50b5..757eb0716d45 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, 696 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, 697 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
698 pci_device)) != NULL) { 698 pci_device)) != NULL) {
699 struct blogic_adapter *adapter = adapter; 699 struct blogic_adapter *host_adapter = adapter;
700 struct blogic_adapter_info adapter_info; 700 struct blogic_adapter_info adapter_info;
701 enum blogic_isa_ioport mod_ioaddr_req; 701 enum blogic_isa_ioport mod_ioaddr_req;
702 unsigned char bus; 702 unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
744 known and enabled, note that the particular Standard ISA I/O 744 known and enabled, note that the particular Standard ISA I/O
745 Address should not be probed. 745 Address should not be probed.
746 */ 746 */
747 adapter->io_addr = io_addr; 747 host_adapter->io_addr = io_addr;
748 blogic_intreset(adapter); 748 blogic_intreset(host_adapter);
749 if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, 749 if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
750 &adapter_info, sizeof(adapter_info)) == 750 &adapter_info, sizeof(adapter_info)) ==
751 sizeof(adapter_info)) { 751 sizeof(adapter_info)) {
752 if (adapter_info.isa_port < 6) 752 if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
762 I/O Address assigned at system initialization. 762 I/O Address assigned at system initialization.
763 */ 763 */
764 mod_ioaddr_req = BLOGIC_IO_DISABLE; 764 mod_ioaddr_req = BLOGIC_IO_DISABLE;
765 blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, 765 blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
766 sizeof(mod_ioaddr_req), NULL, 0); 766 sizeof(mod_ioaddr_req), NULL, 0);
767 /* 767 /*
768 For the first MultiMaster Host Adapter enumerated, 768 For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
779 779
780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45; 780 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
781 fetch_localram.count = sizeof(autoscsi_byte45); 781 fetch_localram.count = sizeof(autoscsi_byte45);
782 blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, 782 blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
783 &fetch_localram, sizeof(fetch_localram), 783 &fetch_localram, sizeof(fetch_localram),
784 &autoscsi_byte45, 784 &autoscsi_byte45,
785 sizeof(autoscsi_byte45)); 785 sizeof(autoscsi_byte45));
786 blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, 786 blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
787 sizeof(id)); 787 &id, sizeof(id));
788 if (id.fw_ver_digit1 == '5') 788 if (id.fw_ver_digit1 == '5')
789 force_scan_order = 789 force_scan_order =
790 autoscsi_byte45.force_scan_order; 790 autoscsi_byte45.force_scan_order;
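
A minimal, hypothetical reduction of the shadowing bug this hunk fixes: the inner declaration shadowed the function parameter and initialized the new pointer with itself, leaving it indeterminate; renaming the local restores the intended copy of the parameter. Type and function names below are stand-ins, not the driver's own.

struct adapter;                                  /* stand-in for struct blogic_adapter */

static void probe_one(struct adapter *adapter)
{
	struct adapter *host_adapter = adapter;  /* fixed form: copies the parameter */
	/* buggy form was: struct adapter *adapter = adapter;
	 * which shadows the parameter and self-initializes */
	(void)host_adapter;
}
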
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42ef787a..f0d432c139d0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 771static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
772{ 772{
773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 773 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
774 if (!capable(CAP_SYS_RAWIO))
775 return -EPERM;
774 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); 776 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
775} 777}
776 778
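
A condensed sketch (hypothetical wrapper name) of the privilege gate added above: the compat ioctl path now refuses unprivileged callers before forwarding the request to the common handler.

#include <linux/capability.h>

static int example_compat_ioctl(struct scsi_device *sdev, int cmd,
				void __user *arg)
{
	if (!capable(CAP_SYS_RAWIO))	/* raw SCSI ioctls require CAP_SYS_RAWIO */
		return -EPERM;
	/* ... forward to the common ioctl handler as before ... */
	return 0;
}
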
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2ef497ebadc0..ee5c1833eb73 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -20,7 +20,7 @@
20 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 20 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
21 * | | | 0x2011-0x2012, | 21 * | | | 0x2011-0x2012, |
22 * | | | 0x2016 | 22 * | | | 0x2016 |
23 * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b | 23 * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b |
24 * | | | 0x3027-0x3028 | 24 * | | | 0x3027-0x3028 |
25 * | | | 0x303d-0x3041 | 25 * | | | 0x303d-0x3041 |
26 * | | | 0x302d,0x3033 | 26 * | | | 0x302d,0x3033 |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938c..ff9c86b1a0d8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1957 que = MSW(sts->handle); 1957 que = MSW(sts->handle);
1958 req = ha->req_q_map[que]; 1958 req = ha->req_q_map[que];
1959 1959
1960 /* Check for invalid queue pointer */
1961 if (req == NULL ||
1962 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
1963 ql_dbg(ql_dbg_io, vha, 0x3059,
1964 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1965 "que=%u.\n", sts->handle, req, que);
1966 return;
1967 }
1968
1960 /* Validate handle. */ 1969 /* Validate handle. */
1961 if (handle < req->num_outstanding_cmds) 1970 if (handle < req->num_outstanding_cmds)
1962 sp = req->outstanding_cmds[handle]; 1971 sp = req->outstanding_cmds[handle];
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e62d17d41d4e..5693f6d7eddb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2854 gd->events |= DISK_EVENT_MEDIA_CHANGE; 2854 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2855 } 2855 }
2856 2856
2857 blk_pm_runtime_init(sdp->request_queue, dev);
2857 add_disk(gd); 2858 add_disk(gd);
2858 if (sdkp->capacity) 2859 if (sdkp->capacity)
2859 sd_dif_config_host(sdkp); 2860 sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2862 2863
2863 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 2864 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2864 sdp->removable ? "removable " : ""); 2865 sdp->removable ? "removable " : "");
2865 blk_pm_runtime_init(sdp->request_queue, dev);
2866 scsi_autopm_put_device(sdp); 2866 scsi_autopm_put_device(sdp);
2867 put_device(&sdkp->dev); 2867 put_device(&sdkp->dev);
2868} 2868}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5cbc4bb1b395..df5e961484e1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
105static int sg_add(struct device *, struct class_interface *); 105static int sg_add(struct device *, struct class_interface *);
106static void sg_remove(struct device *, struct class_interface *); 106static void sg_remove(struct device *, struct class_interface *);
107 107
108static DEFINE_SPINLOCK(sg_open_exclusive_lock);
109
108static DEFINE_IDR(sg_index_idr); 110static DEFINE_IDR(sg_index_idr);
109static DEFINE_RWLOCK(sg_index_lock); 111static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
112 file descriptor list for device */
110 113
111static struct class_interface sg_interface = { 114static struct class_interface sg_interface = {
112 .add_dev = sg_add, 115 .add_dev = sg_add,
@@ -143,7 +146,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
143} Sg_request; 146} Sg_request;
144 147
145typedef struct sg_fd { /* holds the state of a file descriptor */ 148typedef struct sg_fd { /* holds the state of a file descriptor */
146 struct list_head sfd_siblings; /* protected by sfd_lock of device */ 149 /* sfd_siblings is protected by sg_index_lock */
150 struct list_head sfd_siblings;
147 struct sg_device *parentdp; /* owning device */ 151 struct sg_device *parentdp; /* owning device */
148 wait_queue_head_t read_wait; /* queue read until command done */ 152 wait_queue_head_t read_wait; /* queue read until command done */
149 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 153 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -166,12 +170,13 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
166 170
167typedef struct sg_device { /* holds the state of each scsi generic device */ 171typedef struct sg_device { /* holds the state of each scsi generic device */
168 struct scsi_device *device; 172 struct scsi_device *device;
173 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
169 int sg_tablesize; /* adapter's max scatter-gather table size */ 174 int sg_tablesize; /* adapter's max scatter-gather table size */
170 u32 index; /* device index number */ 175 u32 index; /* device index number */
171 spinlock_t sfd_lock; /* protect file descriptor list for device */ 176 /* sfds is protected by sg_index_lock */
172 struct list_head sfds; 177 struct list_head sfds;
173 struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
174 volatile char detached; /* 0->attached, 1->detached pending removal */ 178 volatile char detached; /* 0->attached, 1->detached pending removal */
179 /* exclude protected by sg_open_exclusive_lock */
175 char exclude; /* opened for exclusive access */ 180 char exclude; /* opened for exclusive access */
176 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 181 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
177 struct gendisk *disk; 182 struct gendisk *disk;
@@ -220,14 +225,35 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
220 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 225 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
221} 226}
222 227
228static int get_exclude(Sg_device *sdp)
229{
230 unsigned long flags;
231 int ret;
232
233 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
234 ret = sdp->exclude;
235 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
236 return ret;
237}
238
239static int set_exclude(Sg_device *sdp, char val)
240{
241 unsigned long flags;
242
243 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
244 sdp->exclude = val;
245 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
246 return val;
247}
248
223static int sfds_list_empty(Sg_device *sdp) 249static int sfds_list_empty(Sg_device *sdp)
224{ 250{
225 unsigned long flags; 251 unsigned long flags;
226 int ret; 252 int ret;
227 253
228 spin_lock_irqsave(&sdp->sfd_lock, flags); 254 read_lock_irqsave(&sg_index_lock, flags);
229 ret = list_empty(&sdp->sfds); 255 ret = list_empty(&sdp->sfds);
230 spin_unlock_irqrestore(&sdp->sfd_lock, flags); 256 read_unlock_irqrestore(&sg_index_lock, flags);
231 return ret; 257 return ret;
232} 258}
233 259
@@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)
239 struct request_queue *q; 265 struct request_queue *q;
240 Sg_device *sdp; 266 Sg_device *sdp;
241 Sg_fd *sfp; 267 Sg_fd *sfp;
268 int res;
242 int retval; 269 int retval;
243 270
244 nonseekable_open(inode, filp); 271 nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)
267 goto error_out; 294 goto error_out;
268 } 295 }
269 296
270 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { 297 if (flags & O_EXCL) {
271 retval = -EPERM; /* Can't lock it with read only access */ 298 if (O_RDONLY == (flags & O_ACCMODE)) {
272 goto error_out; 299 retval = -EPERM; /* Can't lock it with read only access */
273 } 300 goto error_out;
274 if (flags & O_NONBLOCK) { 301 }
275 if (flags & O_EXCL) { 302 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
276 if (!down_write_trylock(&sdp->o_sem)) { 303 retval = -EBUSY;
277 retval = -EBUSY; 304 goto error_out;
278 goto error_out; 305 }
279 } 306 res = wait_event_interruptible(sdp->o_excl_wait,
280 } else { 307 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
281 if (!down_read_trylock(&sdp->o_sem)) { 308 if (res) {
282 retval = -EBUSY; 309 retval = res; /* -ERESTARTSYS because signal hit process */
283 goto error_out; 310 goto error_out;
284 } 311 }
312 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
313 if (flags & O_NONBLOCK) {
314 retval = -EBUSY;
315 goto error_out;
316 }
317 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
318 if (res) {
319 retval = res; /* -ERESTARTSYS because signal hit process */
320 goto error_out;
285 } 321 }
286 } else {
287 if (flags & O_EXCL)
288 down_write(&sdp->o_sem);
289 else
290 down_read(&sdp->o_sem);
291 } 322 }
292 /* Since write lock is held, no need to check sfd_list */ 323 if (sdp->detached) {
293 if (flags & O_EXCL) 324 retval = -ENODEV;
294 sdp->exclude = 1; /* used by release lock */ 325 goto error_out;
295 326 }
296 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 327 if (sfds_list_empty(sdp)) { /* no existing opens on this device */
297 sdp->sgdebug = 0; 328 sdp->sgdebug = 0;
298 q = sdp->device->request_queue; 329 q = sdp->device->request_queue;
299 sdp->sg_tablesize = queue_max_segments(q); 330 sdp->sg_tablesize = queue_max_segments(q);
300 } 331 }
301 sfp = sg_add_sfp(sdp, dev); 332 if ((sfp = sg_add_sfp(sdp, dev)))
302 if (!IS_ERR(sfp))
303 filp->private_data = sfp; 333 filp->private_data = sfp;
304 /* retval is already provably zero at this point because of the
305 * check after retval = scsi_autopm_get_device(sdp->device))
306 */
307 else { 334 else {
308 retval = PTR_ERR(sfp);
309
310 if (flags & O_EXCL) { 335 if (flags & O_EXCL) {
311 sdp->exclude = 0; /* undo if error */ 336 set_exclude(sdp, 0); /* undo if error */
312 up_write(&sdp->o_sem); 337 wake_up_interruptible(&sdp->o_excl_wait);
313 } else 338 }
314 up_read(&sdp->o_sem); 339 retval = -ENOMEM;
340 goto error_out;
341 }
342 retval = 0;
315error_out: 343error_out:
344 if (retval) {
316 scsi_autopm_put_device(sdp->device); 345 scsi_autopm_put_device(sdp->device);
317sdp_put: 346sdp_put:
318 scsi_device_put(sdp->device); 347 scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)
329{ 358{
330 Sg_device *sdp; 359 Sg_device *sdp;
331 Sg_fd *sfp; 360 Sg_fd *sfp;
332 int excl;
333 361
334 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 362 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
335 return -ENXIO; 363 return -ENXIO;
336 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 364 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
337 365
338 excl = sdp->exclude; 366 set_exclude(sdp, 0);
339 sdp->exclude = 0; 367 wake_up_interruptible(&sdp->o_excl_wait);
340 if (excl)
341 up_write(&sdp->o_sem);
342 else
343 up_read(&sdp->o_sem);
344 368
345 scsi_autopm_put_device(sdp->device); 369 scsi_autopm_put_device(sdp->device);
346 kref_put(&sfp->f_ref, sg_remove_sfp); 370 kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1391 disk->first_minor = k; 1415 disk->first_minor = k;
1392 sdp->disk = disk; 1416 sdp->disk = disk;
1393 sdp->device = scsidp; 1417 sdp->device = scsidp;
1394 spin_lock_init(&sdp->sfd_lock);
1395 INIT_LIST_HEAD(&sdp->sfds); 1418 INIT_LIST_HEAD(&sdp->sfds);
1396 init_rwsem(&sdp->o_sem); 1419 init_waitqueue_head(&sdp->o_excl_wait);
1397 sdp->sg_tablesize = queue_max_segments(q); 1420 sdp->sg_tablesize = queue_max_segments(q);
1398 sdp->index = k; 1421 sdp->index = k;
1399 kref_init(&sdp->d_ref); 1422 kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1526 1549
1527 /* Need a write lock to set sdp->detached. */ 1550 /* Need a write lock to set sdp->detached. */
1528 write_lock_irqsave(&sg_index_lock, iflags); 1551 write_lock_irqsave(&sg_index_lock, iflags);
1529 spin_lock(&sdp->sfd_lock);
1530 sdp->detached = 1; 1552 sdp->detached = 1;
1531 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1553 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1532 wake_up_interruptible(&sfp->read_wait); 1554 wake_up_interruptible(&sfp->read_wait);
1533 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1555 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1534 } 1556 }
1535 spin_unlock(&sdp->sfd_lock);
1536 write_unlock_irqrestore(&sg_index_lock, iflags); 1557 write_unlock_irqrestore(&sg_index_lock, iflags);
1537 1558
1538 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1559 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2043 2064
2044 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2065 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2045 if (!sfp) 2066 if (!sfp)
2046 return ERR_PTR(-ENOMEM); 2067 return NULL;
2047 2068
2048 init_waitqueue_head(&sfp->read_wait); 2069 init_waitqueue_head(&sfp->read_wait);
2049 rwlock_init(&sfp->rq_list_lock); 2070 rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
2057 sfp->cmd_q = SG_DEF_COMMAND_Q; 2078 sfp->cmd_q = SG_DEF_COMMAND_Q;
2058 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2079 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2059 sfp->parentdp = sdp; 2080 sfp->parentdp = sdp;
2060 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2081 write_lock_irqsave(&sg_index_lock, iflags);
2061 if (sdp->detached) {
2062 spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
2063 return ERR_PTR(-ENODEV);
2064 }
2065 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2082 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2066 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2083 write_unlock_irqrestore(&sg_index_lock, iflags);
2067 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2084 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2068 if (unlikely(sg_big_buff != def_reserved_size)) 2085 if (unlikely(sg_big_buff != def_reserved_size))
2069 sg_big_buff = def_reserved_size; 2086 sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@ static void sg_remove_sfp(struct kref *kref)
2113 struct sg_device *sdp = sfp->parentdp; 2130 struct sg_device *sdp = sfp->parentdp;
2114 unsigned long iflags; 2131 unsigned long iflags;
2115 2132
2116 spin_lock_irqsave(&sdp->sfd_lock, iflags); 2133 write_lock_irqsave(&sg_index_lock, iflags);
2117 list_del(&sfp->sfd_siblings); 2134 list_del(&sfp->sfd_siblings);
2118 spin_unlock_irqrestore(&sdp->sfd_lock, iflags); 2135 write_unlock_irqrestore(&sg_index_lock, iflags);
2136 wake_up_interruptible(&sdp->o_excl_wait);
2119 2137
2120 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2138 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2121 schedule_work(&sfp->ew.work); 2139 schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2502 return 0; 2520 return 0;
2503} 2521}
2504 2522
2505/* must be called while holding sg_index_lock and sfd_lock */ 2523/* must be called while holding sg_index_lock */
2506static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2524static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2507{ 2525{
2508 int k, m, new_interface, blen, usg; 2526 int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2587 2605
2588 read_lock_irqsave(&sg_index_lock, iflags); 2606 read_lock_irqsave(&sg_index_lock, iflags);
2589 sdp = it ? sg_lookup_dev(it->index) : NULL; 2607 sdp = it ? sg_lookup_dev(it->index) : NULL;
2590 if (sdp) { 2608 if (sdp && !list_empty(&sdp->sfds)) {
2591 spin_lock(&sdp->sfd_lock); 2609 struct scsi_device *scsidp = sdp->device;
2592 if (!list_empty(&sdp->sfds)) {
2593 struct scsi_device *scsidp = sdp->device;
2594 2610
2595 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2611 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2596 if (sdp->detached) 2612 if (sdp->detached)
2597 seq_printf(s, "detached pending close "); 2613 seq_printf(s, "detached pending close ");
2598 else 2614 else
2599 seq_printf 2615 seq_printf
2600 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2616 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2601 scsidp->host->host_no, 2617 scsidp->host->host_no,
2602 scsidp->channel, scsidp->id, 2618 scsidp->channel, scsidp->id,
2603 scsidp->lun, 2619 scsidp->lun,
2604 scsidp->host->hostt->emulated); 2620 scsidp->host->hostt->emulated);
2605 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2621 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2606 sdp->sg_tablesize, sdp->exclude); 2622 sdp->sg_tablesize, get_exclude(sdp));
2607 sg_proc_debug_helper(s, sdp); 2623 sg_proc_debug_helper(s, sdp);
2608 }
2609 spin_unlock(&sdp->sfd_lock);
2610 } 2624 }
2611 read_unlock_irqrestore(&sg_index_lock, iflags); 2625 read_unlock_irqrestore(&sg_index_lock, iflags);
2612 return 0; 2626 return 0;
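
Reading the O_EXCL logic introduced above: exclusivity is now a flag guarded by sg_open_exclusive_lock plus a wait queue, rather than a per-device rw_semaphore. The wait condition is the subtle part; restated from the patch's own code:

/* Sleep until the device has no other open file descriptors and no
 * exclusive holder; at that point set_exclude(sdp, 1) claims the device
 * and its non-zero return value completes the wait.
 */
res = wait_event_interruptible(sdp->o_excl_wait,
	((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
if (res)
	return res;	/* -ERESTARTSYS: interrupted by a signal */
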
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f91bc1fdd895..639ba96adb36 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@ cntrlEnd:
1960 1960
1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); 1961 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
1962 1962
1963 memset(&DevInfo, 0, sizeof(DevInfo));
1963 DevInfo.MaxRDMBufferSize = BUFFER_4K; 1964 DevInfo.MaxRDMBufferSize = BUFFER_4K;
1964 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; 1965 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
1965 DevInfo.u32RxAlignmentCorrection = 0; 1966 DevInfo.u32RxAlignmentCorrection = 0;
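
The memset() added above follows the usual zero-before-copy rule for structures handed to user space: padding bytes and any fields left unset would otherwise leak kernel stack contents. A self-contained, hypothetical illustration of the pattern:

#include <linux/string.h>	/* memset */
#include <linux/uaccess.h>	/* copy_to_user */
#include <linux/types.h>	/* u32, u8 */

struct dev_info {
	u32 max_buf;
	u8  flags;		/* compiler may insert padding after this member */
	u32 start_off;
};

static long fill_dev_info(void __user *arg)
{
	struct dev_info info;

	memset(&info, 0, sizeof(info));	/* clear padding and unset fields */
	info.max_buf = 4096;
	info.start_off = 0x200;

	return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
}
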
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
index b94a95a597d6..76d5bbd4d93c 100644
--- a/drivers/staging/media/msi3101/Kconfig
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -1,3 +1,4 @@
1config USB_MSI3101 1config USB_MSI3101
2 tristate "Mirics MSi3101 SDR Dongle" 2 tristate "Mirics MSi3101 SDR Dongle"
3 depends on USB && VIDEO_DEV && VIDEO_V4L2 3 depends on USB && VIDEO_DEV && VIDEO_V4L2
4 select VIDEOBUF2_VMALLOC
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
index 24c7b70a6cbf..4c3bf776bb20 100644
--- a/drivers/staging/media/msi3101/sdr-msi3101.c
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
1131 /* Absolute min and max number of buffers available for mmap() */ 1131 /* Absolute min and max number of buffers available for mmap() */
1132 *nbuffers = 32; 1132 *nbuffers = 32;
1133 *nplanes = 1; 1133 *nplanes = 1;
1134 sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */ 1134 /*
1135 * 3, wMaxPacketSize 3x 1024 bytes
1136 * 504, max IQ sample pairs per 1024 frame
1137 * 2, two samples, I and Q
1138 * 4, 32-bit float
1139 */
1140 sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
1135 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n", 1141 dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
1136 __func__, *nbuffers, sizes[0]); 1142 __func__, *nbuffers, sizes[0]);
1137 return 0; 1143 return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1657 f->frequency * 625UL / 10UL); 1663 f->frequency * 625UL / 10UL);
1658} 1664}
1659 1665
1660const struct v4l2_ioctl_ops msi3101_ioctl_ops = { 1666static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
1661 .vidioc_querycap = msi3101_querycap, 1667 .vidioc_querycap = msi3101_querycap,
1662 1668
1663 .vidioc_enum_input = msi3101_enum_input, 1669 .vidioc_enum_input = msi3101_enum_input,
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ccb64fb0786..6ce0af9977d8 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
155 struct oz_app_hdr *app_hdr; 155 struct oz_app_hdr *app_hdr;
156 struct oz_serial_ctx *ctx; 156 struct oz_serial_ctx *ctx;
157 157
158 if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
159 return -EINVAL;
160
158 spin_lock_bh(&g_cdev.lock); 161 spin_lock_bh(&g_cdev.lock);
159 pd = g_cdev.active_pd; 162 pd = g_cdev.active_pd;
160 if (pd) 163 if (pd)
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 23db32f07fd5..a10cdb17038b 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
1063 1063
1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt) 1064static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
1065{ 1065{
1066 struct serial_icounter_struct icount; 1066 struct serial_icounter_struct icount = {};
1067 struct sb_uart_icount cnow; 1067 struct sb_uart_icount cnow;
1068 struct sb_uart_port *port = state->port; 1068 struct sb_uart_port *port = state->port;
1069 1069
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e154d28..7e10dcdc3090 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
570 ltv_t *pLtv; 570 ltv_t *pLtv;
571 bool_t ltvAllocated = FALSE; 571 bool_t ltvAllocated = FALSE;
572 ENCSTRCT sEncryption; 572 ENCSTRCT sEncryption;
573 size_t len;
573 574
574#ifdef USE_WDS 575#ifdef USE_WDS
575 hcf_16 hcfPort = HCF_PORT_0; 576 hcf_16 hcfPort = HCF_PORT_0;
@@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
686 break; 687 break;
687 case CFG_CNF_OWN_NAME: 688 case CFG_CNF_OWN_NAME:
688 memset(lp->StationName, 0, sizeof(lp->StationName)); 689 memset(lp->StationName, 0, sizeof(lp->StationName));
689 memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]); 690 len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
691 strlcpy(lp->StationName, &pLtv->u.u8[2], len);
690 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]); 692 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
691 break; 693 break;
692 case CFG_CNF_LOAD_BALANCING: 694 case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
1783{ 1785{
1784 struct wl_private *lp = wl_priv(dev); 1786 struct wl_private *lp = wl_priv(dev);
1785 unsigned long flags; 1787 unsigned long flags;
1788 size_t len;
1786 int ret = 0; 1789 int ret = 0;
1787 /*------------------------------------------------------------------------*/ 1790 /*------------------------------------------------------------------------*/
1788 1791
@@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
1793 wl_lock(lp, &flags); 1796 wl_lock(lp, &flags);
1794 1797
1795 memset(lp->StationName, 0, sizeof(lp->StationName)); 1798 memset(lp->StationName, 0, sizeof(lp->StationName));
1796 1799 len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
1797 memcpy(lp->StationName, extra, wrqu->data.length); 1800 strlcpy(lp->StationName, extra, len);
1798 1801
1799 /* Commit the adapter parameters */ 1802 /* Commit the adapter parameters */
1800 wl_apply(lp); 1803 wl_apply(lp);
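
Both hunks above replace an unbounded memcpy() of a caller-supplied length with a clamped strlcpy(). A minimal sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>	/* min_t */
#include <linux/string.h>	/* memset, strlcpy */

static void set_station_name(char *dst, size_t dst_size,
			     const char *src, size_t request_len)
{
	size_t len = min_t(size_t, request_len, dst_size);	/* clamp to destination */

	memset(dst, 0, dst_size);
	strlcpy(dst, src, len);	/* copies at most len - 1 bytes and NUL-terminates */
}
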
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 551c96ca60ac..0f199f6a0738 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
134 * pSCSI Host ID and enable for phba mode 134 * pSCSI Host ID and enable for phba mode
135 */ 135 */
136 sh = scsi_host_lookup(phv->phv_host_id); 136 sh = scsi_host_lookup(phv->phv_host_id);
137 if (IS_ERR(sh)) { 137 if (!sh) {
138 pr_err("pSCSI: Unable to locate SCSI Host for" 138 pr_err("pSCSI: Unable to locate SCSI Host for"
139 " phv_host_id: %d\n", phv->phv_host_id); 139 " phv_host_id: %d\n", phv->phv_host_id);
140 return PTR_ERR(sh); 140 return -EINVAL;
141 } 141 }
142 142
143 phv->phv_lld_host = sh; 143 phv->phv_lld_host = sh;
@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
515 sh = phv->phv_lld_host; 515 sh = phv->phv_lld_host;
516 } else { 516 } else {
517 sh = scsi_host_lookup(pdv->pdv_host_id); 517 sh = scsi_host_lookup(pdv->pdv_host_id);
518 if (IS_ERR(sh)) { 518 if (!sh) {
519 pr_err("pSCSI: Unable to locate" 519 pr_err("pSCSI: Unable to locate"
520 " pdv_host_id: %d\n", pdv->pdv_host_id); 520 " pdv_host_id: %d\n", pdv->pdv_host_id);
521 return PTR_ERR(sh); 521 return -EINVAL;
522 } 522 }
523 } 523 }
524 } else { 524 } else {
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4714c6f8da4b..d9b92b2c524d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
263 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 263 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
264 return TCM_INVALID_CDB_FIELD; 264 return TCM_INVALID_CDB_FIELD;
265 } 265 }
266 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
267 if (flags[0] & 0x10) {
268 pr_warn("WRITE SAME with ANCHOR not supported\n");
269 return TCM_INVALID_CDB_FIELD;
270 }
266 /* 271 /*
267 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 272 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
268 * translated into block discard requests within backend code. 273 * translated into block discard requests within backend code.
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 3da4fd10b9f8..474cd44fac14 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
82 mutex_lock(&g_device_mutex); 82 mutex_lock(&g_device_mutex);
83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 83 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
84 84
85 if (!se_dev->dev_attrib.emulate_3pc)
86 continue;
87
85 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); 88 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
86 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); 89 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
87 90
@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
357 struct se_cmd se_cmd; 360 struct se_cmd se_cmd;
358 struct xcopy_op *xcopy_op; 361 struct xcopy_op *xcopy_op;
359 struct completion xpt_passthrough_sem; 362 struct completion xpt_passthrough_sem;
363 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
360}; 364};
361 365
362static struct se_port xcopy_pt_port; 366static struct se_port xcopy_pt_port;
@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
675 679
676 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", 680 pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
677 se_cmd->scsi_status); 681 se_cmd->scsi_status);
678 return 0; 682
683 return (se_cmd->scsi_status) ? -EINVAL : 0;
679} 684}
680 685
681static int target_xcopy_read_source( 686static int target_xcopy_read_source(
@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
708 (unsigned long long)src_lba, src_sectors, length); 713 (unsigned long long)src_lba, src_sectors, length);
709 714
710 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 715 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
711 DMA_FROM_DEVICE, 0, NULL); 716 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
712 xop->src_pt_cmd = xpt_cmd; 717 xop->src_pt_cmd = xpt_cmd;
713 718
714 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], 719 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
768 (unsigned long long)dst_lba, dst_sectors, length); 773 (unsigned long long)dst_lba, dst_sectors, length);
769 774
770 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length, 775 transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
771 DMA_TO_DEVICE, 0, NULL); 776 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
772 xop->dst_pt_cmd = xpt_cmd; 777 xop->dst_pt_cmd = xpt_cmd;
773 778
774 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0], 779 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
@@ -884,30 +889,42 @@ out:
884 889
885sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 890sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
886{ 891{
892 struct se_device *dev = se_cmd->se_dev;
887 struct xcopy_op *xop = NULL; 893 struct xcopy_op *xop = NULL;
888 unsigned char *p = NULL, *seg_desc; 894 unsigned char *p = NULL, *seg_desc;
889 unsigned int list_id, list_id_usage, sdll, inline_dl, sa; 895 unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
896 sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
890 int rc; 897 int rc;
891 unsigned short tdll; 898 unsigned short tdll;
892 899
900 if (!dev->dev_attrib.emulate_3pc) {
901 pr_err("EXTENDED_COPY operation explicitly disabled\n");
902 return TCM_UNSUPPORTED_SCSI_OPCODE;
903 }
904
893 sa = se_cmd->t_task_cdb[1] & 0x1f; 905 sa = se_cmd->t_task_cdb[1] & 0x1f;
894 if (sa != 0x00) { 906 if (sa != 0x00) {
895 pr_err("EXTENDED_COPY(LID4) not supported\n"); 907 pr_err("EXTENDED_COPY(LID4) not supported\n");
896 return TCM_UNSUPPORTED_SCSI_OPCODE; 908 return TCM_UNSUPPORTED_SCSI_OPCODE;
897 } 909 }
898 910
911 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
912 if (!xop) {
913 pr_err("Unable to allocate xcopy_op\n");
914 return TCM_OUT_OF_RESOURCES;
915 }
916 xop->xop_se_cmd = se_cmd;
917
899 p = transport_kmap_data_sg(se_cmd); 918 p = transport_kmap_data_sg(se_cmd);
900 if (!p) { 919 if (!p) {
901 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); 920 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
921 kfree(xop);
902 return TCM_OUT_OF_RESOURCES; 922 return TCM_OUT_OF_RESOURCES;
903 } 923 }
904 924
905 list_id = p[0]; 925 list_id = p[0];
906 if (list_id != 0x00) { 926 list_id_usage = (p[1] & 0x18) >> 3;
907 pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id); 927
908 goto out;
909 }
910 list_id_usage = (p[1] & 0x18);
911 /* 928 /*
912 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH 929 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
913 */ 930 */
@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
920 goto out; 937 goto out;
921 } 938 }
922 939
923 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
924 if (!xop) {
925 pr_err("Unable to allocate xcopy_op\n");
926 goto out;
927 }
928 xop->xop_se_cmd = se_cmd;
929
930 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 940 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
931 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 941 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
932 tdll, sdll, inline_dl); 942 tdll, sdll, inline_dl);
@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
935 if (rc <= 0) 945 if (rc <= 0)
936 goto out; 946 goto out;
937 947
948 if (xop->src_dev->dev_attrib.block_size !=
949 xop->dst_dev->dev_attrib.block_size) {
950 pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
951 " block_size: %u currently unsupported\n",
952 xop->src_dev->dev_attrib.block_size,
953 xop->dst_dev->dev_attrib.block_size);
954 xcopy_pt_undepend_remotedev(xop);
955 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
956 goto out;
957 }
958
938 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 959 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
939 rc * XCOPY_TARGET_DESC_LEN); 960 rc * XCOPY_TARGET_DESC_LEN);
940 seg_desc = &p[16]; 961 seg_desc = &p[16];
@@ -957,7 +978,7 @@ out:
957 if (p) 978 if (p)
958 transport_kunmap_data_sg(se_cmd); 979 transport_kunmap_data_sg(se_cmd);
959 kfree(xop); 980 kfree(xop);
960 return TCM_INVALID_CDB_FIELD; 981 return ret;
961} 982}
962 983
963static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) 984static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index f10a6ad37c06..c2301da08ac7 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
310 } 310 }
311 311
312 th_zone = conf->pzone_data; 312 th_zone = conf->pzone_data;
313 if (th_zone->therm_dev)
314 return;
315 313
316 if (th_zone->bind == false) { 314 if (th_zone->bind == false) {
317 for (i = 0; i < th_zone->cool_dev_size; i++) { 315 for (i = 0; i < th_zone->cool_dev_size; i++) {
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index b43afda8acd1..32f38b90c4f6 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
317 317
318 con = readl(data->base + reg->tmu_ctrl); 318 con = readl(data->base + reg->tmu_ctrl);
319 319
320 if (pdata->test_mux)
321 con |= (pdata->test_mux << reg->test_mux_addr_shift);
322
320 if (pdata->reference_voltage) { 323 if (pdata->reference_voltage) {
321 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); 324 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
322 con |= pdata->reference_voltage << reg->buf_vref_sel_shift; 325 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
488 }, 491 },
489 { 492 {
490 .compatible = "samsung,exynos4412-tmu", 493 .compatible = "samsung,exynos4412-tmu",
491 .data = (void *)EXYNOS5250_TMU_DRV_DATA, 494 .data = (void *)EXYNOS4412_TMU_DRV_DATA,
492 }, 495 },
493 { 496 {
494 .compatible = "samsung,exynos5250-tmu", 497 .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
629 if (ret) 632 if (ret)
630 return ret; 633 return ret;
631 634
632 if (pdata->type == SOC_ARCH_EXYNOS || 635 if (pdata->type == SOC_ARCH_EXYNOS4210 ||
633 pdata->type == SOC_ARCH_EXYNOS4210 || 636 pdata->type == SOC_ARCH_EXYNOS4412 ||
634 pdata->type == SOC_ARCH_EXYNOS5440) 637 pdata->type == SOC_ARCH_EXYNOS5250 ||
638 pdata->type == SOC_ARCH_EXYNOS5440)
635 data->soc = pdata->type; 639 data->soc = pdata->type;
636 else { 640 else {
637 ret = -EINVAL; 641 ret = -EINVAL;
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index b364c9eee701..3fb65547e64c 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -41,7 +41,8 @@ enum calibration_mode {
41 41
42enum soc_type { 42enum soc_type {
43 SOC_ARCH_EXYNOS4210 = 1, 43 SOC_ARCH_EXYNOS4210 = 1,
44 SOC_ARCH_EXYNOS, 44 SOC_ARCH_EXYNOS4412,
45 SOC_ARCH_EXYNOS5250,
45 SOC_ARCH_EXYNOS5440, 46 SOC_ARCH_EXYNOS5440,
46}; 47};
47 48
@@ -84,6 +85,7 @@ enum soc_type {
84 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl 85 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
85 reg. 86 reg.
86 * @tmu_ctrl: TMU main controller register. 87 * @tmu_ctrl: TMU main controller register.
88 * @test_mux_addr_shift: shift bits of test mux address.
87 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register. 89 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
88 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register. 90 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
89 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. 91 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
150 u32 triminfo_reload_shift; 152 u32 triminfo_reload_shift;
151 153
152 u32 tmu_ctrl; 154 u32 tmu_ctrl;
155 u32 test_mux_addr_shift;
153 u32 buf_vref_sel_shift; 156 u32 buf_vref_sel_shift;
154 u32 buf_vref_sel_mask; 157 u32 buf_vref_sel_mask;
155 u32 therm_trip_mode_shift; 158 u32 therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
257 * @first_point_trim: temp value of the first point trimming 260 * @first_point_trim: temp value of the first point trimming
258 * @second_point_trim: temp value of the second point trimming 261 * @second_point_trim: temp value of the second point trimming
259 * @default_temp_offset: default temperature offset in case of no trimming 262 * @default_temp_offset: default temperature offset in case of no trimming
263 * @test_mux; information if SoC supports test MUX
260 * @cal_type: calibration type for temperature 264 * @cal_type: calibration type for temperature
261 * @cal_mode: calibration mode for temperature 265 * @cal_mode: calibration mode for temperature
262 * @freq_clip_table: Table representing frequency reduction percentage. 266 * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
286 u8 first_point_trim; 290 u8 first_point_trim;
287 u8 second_point_trim; 291 u8 second_point_trim;
288 u8 default_temp_offset; 292 u8 default_temp_offset;
293 u8 test_mux;
289 294
290 enum calibration_type cal_type; 295 enum calibration_type cal_type;
291 enum calibration_mode cal_mode; 296 enum calibration_mode cal_mode;
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 9002499c1f69..073c292baa53 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
90}; 90};
91#endif 91#endif
92 92
93#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) 93#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
94static const struct exynos_tmu_registers exynos5250_tmu_registers = { 94static const struct exynos_tmu_registers exynos4412_tmu_registers = {
95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON, 98 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT, 99 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 100 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
101 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
101 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, 102 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
102 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, 103 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
103 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 104 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
128 .emul_time_mask = EXYNOS_EMUL_TIME_MASK, 129 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
129}; 130};
130 131
131#define EXYNOS5250_TMU_DATA \ 132#define EXYNOS4412_TMU_DATA \
132 .threshold_falling = 10, \ 133 .threshold_falling = 10, \
133 .trigger_levels[0] = 85, \ 134 .trigger_levels[0] = 85, \
134 .trigger_levels[1] = 103, \ 135 .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
162 .temp_level = 103, \ 163 .temp_level = 103, \
163 }, \ 164 }, \
164 .freq_tab_count = 2, \ 165 .freq_tab_count = 2, \
165 .type = SOC_ARCH_EXYNOS, \ 166 .registers = &exynos4412_tmu_registers, \
166 .registers = &exynos5250_tmu_registers, \
167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 167 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 168 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
169 TMU_SUPPORT_EMUL_TIME) 169 TMU_SUPPORT_EMUL_TIME)
170#endif
170 171
172#if defined(CONFIG_SOC_EXYNOS4412)
173struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
174 .tmu_data = {
175 {
176 EXYNOS4412_TMU_DATA,
177 .type = SOC_ARCH_EXYNOS4412,
178 .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
179 },
180 },
181 .tmu_count = 1,
182};
183#endif
184
185#if defined(CONFIG_SOC_EXYNOS5250)
171struct exynos_tmu_init_data const exynos5250_default_tmu_data = { 186struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
172 .tmu_data = { 187 .tmu_data = {
173 { EXYNOS5250_TMU_DATA }, 188 {
189 EXYNOS4412_TMU_DATA,
190 .type = SOC_ARCH_EXYNOS5250,
191 },
174 }, 192 },
175 .tmu_count = 1, 193 .tmu_count = 1,
176}; 194};
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index dc7feb51099b..a1ea19d9e0a6 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -95,6 +95,10 @@
95 95
96#define EXYNOS_MAX_TRIGGER_PER_REG 4 96#define EXYNOS_MAX_TRIGGER_PER_REG 4
97 97
98/* Exynos4412 specific */
99#define EXYNOS4412_MUX_ADDR_VALUE 6
100#define EXYNOS4412_MUX_ADDR_SHIFT 20
101
98/*exynos5440 specific registers*/ 102/*exynos5440 specific registers*/
99#define EXYNOS5440_TMU_S0_7_TRIM 0x000 103#define EXYNOS5440_TMU_S0_7_TRIM 0x000
100#define EXYNOS5440_TMU_S0_7_CTRL 0x020 104#define EXYNOS5440_TMU_S0_7_CTRL 0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
138#define EXYNOS4210_TMU_DRV_DATA (NULL) 142#define EXYNOS4210_TMU_DRV_DATA (NULL)
139#endif 143#endif
140 144
141#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)) 145#if defined(CONFIG_SOC_EXYNOS4412)
146extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
147#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
148#else
149#define EXYNOS4412_TMU_DRV_DATA (NULL)
150#endif
151
152#if defined(CONFIG_SOC_EXYNOS5250)
142extern struct exynos_tmu_init_data const exynos5250_default_tmu_data; 153extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
143#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data) 154#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
144#else 155#else
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index eeef0e2498ca..fdb07199d9c2 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
159 159
160 INIT_LIST_HEAD(&hwmon->tz_list); 160 INIT_LIST_HEAD(&hwmon->tz_list);
161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 161 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
162 hwmon->device = hwmon_device_register(&tz->device); 162 hwmon->device = hwmon_device_register(NULL);
163 if (IS_ERR(hwmon->device)) { 163 if (IS_ERR(hwmon->device)) {
164 result = PTR_ERR(hwmon->device); 164 result = PTR_ERR(hwmon->device);
165 goto free_mem; 165 goto free_mem;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 4f8b9af54a5a..5a47cc8c8f85 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
110 } else { 110 } else {
111 dev_err(bgp->dev, 111 dev_err(bgp->dev,
112 "Failed to read PCB state. Using defaults\n"); 112 "Failed to read PCB state. Using defaults\n");
113 ret = 0;
113 } 114 }
114 } 115 }
115 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant); 116 *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index f36950e4134f..7722cb9d5a80 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
316 int phy_id = topology_physical_package_id(cpu); 316 int phy_id = topology_physical_package_id(cpu);
317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu); 317 struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
318 bool notify = false; 318 bool notify = false;
319 unsigned long flags;
319 320
320 if (!phdev) 321 if (!phdev)
321 return; 322 return;
322 323
323 spin_lock(&pkg_work_lock); 324 spin_lock_irqsave(&pkg_work_lock, flags);
324 ++pkg_work_cnt; 325 ++pkg_work_cnt;
325 if (unlikely(phy_id > max_phy_id)) { 326 if (unlikely(phy_id > max_phy_id)) {
326 spin_unlock(&pkg_work_lock); 327 spin_unlock_irqrestore(&pkg_work_lock, flags);
327 return; 328 return;
328 } 329 }
329 pkg_work_scheduled[phy_id] = 0; 330 pkg_work_scheduled[phy_id] = 0;
330 spin_unlock(&pkg_work_lock); 331 spin_unlock_irqrestore(&pkg_work_lock, flags);
331 332
332 enable_pkg_thres_interrupt(); 333 enable_pkg_thres_interrupt();
333 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); 334 rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
397 int thres_count; 398 int thres_count;
398 u32 eax, ebx, ecx, edx; 399 u32 eax, ebx, ecx, edx;
399 u8 *temp; 400 u8 *temp;
401 unsigned long flags;
400 402
401 cpuid(6, &eax, &ebx, &ecx, &edx); 403 cpuid(6, &eax, &ebx, &ecx, &edx);
402 thres_count = ebx & 0x07; 404 thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
420 goto err_ret_unlock; 422 goto err_ret_unlock;
421 } 423 }
422 424
423 spin_lock(&pkg_work_lock); 425 spin_lock_irqsave(&pkg_work_lock, flags);
424 if (topology_physical_package_id(cpu) > max_phy_id) 426 if (topology_physical_package_id(cpu) > max_phy_id)
425 max_phy_id = topology_physical_package_id(cpu); 427 max_phy_id = topology_physical_package_id(cpu);
426 temp = krealloc(pkg_work_scheduled, 428 temp = krealloc(pkg_work_scheduled,
427 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC); 429 (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
428 if (!temp) { 430 if (!temp) {
429 spin_unlock(&pkg_work_lock); 431 spin_unlock_irqrestore(&pkg_work_lock, flags);
430 err = -ENOMEM; 432 err = -ENOMEM;
431 goto err_ret_free; 433 goto err_ret_free;
432 } 434 }
433 pkg_work_scheduled = temp; 435 pkg_work_scheduled = temp;
434 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0; 436 pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
435 spin_unlock(&pkg_work_lock); 437 spin_unlock_irqrestore(&pkg_work_lock, flags);
436 438
437 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu); 439 phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
438 phy_dev_entry->first_cpu = cpu; 440 phy_dev_entry->first_cpu = cpu;
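
The conversions above switch pkg_work_lock to the irqsave variants: a spinlock that may also be taken in interrupt context must be acquired with local interrupts disabled, and spin_lock_irqsave() additionally saves and restores the caller's previous interrupt state. The pattern in isolation, with hypothetical data:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_count;

static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* disables local IRQs */
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);	/* restores prior IRQ state */
}
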
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d067285a2d20..6b0f75eac8a2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
1499/* 1499/*
1500 * Get ip name usart or uart 1500 * Get ip name usart or uart
1501 */ 1501 */
1502static int atmel_get_ip_name(struct uart_port *port) 1502static void atmel_get_ip_name(struct uart_port *port)
1503{ 1503{
1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1504 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1505 int name = UART_GET_IP_NAME(port); 1505 int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
1518 atmel_port->is_usart = false; 1518 atmel_port->is_usart = false;
1519 } else { 1519 } else {
1520 dev_err(port->dev, "Not supported ip name, set to uart\n"); 1520 dev_err(port->dev, "Not supported ip name, set to uart\n");
1521 return -EINVAL;
1522 } 1521 }
1523
1524 return 0;
1525} 1522}
1526 1523
1527/* 1524/*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
2405 /* 2402 /*
2406 * Get port name of usart or uart 2403 * Get port name of usart or uart
2407 */ 2404 */
2408 ret = atmel_get_ip_name(&port->uart); 2405 atmel_get_ip_name(&port->uart);
2409 if (ret < 0)
2410 goto err_add_port;
2411 2406
2412 return 0; 2407 return 0;
2413 2408
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a0ebbc9ce5cd..042aa077b5b3 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
1912 1912
1913 sport->devdata = of_id->data; 1913 sport->devdata = of_id->data;
1914 1914
1915 if (of_device_is_stdout_path(np))
1916 add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
1917
1918 return 0; 1915 return 0;
1919} 1916}
1920#else 1917#else
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 93b697a0de65..15ad6fcda88b 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
561 if (!mmres || !irqres) 561 if (!mmres || !irqres)
562 return -ENODEV; 562 return -ENODEV;
563 563
564 if (np) 564 if (np) {
565 port = of_alias_get_id(np, "serial"); 565 port = of_alias_get_id(np, "serial");
566 if (port >= VT8500_MAX_PORTS) 566 if (port >= VT8500_MAX_PORTS)
567 port = -1; 567 port = -1;
568 else 568 } else {
569 port = -1; 569 port = -1;
570 }
570 571
571 if (port < 0) { 572 if (port < 0) {
572 /* calculate the port id */ 573 /* calculate the port id */
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ba475632c5fa..0e808cf91d97 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -642,16 +642,29 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
642{ 642{
643 struct uio_device *idev = vma->vm_private_data; 643 struct uio_device *idev = vma->vm_private_data;
644 int mi = uio_find_mem_index(vma); 644 int mi = uio_find_mem_index(vma);
645 struct uio_mem *mem;
645 if (mi < 0) 646 if (mi < 0)
646 return -EINVAL; 647 return -EINVAL;
648 mem = idev->info->mem + mi;
647 649
648 vma->vm_ops = &uio_physical_vm_ops; 650 if (vma->vm_end - vma->vm_start > mem->size)
651 return -EINVAL;
649 652
653 vma->vm_ops = &uio_physical_vm_ops;
650 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 654 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
651 655
656 /*
657 * We cannot use the vm_iomap_memory() helper here,
658 * because vma->vm_pgoff is the map index we looked
659 * up above in uio_find_mem_index(), rather than an
660 * actual page offset into the mmap.
661 *
662 * So we just do the physical mmap without a page
663 * offset.
664 */
652 return remap_pfn_range(vma, 665 return remap_pfn_range(vma,
653 vma->vm_start, 666 vma->vm_start,
654 idev->info->mem[mi].addr >> PAGE_SHIFT, 667 mem->addr >> PAGE_SHIFT,
655 vma->vm_end - vma->vm_start, 668 vma->vm_end - vma->vm_start,
656 vma->vm_page_prot); 669 vma->vm_page_prot);
657} 670}
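
A condensed view of the checks above (function name hypothetical, field names from the patch): the requested mapping length is validated against the backing region before remap_pfn_range(), and the region's physical base is mapped without applying vm_pgoff, since the offset only encodes the memory index looked up earlier.

#include <linux/mm.h>
#include <linux/uio_driver.h>

static int example_mmap_physical(struct vm_area_struct *vma, struct uio_mem *mem)
{
	if (vma->vm_end - vma->vm_start > mem->size)	/* refuse oversized mappings */
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       mem->addr >> PAGE_SHIFT,	/* region base PFN */
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
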
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6f96795dd20c..64d7a6d9a1ad 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
100{ 100{
101 struct usb_hcd *hcd = ci->hcd; 101 struct usb_hcd *hcd = ci->hcd;
102 102
103 usb_remove_hcd(hcd); 103 if (hcd) {
104 usb_put_hcd(hcd); 104 usb_remove_hcd(hcd);
105 usb_put_hcd(hcd);
106 }
105 if (ci->platdata->reg_vbus) 107 if (ci->platdata->reg_vbus)
106 regulator_disable(ci->platdata->reg_vbus); 108 regulator_disable(ci->platdata->reg_vbus);
107} 109}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 5b44cd47da5b..01fe36273f3b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
97 /* Alcor Micro Corp. Hub */ 97 /* Alcor Micro Corp. Hub */
98 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, 98 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
99 99
100 /* MicroTouch Systems touchscreen */
101 { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
102
100 /* appletouch */ 103 /* appletouch */
101 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 104 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
102 105
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
130 /* Broadcom BCM92035DGROM BT dongle */ 133 /* Broadcom BCM92035DGROM BT dongle */
131 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, 134 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
132 135
136 /* MAYA44USB sound device */
137 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
138
133 /* Action Semiconductor flash disk */ 139 /* Action Semiconductor flash disk */
134 { USB_DEVICE(0x10d6, 0x2200), .driver_info = 140 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
135 USB_QUIRK_STRING_FETCH_255 }, 141 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 2c76ef1320ea..08ef2829a7e2 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
799 * switchable ports. 799 * switchable ports.
800 */ 800 */
801 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 801 pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
802 cpu_to_le32(ports_available)); 802 ports_available);
803 803
804 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 804 pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
805 &ports_available); 805 &ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
821 * host. 821 * host.
822 */ 822 */
823 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 823 pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
824 cpu_to_le32(ports_available)); 824 ports_available);
825 825
826 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 826 pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
827 &ports_available); 827 &ports_available);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 773a6b28c4f1..e8b4c56dcf62 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1157,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1157 t1 = xhci_port_state_to_neutral(t1); 1157 t1 = xhci_port_state_to_neutral(t1);
1158 if (t1 != t2) 1158 if (t1 != t2)
1159 xhci_writel(xhci, t2, port_array[port_index]); 1159 xhci_writel(xhci, t2, port_array[port_index]);
1160
1161 if (hcd->speed != HCD_USB3) {
1162 /* enable remote wake up for USB 2.0 */
1163 __le32 __iomem *addr;
1164 u32 tmp;
1165
1166 /* Get the port power control register address. */
1167 addr = port_array[port_index] + PORTPMSC;
1168 tmp = xhci_readl(xhci, addr);
1169 tmp |= PORT_RWE;
1170 xhci_writel(xhci, tmp, addr);
1171 }
1172 } 1160 }
1173 hcd->state = HC_STATE_SUSPENDED; 1161 hcd->state = HC_STATE_SUSPENDED;
1174 bus_state->next_statechange = jiffies + msecs_to_jiffies(10); 1162 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1247,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
1247 xhci_ring_device(xhci, slot_id); 1235 xhci_ring_device(xhci, slot_id);
1248 } else 1236 } else
1249 xhci_writel(xhci, temp, port_array[port_index]); 1237 xhci_writel(xhci, temp, port_array[port_index]);
1250
1251 if (hcd->speed != HCD_USB3) {
1252 /* disable remote wake up for USB 2.0 */
1253 __le32 __iomem *addr;
1254 u32 tmp;
1255
1256 /* Add one to the port status register address to get
1257 * the port power control register address.
1258 */
1259 addr = port_array[port_index] + PORTPMSC;
1260 tmp = xhci_readl(xhci, addr);
1261 tmp &= ~PORT_RWE;
1262 xhci_writel(xhci, tmp, addr);
1263 }
1264 } 1238 }
1265 1239
1266 (void) xhci_readl(xhci, &xhci->op_regs->command); 1240 (void) xhci_readl(xhci, &xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 236c3aabe940..b8dffd59eb25 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -35,6 +35,9 @@
35#define PCI_VENDOR_ID_ETRON 0x1b6f 35#define PCI_VENDOR_ID_ETRON 0x1b6f
36#define PCI_DEVICE_ID_ASROCK_P67 0x7023 36#define PCI_DEVICE_ID_ASROCK_P67 0x7023
37 37
38#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
39#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
40
38static const char hcd_name[] = "xhci_hcd"; 41static const char hcd_name[] = "xhci_hcd";
39 42
40/* called after powerup, by probe or system-pm "wakeup" */ 43/* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
69 "QUIRK: Fresco Logic xHC needs configure" 72 "QUIRK: Fresco Logic xHC needs configure"
70 " endpoint cmd after reset endpoint"); 73 " endpoint cmd after reset endpoint");
71 } 74 }
75 if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
76 pdev->revision == 0x4) {
77 xhci->quirks |= XHCI_SLOW_SUSPEND;
78 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
79 "QUIRK: Fresco Logic xHC revision %u"
80 "must be suspended extra slowly",
81 pdev->revision);
82 }
72 /* Fresco Logic confirms: all revisions of this chip do not 83 /* Fresco Logic confirms: all revisions of this chip do not
73 * support MSI, even though some of them claim to in their PCI 84 * support MSI, even though some of them claim to in their PCI
74 * capabilities. 85 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
110 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 121 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
111 xhci->quirks |= XHCI_AVOID_BEI; 122 xhci->quirks |= XHCI_AVOID_BEI;
112 } 123 }
124 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
125 (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
126 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
127 /* Workaround for occasional spurious wakeups from S5 (or
128 * any other sleep) on Haswell machines with LPT and LPT-LP
129 * with the new Intel BIOS
130 */
131 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
132 }
113 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 133 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
114 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 134 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
115 xhci->quirks |= XHCI_RESET_ON_RESUME; 135 xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
217 usb_put_hcd(xhci->shared_hcd); 237 usb_put_hcd(xhci->shared_hcd);
218 } 238 }
219 usb_hcd_pci_remove(dev); 239 usb_hcd_pci_remove(dev);
240
241 /* Workaround for spurious wakeups at shutdown with HSW */
242 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
243 pci_set_power_state(dev, PCI_D3hot);
244
220 kfree(xhci); 245 kfree(xhci);
221} 246}
222 247
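
The xhci-pci hunks above follow the standard quirk pattern: match the PCI vendor/device pair at probe time, latch a bit in the xhci->quirks word, and let unrelated paths (suspend, shutdown, remove) simply test that bit later. A self-contained sketch of that pattern; the table contents and flag names here are illustrative, not the driver's actual tables:

/* Generic "match an ID, latch a quirk bit, test it later" sketch. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define QUIRK_SLOW_SUSPEND      (1u << 0)
#define QUIRK_SPURIOUS_WAKEUP   (1u << 1)

struct quirk_entry {
        uint16_t vendor;
        uint16_t device;
        uint32_t flags;
};

static const struct quirk_entry quirk_table[] = {
        { 0x8086, 0x8c31, QUIRK_SPURIOUS_WAKEUP },      /* hypothetical entry */
        { 0x1b73, 0x1000, QUIRK_SLOW_SUSPEND },         /* hypothetical entry */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t device)
{
        uint32_t flags = 0;
        size_t i;

        for (i = 0; i < sizeof(quirk_table) / sizeof(quirk_table[0]); i++)
                if (quirk_table[i].vendor == vendor &&
                    quirk_table[i].device == device)
                        flags |= quirk_table[i].flags;
        return flags;
}

int main(void)
{
        uint32_t quirks = lookup_quirks(0x8086, 0x8c31);

        if (quirks & QUIRK_SPURIOUS_WAKEUP)
                printf("would force the controller to D3hot at shutdown\n");
        return 0;
}
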
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1e36dbb48366..6e0d886bcce5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
730 730
731 spin_lock_irq(&xhci->lock); 731 spin_lock_irq(&xhci->lock);
732 xhci_halt(xhci); 732 xhci_halt(xhci);
733 /* Workaround for spurious wakeups at shutdown with HSW */
734 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
735 xhci_reset(xhci);
733 spin_unlock_irq(&xhci->lock); 736 spin_unlock_irq(&xhci->lock);
734 737
735 xhci_cleanup_msix(xhci); 738 xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
737 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 740 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
738 "xhci_shutdown completed - status = %x", 741 "xhci_shutdown completed - status = %x",
739 xhci_readl(xhci, &xhci->op_regs->status)); 742 xhci_readl(xhci, &xhci->op_regs->status));
743
744 /* Yet another workaround for spurious wakeups at shutdown with HSW */
745 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
746 pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
740} 747}
741 748
742#ifdef CONFIG_PM 749#ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
839int xhci_suspend(struct xhci_hcd *xhci) 846int xhci_suspend(struct xhci_hcd *xhci)
840{ 847{
841 int rc = 0; 848 int rc = 0;
849 unsigned int delay = XHCI_MAX_HALT_USEC;
842 struct usb_hcd *hcd = xhci_to_hcd(xhci); 850 struct usb_hcd *hcd = xhci_to_hcd(xhci);
843 u32 command; 851 u32 command;
844 852
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
861 command = xhci_readl(xhci, &xhci->op_regs->command); 869 command = xhci_readl(xhci, &xhci->op_regs->command);
862 command &= ~CMD_RUN; 870 command &= ~CMD_RUN;
863 xhci_writel(xhci, command, &xhci->op_regs->command); 871 xhci_writel(xhci, command, &xhci->op_regs->command);
872
873 /* Some chips from Fresco Logic need an extraordinary delay */
874 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
875
864 if (xhci_handshake(xhci, &xhci->op_regs->status, 876 if (xhci_handshake(xhci, &xhci->op_regs->status,
865 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) { 877 STS_HALT, STS_HALT, delay)) {
866 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); 878 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
867 spin_unlock_irq(&xhci->lock); 879 spin_unlock_irq(&xhci->lock);
868 return -ETIMEDOUT; 880 return -ETIMEDOUT;
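
In the xhci_suspend() hunk above the halt handshake keeps its default XHCI_MAX_HALT_USEC budget unless XHCI_SLOW_SUSPEND is set, in which case the budget is multiplied by ten, so only the affected Fresco Logic revision pays the longer wait. A toy stand-in for that poll-with-scaled-timeout logic; the register, delay values and helper names are all invented:

/* Poll a fake "status register" until a bit is set, with a quirk-scaled budget. */
#include <stdio.h>

#define STS_HALT                (1u << 0)
#define MAX_HALT_USEC           16000u          /* stand-in for XHCI_MAX_HALT_USEC */
#define QUIRK_SLOW_SUSPEND      (1u << 17)

/* Fake hardware: the register reports halted after 'reads_left' reads. */
static unsigned int read_status(unsigned int *reads_left)
{
        if (*reads_left) {
                (*reads_left)--;
                return 0;
        }
        return STS_HALT;
}

static int handshake(unsigned int *reads_left, unsigned int usec)
{
        while (usec--) {
                if (read_status(reads_left) & STS_HALT)
                        return 0;               /* halted within the budget */
                /* a real driver would udelay(1) between reads */
        }
        return -1;                              /* -ETIMEDOUT in the kernel */
}

int main(void)
{
        unsigned int quirks = QUIRK_SLOW_SUSPEND;
        unsigned int delay = MAX_HALT_USEC;
        unsigned int reads_until_halt = 40000;  /* slower than the default budget */

        delay *= (quirks & QUIRK_SLOW_SUSPEND) ? 10 : 1;

        printf("handshake %s (budget %u usec)\n",
               handshake(&reads_until_halt, delay) ? "timed out" : "succeeded",
               delay);
        return 0;
}
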
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 289fbfbae746..941d5f59e4dc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1548,6 +1548,8 @@ struct xhci_hcd {
1548#define XHCI_COMP_MODE_QUIRK (1 << 14) 1548#define XHCI_COMP_MODE_QUIRK (1 << 14)
1549#define XHCI_AVOID_BEI (1 << 15) 1549#define XHCI_AVOID_BEI (1 << 15)
1550#define XHCI_PLAT (1 << 16) 1550#define XHCI_PLAT (1 << 16)
1551#define XHCI_SLOW_SUSPEND (1 << 17)
1552#define XHCI_SPURIOUS_WAKEUP (1 << 18)
1551 unsigned int num_active_eps; 1553 unsigned int num_active_eps;
1552 unsigned int limit_active_eps; 1554 unsigned int limit_active_eps;
1553 /* There are two roothubs to keep track of bus suspend info for */ 1555 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e2b21c1d9c40..ba5f70f92888 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
246config USB_HSIC_USB3503 246config USB_HSIC_USB3503
247 tristate "USB3503 HSIC to USB20 Driver" 247 tristate "USB3503 HSIC to USB20 Driver"
248 depends on I2C 248 depends on I2C
249 select REGMAP 249 select REGMAP_I2C
250 help 250 help
251 This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver. 251 This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 18e877ffe7b7..cd70cc886171 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -922,6 +922,52 @@ static void musb_generic_disable(struct musb *musb)
922} 922}
923 923
924/* 924/*
925 * Program the HDRC to start (enable interrupts, dma, etc.).
926 */
927void musb_start(struct musb *musb)
928{
929 void __iomem *regs = musb->mregs;
930 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
931
932 dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
933
934 /* Set INT enable registers, enable interrupts */
935 musb->intrtxe = musb->epmask;
936 musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
937 musb->intrrxe = musb->epmask & 0xfffe;
938 musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
939 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
940
941 musb_writeb(regs, MUSB_TESTMODE, 0);
942
943 /* put into basic highspeed mode and start session */
944 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
945 | MUSB_POWER_HSENAB
946 /* ENSUSPEND wedges tusb */
947 /* | MUSB_POWER_ENSUSPEND */
948 );
949
950 musb->is_active = 0;
951 devctl = musb_readb(regs, MUSB_DEVCTL);
952 devctl &= ~MUSB_DEVCTL_SESSION;
953
954 /* session started after:
955 * (a) ID-grounded irq, host mode;
956 * (b) vbus present/connect IRQ, peripheral mode;
957 * (c) peripheral initiates, using SRP
958 */
959 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
960 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
961 musb->is_active = 1;
962 } else {
963 devctl |= MUSB_DEVCTL_SESSION;
964 }
965
966 musb_platform_enable(musb);
967 musb_writeb(regs, MUSB_DEVCTL, devctl);
968}
969
970/*
925 * Make the HDRC stop (disable interrupts, etc.); 971 * Make the HDRC stop (disable interrupts, etc.);
926 * reversible by musb_start 972 * reversible by musb_start
927 * called on gadget driver unregister 973 * called on gadget driver unregister
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 65f3917b4fc5..1c5bf75ee8ff 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
503extern const char musb_driver_name[]; 503extern const char musb_driver_name[];
504 504
505extern void musb_stop(struct musb *musb); 505extern void musb_stop(struct musb *musb);
506extern void musb_start(struct musb *musb);
506 507
507extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); 508extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
508extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); 509extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index b19ed213ab85..3671898a4535 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1853,11 +1853,14 @@ static int musb_gadget_start(struct usb_gadget *g,
1853 musb->gadget_driver = driver; 1853 musb->gadget_driver = driver;
1854 1854
1855 spin_lock_irqsave(&musb->lock, flags); 1855 spin_lock_irqsave(&musb->lock, flags);
1856 musb->is_active = 1;
1856 1857
1857 otg_set_peripheral(otg, &musb->g); 1858 otg_set_peripheral(otg, &musb->g);
1858 musb->xceiv->state = OTG_STATE_B_IDLE; 1859 musb->xceiv->state = OTG_STATE_B_IDLE;
1859 spin_unlock_irqrestore(&musb->lock, flags); 1860 spin_unlock_irqrestore(&musb->lock, flags);
1860 1861
1862 musb_start(musb);
1863
1861 /* REVISIT: funcall to other code, which also 1864 /* REVISIT: funcall to other code, which also
1862 * handles power budgeting ... this way also 1865 * handles power budgeting ... this way also
1863 * ensures HdrcStart is indirectly called. 1866 * ensures HdrcStart is indirectly called.
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index a523950c2b32..d1d6b83aabca 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -44,52 +44,6 @@
44 44
45#include "musb_core.h" 45#include "musb_core.h"
46 46
47/*
48* Program the HDRC to start (enable interrupts, dma, etc.).
49*/
50static void musb_start(struct musb *musb)
51{
52 void __iomem *regs = musb->mregs;
53 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
54
55 dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
56
57 /* Set INT enable registers, enable interrupts */
58 musb->intrtxe = musb->epmask;
59 musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
60 musb->intrrxe = musb->epmask & 0xfffe;
61 musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
62 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
63
64 musb_writeb(regs, MUSB_TESTMODE, 0);
65
66 /* put into basic highspeed mode and start session */
67 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
68 | MUSB_POWER_HSENAB
69 /* ENSUSPEND wedges tusb */
70 /* | MUSB_POWER_ENSUSPEND */
71 );
72
73 musb->is_active = 0;
74 devctl = musb_readb(regs, MUSB_DEVCTL);
75 devctl &= ~MUSB_DEVCTL_SESSION;
76
77 /* session started after:
78 * (a) ID-grounded irq, host mode;
79 * (b) vbus present/connect IRQ, peripheral mode;
80 * (c) peripheral initiates, using SRP
81 */
82 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
83 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
84 musb->is_active = 1;
85 } else {
86 devctl |= MUSB_DEVCTL_SESSION;
87 }
88
89 musb_platform_enable(musb);
90 musb_writeb(regs, MUSB_DEVCTL, devctl);
91}
92
93static void musb_port_suspend(struct musb *musb, bool do_suspend) 47static void musb_port_suspend(struct musb *musb, bool do_suspend)
94{ 48{
95 struct usb_otg *otg = musb->xceiv->otg; 49 struct usb_otg *otg = musb->xceiv->otg;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c45f9c0a1b34..b21d553c245b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -904,6 +904,7 @@ static struct usb_device_id id_table_combined [] = {
904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, 904 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
905 /* Crucible Devices */ 905 /* Crucible Devices */
906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, 906 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
907 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
907 { } /* Terminating entry */ 908 { } /* Terminating entry */
908}; 909};
909 910
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af461b522..a7019d1e3058 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
1307 * Manufacturer: Crucible Technologies 1307 * Manufacturer: Crucible Technologies
1308 */ 1308 */
1309#define FTDI_CT_COMET_PID 0x8e08 1309#define FTDI_CT_COMET_PID 0x8e08
1310
1311/*
1312 * Product: Z3X Box
1313 * Manufacturer: Smart GSM Team
1314 */
1315#define FTDI_Z3X_PID 0x0011
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 80a7104d5ddb..acaee066b99a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -451,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
451#define CHANGHONG_VENDOR_ID 0x2077 451#define CHANGHONG_VENDOR_ID 0x2077
452#define CHANGHONG_PRODUCT_CH690 0x7001 452#define CHANGHONG_PRODUCT_CH690 0x7001
453 453
454/* Inovia */
455#define INOVIA_VENDOR_ID 0x20a6
456#define INOVIA_SEW858 0x1105
457
454/* some devices interfaces need special handling due to a number of reasons */ 458/* some devices interfaces need special handling due to a number of reasons */
455enum option_blacklist_reason { 459enum option_blacklist_reason {
456 OPTION_BLACKLIST_NONE = 0, 460 OPTION_BLACKLIST_NONE = 0,
@@ -689,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
689 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, 693 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
690 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, 694 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
691 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, 695 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
696 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
697 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
698 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
699 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
700 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
701 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
702 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
703 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
704 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
705 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
706 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
707 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
708 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
709 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
710 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
711 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
712 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
713 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
714 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
715 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
716 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
717 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
718 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
719 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
720 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
721 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
722 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
723 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
724 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
725 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
726 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
727 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
728 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
729 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
730 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
731 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
732 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
733 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
734 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
735 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
736 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
737 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
738 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
739 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
740 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
741 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
742 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
743 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
744 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
745 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
746 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
747 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
748 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
749 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
750 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
751 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
752 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
753 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
754 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
755 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
756 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
757 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
758 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
759 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
760 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
761 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
762 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
763 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
764 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
765 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
766 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
767 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
768 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
769 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
770 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
771 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
772 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
773 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
774 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
775 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
776 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
777 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
778 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
779 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
780 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
781 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
782 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
783 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
784 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
785 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
786 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
787 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
788 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
789 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
790 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
791 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
792 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
793 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
794 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
795 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
796 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
797 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
798 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
799 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
800 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
801 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
802 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
803 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
804 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
805 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
806 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
807 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
808 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
809 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
810 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
811 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
812 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
813 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
814 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
815 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
816 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
817 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
818 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
819 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
820 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
821 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
822 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
823 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
824 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
825 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
826 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
827 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
828 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
829 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
830 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
831 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
832 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
833 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
834 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
835 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
836 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
837 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
838 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
839 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
840 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
841 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
842 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
843 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
844 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
845 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
846 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
847 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
848 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
849 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
850 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
851 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
852 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
853 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
854 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
855 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
856 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
857 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
858 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
859 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
860 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
861 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
862 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
863 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
864 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
865 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
866 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
867 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
868 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
869 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
870 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
871 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
872 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
873 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
874 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
875 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
876 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
877 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
878 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
879 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
880 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
881 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
882 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
883 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
884 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
885 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
886 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
887 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
888 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
889 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
890 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
891 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
892 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
893 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
894 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
895 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
896 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
897 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
898 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
899 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
900 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
901 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
902 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
903 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
904 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
905 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
906 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
907 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
908 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
909 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
910 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
911 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
692 912
693 913
694 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 914 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1257,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
1257 1477
1258 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1478 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1259 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1479 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1260 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, 1480 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1481 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1482 },
1261 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1483 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1262 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1484 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1263 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1485 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1345,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
1345 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1567 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1346 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1568 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1347 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1569 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1570 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1348 { } /* Terminating entry */ 1571 { } /* Terminating entry */
1349}; 1572};
1350MODULE_DEVICE_TABLE(usb, option_ids); 1573MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bedf8e47713b..1e6de4cd079d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,11 +4,6 @@
4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) 4 * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
5 * Copyright (C) 2003 IBM Corp. 5 * Copyright (C) 2003 IBM Corp.
6 * 6 *
7 * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
8 * - fixes, improvements and documentation for the baud rate encoding methods
9 * Copyright (C) 2013 Reinhard Max <max@suse.de>
10 * - fixes and improvements for the divisor based baud rate encoding method
11 *
12 * Original driver for 2.2.x by anonymous 7 * Original driver for 2.2.x by anonymous
13 * 8 *
14 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -134,18 +129,10 @@ MODULE_DEVICE_TABLE(usb, id_table);
134 129
135 130
136enum pl2303_type { 131enum pl2303_type {
137 type_0, /* H version ? */ 132 type_0, /* don't know the difference between type 0 and */
138 type_1, /* H version ? */ 133 type_1, /* type 1, until someone from prolific tells us... */
139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 134 HX, /* HX version of the pl2303 chip */
140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
141 TB, /* TB version */
142 HX_CLONE, /* Cheap and less functional clone of the HX chip */
143}; 135};
144/*
145 * NOTE: don't know the difference between type 0 and type 1,
146 * until someone from Prolific tells us...
147 * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
148 */
149 136
150struct pl2303_serial_private { 137struct pl2303_serial_private {
151 enum pl2303_type type; 138 enum pl2303_type type;
@@ -185,7 +172,6 @@ static int pl2303_startup(struct usb_serial *serial)
185{ 172{
186 struct pl2303_serial_private *spriv; 173 struct pl2303_serial_private *spriv;
187 enum pl2303_type type = type_0; 174 enum pl2303_type type = type_0;
188 char *type_str = "unknown (treating as type_0)";
189 unsigned char *buf; 175 unsigned char *buf;
190 176
191 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); 177 spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -198,53 +184,15 @@ static int pl2303_startup(struct usb_serial *serial)
198 return -ENOMEM; 184 return -ENOMEM;
199 } 185 }
200 186
201 if (serial->dev->descriptor.bDeviceClass == 0x02) { 187 if (serial->dev->descriptor.bDeviceClass == 0x02)
202 type = type_0; 188 type = type_0;
203 type_str = "type_0"; 189 else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
204 } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) { 190 type = HX;
205 /* 191 else if (serial->dev->descriptor.bDeviceClass == 0x00)
206 * NOTE: The bcdDevice version is the only difference between
207 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
208 */
209 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
210 /* Check if the device is a clone */
211 pl2303_vendor_read(0x9494, 0, serial, buf);
212 /*
213 * NOTE: Not sure if this read is really needed.
214 * The HX returns 0x00, the clone 0x02, but the Windows
215 * driver seems to ignore the value and continues.
216 */
217 pl2303_vendor_write(0x0606, 0xaa, serial);
218 pl2303_vendor_read(0x8686, 0, serial, buf);
219 if (buf[0] != 0xaa) {
220 type = HX_CLONE;
221 type_str = "X/HX clone (limited functionality)";
222 } else {
223 type = HX_TA;
224 type_str = "X/HX/TA";
225 }
226 pl2303_vendor_write(0x0606, 0x00, serial);
227 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
228 == 0x400) {
229 type = HXD_EA_RA_SA;
230 type_str = "HXD/EA/RA/SA";
231 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
232 == 0x500) {
233 type = TB;
234 type_str = "TB";
235 } else {
236 dev_info(&serial->interface->dev,
237 "unknown/unsupported device type\n");
238 kfree(spriv);
239 kfree(buf);
240 return -ENODEV;
241 }
242 } else if (serial->dev->descriptor.bDeviceClass == 0x00
243 || serial->dev->descriptor.bDeviceClass == 0xFF) {
244 type = type_1; 192 type = type_1;
245 type_str = "type_1"; 193 else if (serial->dev->descriptor.bDeviceClass == 0xFF)
246 } 194 type = type_1;
247 dev_dbg(&serial->interface->dev, "device type: %s\n", type_str); 195 dev_dbg(&serial->interface->dev, "device type: %d\n", type);
248 196
249 spriv->type = type; 197 spriv->type = type;
250 usb_set_serial_data(serial, spriv); 198 usb_set_serial_data(serial, spriv);
@@ -259,10 +207,10 @@ static int pl2303_startup(struct usb_serial *serial)
259 pl2303_vendor_read(0x8383, 0, serial, buf); 207 pl2303_vendor_read(0x8383, 0, serial, buf);
260 pl2303_vendor_write(0, 1, serial); 208 pl2303_vendor_write(0, 1, serial);
261 pl2303_vendor_write(1, 0, serial); 209 pl2303_vendor_write(1, 0, serial);
262 if (type == type_0 || type == type_1) 210 if (type == HX)
263 pl2303_vendor_write(2, 0x24, serial);
264 else
265 pl2303_vendor_write(2, 0x44, serial); 211 pl2303_vendor_write(2, 0x44, serial);
212 else
213 pl2303_vendor_write(2, 0x24, serial);
266 214
267 kfree(buf); 215 kfree(buf);
268 return 0; 216 return 0;
@@ -316,174 +264,65 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
316 return retval; 264 return retval;
317} 265}
318 266
319static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type, 267static void pl2303_encode_baudrate(struct tty_struct *tty,
320 u8 buf[4]) 268 struct usb_serial_port *port,
269 u8 buf[4])
321{ 270{
322 /*
323 * NOTE: Only the values defined in baud_sup are supported !
324 * => if unsupported values are set, the PL2303 uses 9600 baud instead
325 * => HX clones just don't work at unsupported baud rates < 115200 baud,
326 * for baud rates > 115200 they run at 115200 baud
327 */
328 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 271 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
329 4800, 7200, 9600, 14400, 19200, 28800, 38400, 272 4800, 7200, 9600, 14400, 19200, 28800, 38400,
330 57600, 115200, 230400, 460800, 614400, 921600, 273 57600, 115200, 230400, 460800, 500000, 614400,
331 1228800, 2457600, 3000000, 6000000, 12000000 }; 274 921600, 1228800, 2457600, 3000000, 6000000 };
275
276 struct usb_serial *serial = port->serial;
277 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
278 int baud;
279 int i;
280
332 /* 281 /*
333 * NOTE: With the exception of type_0/1 devices, the following 282 * NOTE: Only the values defined in baud_sup are supported!
334 * additional baud rates are supported (tested with HX rev. 3A only): 283 * => if unsupported values are set, the PL2303 seems to use
335 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 284 * 9600 baud (at least my PL2303X always does)
336 * 403200, 806400. (*: not HX and HX clones)
337 *
338 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
339 * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
340 *
341 * As long as we are not using this encoding method for anything else
342 * than the type_0+1, HX and HX clone chips, there is no point in
343 * complicating the code to support them.
344 */ 285 */
345 int i; 286 baud = tty_get_baud_rate(tty);
287 dev_dbg(&port->dev, "baud requested = %d\n", baud);
288 if (!baud)
289 return;
346 290
347 /* Set baudrate to nearest supported value */ 291 /* Set baudrate to nearest supported value */
348 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) { 292 for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
349 if (baud_sup[i] > baud) 293 if (baud_sup[i] > baud)
350 break; 294 break;
351 } 295 }
296
352 if (i == ARRAY_SIZE(baud_sup)) 297 if (i == ARRAY_SIZE(baud_sup))
353 baud = baud_sup[i - 1]; 298 baud = baud_sup[i - 1];
354 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1])) 299 else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
355 baud = baud_sup[i - 1]; 300 baud = baud_sup[i - 1];
356 else 301 else
357 baud = baud_sup[i]; 302 baud = baud_sup[i];
358 /* Respect the chip type specific baud rate limits */
359 /*
360 * FIXME: as long as we don't know how to distinguish between the
361 * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
362 */
363 if (type == HX_TA)
364 baud = min_t(int, baud, 6000000);
365 else if (type == type_0 || type == type_1)
366 baud = min_t(int, baud, 1228800);
367 else if (type == HX_CLONE)
368 baud = min_t(int, baud, 115200);
369 /* Direct (standard) baud rate encoding method */
370 put_unaligned_le32(baud, buf);
371
372 return baud;
373}
374 303
375static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type, 304 /* type_0, type_1 only support up to 1228800 baud */
376 u8 buf[4]) 305 if (spriv->type != HX)
377{ 306 baud = min_t(int, baud, 1228800);
378 /*
379 * Divisor based baud rate encoding method
380 *
381 * NOTE: HX clones do NOT support this method.
382 * It's not clear if the type_0/1 chips support it.
383 *
384 * divisor = 12MHz * 32 / baudrate = 2^A * B
385 *
386 * with
387 *
388 * A = buf[1] & 0x0e
389 * B = buf[0] + (buf[1] & 0x01) << 8
390 *
391 * Special cases:
392 * => 8 < B < 16: device seems to work not properly
393 * => B <= 8: device uses the max. value B = 512 instead
394 */
395 unsigned int A, B;
396 307
397 /* 308 if (baud <= 115200) {
398 * NOTE: The Windows driver allows maximum baud rates of 110% of the 309 put_unaligned_le32(baud, buf);
399 * specified maximium value.
400 * Quick tests with early (2004) HX (rev. A) chips suggest, that even
401 * higher baud rates (up to the maximum of 24M baud !) are working fine,
402 * but that should really be tested carefully in "real life" scenarios
403 * before removing the upper limit completely.
404 * Baud rates smaller than the specified 75 baud are definitely working
405 * fine.
406 */
407 if (type == type_0 || type == type_1)
408 baud = min_t(int, baud, 1228800 * 1.1);
409 else if (type == HX_TA)
410 baud = min_t(int, baud, 6000000 * 1.1);
411 else if (type == HXD_EA_RA_SA)
412 /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
413 /*
414 * FIXME: as long as we don't know how to distinguish between
415 * these chip variants, allow the max. of these values
416 */
417 baud = min_t(int, baud, 12000000 * 1.1);
418 else if (type == TB)
419 baud = min_t(int, baud, 12000000 * 1.1);
420 /* Determine factors A and B */
421 A = 0;
422 B = 12000000 * 32 / baud; /* 12MHz */
423 B <<= 1; /* Add one bit for rounding */
424 while (B > (512 << 1) && A <= 14) {
425 A += 2;
426 B >>= 2;
427 }
428 if (A > 14) { /* max. divisor = min. baudrate reached */
429 A = 14;
430 B = 512;
431 /* => ~45.78 baud */
432 } else { 310 } else {
433 B = (B + 1) >> 1; /* Round the last bit */
434 }
435 /* Handle special cases */
436 if (B == 512)
437 B = 0; /* also: 1 to 8 */
438 else if (B < 16)
439 /* 311 /*
440 * NOTE: With the current algorithm this happens 312 * Apparently the formula for higher speeds is:
441 * only for A=0 and means that the min. divisor 313 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
442 * (respectively: the max. baudrate) is reached.
443 */ 314 */
444 B = 16; /* => 24 MBaud */ 315 unsigned tmp = 12000000 * 32 / baud;
445 /* Encode the baud rate */ 316 buf[3] = 0x80;
446 buf[3] = 0x80; /* Select divisor encoding method */ 317 buf[2] = 0;
447 buf[2] = 0; 318 buf[1] = (tmp >= 256);
448 buf[1] = (A & 0x0e); /* A */ 319 while (tmp >= 256) {
449 buf[1] |= ((B & 0x100) >> 8); /* MSB of B */ 320 tmp >>= 2;
450 buf[0] = B & 0xff; /* 8 LSBs of B */ 321 buf[1] <<= 1;
451 /* Calculate the actual/resulting baud rate */ 322 }
452 if (B <= 8) 323 buf[0] = tmp;
453 B = 512; 324 }
454 baud = 12000000 * 32 / ((1 << A) * B);
455
456 return baud;
457}
458
459static void pl2303_encode_baudrate(struct tty_struct *tty,
460 struct usb_serial_port *port,
461 enum pl2303_type type,
462 u8 buf[4])
463{
464 int baud;
465 325
466 baud = tty_get_baud_rate(tty);
467 dev_dbg(&port->dev, "baud requested = %d\n", baud);
468 if (!baud)
469 return;
470 /*
471 * There are two methods for setting/encoding the baud rate
472 * 1) Direct method: encodes the baud rate value directly
473 * => supported by all chip types
474 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
475 * => not supported by HX clones (and likely type_0/1 chips)
476 *
477 * NOTE: Although the divisor based baud rate encoding method is much
478 * more flexible, some of the standard baud rate values can not be
479 * realized exactly. But the difference is very small (max. 0.2%) and
480 * the device likely uses the same baud rate generator for both methods
481 * so that there is likley no difference.
482 */
483 if (type == type_0 || type == type_1 || type == HX_CLONE)
484 baud = pl2303_baudrate_encode_direct(baud, type, buf);
485 else
486 baud = pl2303_baudrate_encode_divisor(baud, type, buf);
487 /* Save resulting baud rate */ 326 /* Save resulting baud rate */
488 tty_encode_baud_rate(tty, baud, baud); 327 tty_encode_baud_rate(tty, baud, baud);
489 dev_dbg(&port->dev, "baud set = %d\n", baud); 328 dev_dbg(&port->dev, "baud set = %d\n", baud);
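
The pl2303_encode_baudrate() body restored above first snaps the requested rate to the nearest entry of baud_sup[], then writes the rate directly into buf for rates up to 115200 and otherwise encodes a divisor of the 12 MHz * 32 base clock, matching the comment `baudrate = 12M * 32 / (2^buf[1]) / buf[0]`. The sketch below mirrors that arithmetic outside the kernel and decodes the divisor form back to show the effective rate; it is an illustration of the hunk, not the driver, and the rate table is truncated:

#include <stdio.h>

/* Subset of the baud_sup[] table restored above. */
static const int baud_sup[] = { 9600, 19200, 38400, 57600, 115200,
                                230400, 460800, 921600, 3000000, 6000000 };
#define NRATES ((int)(sizeof(baud_sup) / sizeof(baud_sup[0])))

static int nearest_supported(int baud)
{
        int i;

        for (i = 0; i < NRATES; i++)
                if (baud_sup[i] > baud)
                        break;
        if (i == NRATES)
                return baud_sup[i - 1];
        if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
                return baud_sup[i - 1];
        return baud_sup[i];
}

static void encode(int baud, unsigned char buf[4])
{
        if (baud <= 115200) {
                /* direct method: little-endian rate value */
                buf[0] = baud & 0xff;
                buf[1] = (baud >> 8) & 0xff;
                buf[2] = (baud >> 16) & 0xff;
                buf[3] = (baud >> 24) & 0xff;
        } else {
                /* divisor method, as in the restored code */
                unsigned tmp = 12000000 * 32 / baud;

                buf[3] = 0x80;
                buf[2] = 0;
                buf[1] = (tmp >= 256);
                while (tmp >= 256) {
                        tmp >>= 2;
                        buf[1] <<= 1;
                }
                buf[0] = tmp;
        }
}

int main(void)
{
        int requests[] = { 14000, 250000 };
        int i;

        for (i = 0; i < 2; i++) {
                unsigned char buf[4];
                int baud = nearest_supported(requests[i]);

                encode(baud, buf);
                printf("requested %d -> using %d, buf = %02x %02x %02x %02x\n",
                       requests[i], baud, buf[0], buf[1], buf[2], buf[3]);
                if (buf[3] & 0x80)
                        printf("  divisor decode: %d baud\n",
                               12000000 * 32 / (1 << buf[1]) / buf[0]);
        }
        return 0;
}
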
@@ -540,8 +379,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
540 dev_dbg(&port->dev, "data bits = %d\n", buf[6]); 379 dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
541 } 380 }
542 381
543 /* For reference: buf[0]:buf[3] baud rate value */ 382 /* For reference buf[0]:buf[3] baud rate value */
544 pl2303_encode_baudrate(tty, port, spriv->type, buf); 383 pl2303_encode_baudrate(tty, port, &buf[0]);
545 384
546 /* For reference buf[4]=0 is 1 stop bits */ 385 /* For reference buf[4]=0 is 1 stop bits */
547 /* For reference buf[4]=1 is 1.5 stop bits */ 386 /* For reference buf[4]=1 is 1.5 stop bits */
@@ -618,10 +457,10 @@ static void pl2303_set_termios(struct tty_struct *tty,
618 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf); 457 dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
619 458
620 if (C_CRTSCTS(tty)) { 459 if (C_CRTSCTS(tty)) {
621 if (spriv->type == type_0 || spriv->type == type_1) 460 if (spriv->type == HX)
622 pl2303_vendor_write(0x0, 0x41, serial);
623 else
624 pl2303_vendor_write(0x0, 0x61, serial); 461 pl2303_vendor_write(0x0, 0x61, serial);
462 else
463 pl2303_vendor_write(0x0, 0x41, serial);
625 } else { 464 } else {
626 pl2303_vendor_write(0x0, 0x0, serial); 465 pl2303_vendor_write(0x0, 0x0, serial);
627 } 466 }
@@ -658,7 +497,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
658 struct pl2303_serial_private *spriv = usb_get_serial_data(serial); 497 struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
659 int result; 498 int result;
660 499
661 if (spriv->type == type_0 || spriv->type == type_1) { 500 if (spriv->type != HX) {
662 usb_clear_halt(serial->dev, port->write_urb->pipe); 501 usb_clear_halt(serial->dev, port->write_urb->pipe);
663 usb_clear_halt(serial->dev, port->read_urb->pipe); 502 usb_clear_halt(serial->dev, port->read_urb->pipe);
664 } else { 503 } else {
@@ -833,7 +672,6 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
833 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 672 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
834 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 673 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
835 0, NULL, 0, 100); 674 0, NULL, 0, 100);
836 /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
837 if (result) 675 if (result)
838 dev_err(&port->dev, "error sending break = %d\n", result); 676 dev_err(&port->dev, "error sending break = %d\n", result);
839} 677}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 760b78560f67..c9a35697ebe9 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, 190 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, 191 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, 192 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
193 { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
193 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, 194 { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
194 { } /* terminator */ 195 { } /* terminator */
195}; 196};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 94d75edef77f..18509e6c21ab 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
211 /* 211 /*
212 * Many devices do not respond properly to READ_CAPACITY_16. 212 * Many devices do not respond properly to READ_CAPACITY_16.
213 * Tell the SCSI layer to try READ_CAPACITY_10 first. 213 * Tell the SCSI layer to try READ_CAPACITY_10 first.
214 * However some USB 3.0 drive enclosures return capacity
215 * modulo 2TB. Those must use READ_CAPACITY_16
214 */ 216 */
215 sdev->try_rc_10_first = 1; 217 if (!(us->fflags & US_FL_NEEDS_CAP16))
218 sdev->try_rc_10_first = 1;
216 219
217 /* assume SPC3 or latter devices support sense size > 18 */ 220 /* assume SPC3 or latter devices support sense size > 18 */
218 if (sdev->scsi_level > SCSI_SPC_2) 221 if (sdev->scsi_level > SCSI_SPC_2)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c015f2c16729..de32cfa5bfa6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1925 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1926 US_FL_IGNORE_RESIDUE ), 1926 US_FL_IGNORE_RESIDUE ),
1927 1927
1928/* Reported by Oliver Neukum <oneukum@suse.com> */
1929UNUSUAL_DEV( 0x174c, 0x55aa, 0x0100, 0x0100,
1930 "ASMedia",
1931 "AS2105",
1932 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1933 US_FL_NEEDS_CAP16),
1934
1928/* Reported by Jesse Feddema <jdfeddema@gmail.com> */ 1935/* Reported by Jesse Feddema <jdfeddema@gmail.com> */
1929UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, 1936UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
1930 "Yarvik", 1937 "Yarvik",
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index a9807dea3887..4fb7a8f83c8a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
545 long npage; 545 long npage;
546 int ret = 0, prot = 0; 546 int ret = 0, prot = 0;
547 uint64_t mask; 547 uint64_t mask;
548 struct vfio_dma *dma = NULL;
549 unsigned long pfn;
548 550
549 end = map->iova + map->size; 551 end = map->iova + map->size;
550 552
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
587 } 589 }
588 590
589 for (iova = map->iova; iova < end; iova += size, vaddr += size) { 591 for (iova = map->iova; iova < end; iova += size, vaddr += size) {
590 struct vfio_dma *dma = NULL;
591 unsigned long pfn;
592 long i; 592 long i;
593 593
594 /* Pin a contiguous chunk of memory */ 594 /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
-			break;
+			goto out;
 		}
 
 		/* Verify pages are not already mapped */
 		for (i = 0; i < npage; i++) {
 			if (iommu_iova_to_phys(iommu->domain,
 					       iova + (i << PAGE_SHIFT))) {
-				vfio_unpin_pages(pfn, npage, prot, true);
 				ret = -EBUSY;
-				break;
+				goto out_unpin;
 			}
 		}
 
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		if (ret) {
 			if (ret != -EBUSY ||
 			    map_try_harder(iommu, iova, pfn, npage, prot)) {
-				vfio_unpin_pages(pfn, npage, prot, true);
-				break;
+				goto out_unpin;
 			}
 		}
 
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 			dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 			if (!dma) {
 				iommu_unmap(iommu->domain, iova, size);
-				vfio_unpin_pages(pfn, npage, prot, true);
 				ret = -ENOMEM;
-				break;
+				goto out_unpin;
 			}
 
 			dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		}
 	}
 
-	if (ret) {
-		struct vfio_dma *tmp;
-		iova = map->iova;
-		size = map->size;
-		while ((tmp = vfio_find_dma(iommu, iova, size))) {
-			int r = vfio_remove_dma_overlap(iommu, iova,
-							&size, tmp);
-			if (WARN_ON(r || !size))
-				break;
-		}
+	WARN_ON(ret);
+	mutex_unlock(&iommu->lock);
+	return ret;
+
+out_unpin:
+	vfio_unpin_pages(pfn, npage, prot, true);
+
+out:
+	iova = map->iova;
+	size = map->size;
+	while ((dma = vfio_find_dma(iommu, iova, size))) {
+		int r = vfio_remove_dma_overlap(iommu, iova,
+						&size, dma);
+		if (WARN_ON(r || !size))
+			break;
 	}
 
 	mutex_unlock(&iommu->lock);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ce5221fa393a..e663921eebb6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	if (data_direction != DMA_NONE) {
 		ret = vhost_scsi_map_iov_to_sgl(cmd,
 				&vq->iov[data_first], data_num,
-				data_direction == DMA_TO_DEVICE);
+				data_direction == DMA_FROM_DEVICE);
 		if (unlikely(ret)) {
 			vq_err(vq, "Failed to map iov to sgl\n");
 			goto err_free;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a54ccdc4d661..22ad85242e5b 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
 	struct au1100fb_device *fbdev;
-	unsigned int len;
-	unsigned long start=0, off;
 
 	fbdev = to_au1100fb_device(fbi);
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-			       vma->vm_end - vma->vm_start,
-			       vma->vm_page_prot)) {
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static struct fb_ops au1100fb_ops =
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 301224ecc950..1d02897d17f2 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
  * method mainly to allow the use of the TLB streaming flag (CCA=6)
  */
 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
 {
-	unsigned int len;
-	unsigned long start=0, off;
 	struct au1200fb_device *fbdev = info->par;
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-	return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-			vma->vm_end - vma->vm_start,
-			vma->vm_page_prot);
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c7c64f18773d..fa932c2f7d97 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
 	sl = dev_to_w1_slave(dev);
 	fops = sl->family->fops;
 
+	if (!fops)
+		return 0;
+
 	switch (action) {
 	case BUS_NOTIFY_ADD_DEVICE:
 		/* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	atomic_set(&sl->refcnt, 0);
 	init_completion(&sl->released);
 
+	/* slave modules need to be loaded in a context with unlocked mutex */
+	mutex_unlock(&dev->mutex);
 	request_module("w1-family-0x%0x", rn->family);
+	mutex_lock(&dev->mutex);
 
 	spin_lock(&w1_flock);
 	f = w1_family_registered(rn->family);