Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_lpss.c | 5
-rw-r--r--  drivers/acpi/pci_irq.c | 9
-rw-r--r--  drivers/ata/libata-core.c | 4
-rw-r--r--  drivers/base/regmap/internal.h | 8
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 2
-rw-r--r--  drivers/base/regmap/regcache.c | 22
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 3
-rw-r--r--  drivers/base/regmap/regmap.c | 32
-rw-r--r--  drivers/char/virtio_console.c | 19
-rw-r--r--  drivers/clk/clk-divider.c | 29
-rw-r--r--  drivers/clk/clk.c | 27
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 13
-rw-r--r--  drivers/clk/qcom/lcc-ipq806x.c | 1
-rw-r--r--  drivers/clk/qcom/lcc-msm8960.c | 7
-rw-r--r--  drivers/clk/ti/fapll.c | 6
-rw-r--r--  drivers/clocksource/time-efm32.c | 4
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 8
-rw-r--r--  drivers/cpuidle/cpuidle-mvebu-v7.c | 12
-rw-r--r--  drivers/dma/amba-pl08x.c | 14
-rw-r--r--  drivers/dma/at_hdmac.c | 184
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 7
-rw-r--r--  drivers/dma/dw/platform.c | 5
-rw-r--r--  drivers/dma/imx-sdma.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 22
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 11
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 245
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.h | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 78
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 14
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-tivo.c | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 84
-rw-r--r--  drivers/i2c/i2c-core.c | 3
-rw-r--r--  drivers/ide/ide-tape.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 6
-rw-r--r--  drivers/input/mouse/synaptics.c | 212
-rw-r--r--  drivers/input/mouse/synaptics.h | 28
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/exynos-iommu.c | 7
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 5
-rw-r--r--  drivers/iommu/omap-iommu.c | 7
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 7
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 21
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 157
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 20
-rw-r--r--  drivers/isdn/icn/icn.c | 2
-rw-r--r--  drivers/md/dm-io.c | 15
-rw-r--r--  drivers/md/dm-snap.c | 120
-rw-r--r--  drivers/md/dm-thin.c | 11
-rw-r--r--  drivers/md/dm.c | 21
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/mfd/kempld-core.c | 2
-rw-r--r--  drivers/mfd/rtsx_usb.c | 30
-rw-r--r--  drivers/mmc/core/pwrseq_simple.c | 2
-rw-r--r--  drivers/mtd/nand/Kconfig | 1
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 50
-rw-r--r--  drivers/mtd/ubi/eba.c | 3
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 83
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 131
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 37
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 20
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 41
-rw-r--r--  drivers/net/virtio_net.c | 9
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 1
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/vendor.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 23
-rw-r--r--  drivers/of/Kconfig | 3
-rw-r--r--  drivers/of/base.c | 18
-rw-r--r--  drivers/of/irq.c | 10
-rw-r--r--  drivers/of/overlay.c | 3
-rw-r--r--  drivers/of/unittest.c | 33
-rw-r--r--  drivers/pci/host/pci-xgene.c | 4
-rw-r--r--  drivers/pci/pci-sysfs.c | 5
-rw-r--r--  drivers/pcmcia/Kconfig | 12
-rw-r--r--  drivers/pcmcia/Makefile | 1
-rw-r--r--  drivers/pcmcia/rsrc_pci.c | 173
-rw-r--r--  drivers/phy/phy-armada375-usb2.c | 3
-rw-r--r--  drivers/phy/phy-core.c | 11
-rw-r--r--  drivers/phy/phy-exynos-dp-video.c | 24
-rw-r--r--  drivers/phy/phy-exynos-mipi-video.c | 11
-rw-r--r--  drivers/phy/phy-exynos4210-usb2.c | 1
-rw-r--r--  drivers/phy/phy-exynos4x12-usb2.c | 1
-rw-r--r--  drivers/phy/phy-exynos5-usbdrd.c | 2
-rw-r--r--  drivers/phy/phy-exynos5250-usb2.c | 1
-rw-r--r--  drivers/phy/phy-hix5hd2-sata.c | 3
-rw-r--r--  drivers/phy/phy-miphy28lp.c | 13
-rw-r--r--  drivers/phy/phy-miphy365x.c | 12
-rw-r--r--  drivers/phy/phy-omap-control.c | 2
-rw-r--r--  drivers/phy/phy-omap-usb2.c | 7
-rw-r--r--  drivers/phy/phy-rockchip-usb.c | 6
-rw-r--r--  drivers/phy/phy-ti-pipe3.c | 12
-rw-r--r--  drivers/phy/phy-twl4030-usb.c | 1
-rw-r--r--  drivers/phy/phy-xgene.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 254
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 17
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 1
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 14
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.h | 4
-rw-r--r--  drivers/powercap/intel_rapl.c | 54
-rw-r--r--  drivers/regulator/core.c | 34
-rw-r--r--  drivers/regulator/palmas-regulator.c | 4
-rw-r--r--  drivers/regulator/tps65910-regulator.c | 1
-rw-r--r--  drivers/rpmsg/virtio_rpmsg_bus.c | 17
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 2
-rw-r--r--  drivers/rtc/rtc-s3c.c | 1
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 3
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2
-rw-r--r--  drivers/spi/spi-dw-mid.c | 6
-rw-r--r--  drivers/spi/spi-qup.c | 9
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/staging/vt6655/device_main.c | 32
-rw-r--r--  drivers/staging/vt6655/rf.c | 1
-rw-r--r--  drivers/staging/vt6656/rf.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 4
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 7
-rw-r--r--  drivers/target/target_core_device.c | 32
-rw-r--r--  drivers/target/target_core_pscsi.c | 2
-rw-r--r--  drivers/target/target_core_sbc.c | 3
-rw-r--r--  drivers/target/target_core_spc.c | 19
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 3
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 15
-rw-r--r--  drivers/usb/chipidea/udc.c | 11
-rw-r--r--  drivers/usb/common/usb-otg-fsm.c | 4
-rw-r--r--  drivers/usb/dwc2/core_intr.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 204
-rw-r--r--  drivers/usb/gadget/function/f_loopback.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_sourcesink.c | 511
-rw-r--r--  drivers/usb/gadget/function/g_zero.h | 13
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 466
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 5
-rw-r--r--  drivers/usb/gadget/legacy/zero.c | 21
-rw-r--r--  drivers/usb/host/ehci-atmel.c | 30
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 100
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/isp1760/isp1760-core.c | 3
-rw-r--r--  drivers/usb/isp1760/isp1760-udc.c | 16
-rw-r--r--  drivers/usb/musb/Kconfig | 3
-rw-r--r--  drivers/usb/phy/phy-am335x-control.c | 3
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 5
-rw-r--r--  drivers/virtio/virtio_balloon.c | 21
-rw-r--r--  drivers/virtio/virtio_mmio.c | 90
-rw-r--r--  drivers/xen/events/events_base.c | 18
-rw-r--r--  drivers/xen/xen-pciback/conf_space.c | 2
-rw-r--r--  drivers/xen/xen-pciback/conf_space.h | 2
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c | 61
-rw-r--r--  drivers/xen/xen-scsiback.c | 7
206 files changed, 2652 insertions, 2564 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 657964e8ab7e..37fb19047603 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@ struct lpss_private_data;
 
 struct lpss_device_desc {
 	unsigned int flags;
+	const char *clk_con_id;
 	unsigned int prv_offset;
 	size_t prv_size_override;
 	void (*setup)(struct lpss_private_data *pdata);
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
 
 static struct lpss_device_desc lpt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+	.clk_con_id = "baudclk",
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 
 static struct lpss_device_desc byt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.clk_con_id = "baudclk",
 	.prv_offset = 0x800,
 	.setup = lpss_uart_setup,
 };
@@ -313,7 +316,7 @@ out:
 		return PTR_ERR(clk);
 
 	pdata->clk = clk;
-	clk_register_clkdev(clk, NULL, devname);
+	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
 	return 0;
 }
 
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index e7f718d6918a..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (!pin || !dev->irq_managed || dev->irq <= 0)
 		return;
 
+	/* Keep IOAPIC pin configuration when suspending */
+	if (dev->dev.power.is_prepared)
+		return;
+#ifdef CONFIG_PM
+	if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+		return;
+#endif
+
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry)
 		return;
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
 		dev->irq_managed = 0;
-		dev->irq = 0;
 	}
 }
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..ef150ebb4c30 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 		return NULL;
 
 	/* libsas case */
-	if (!ap->scsi_host) {
+	if (ap->flags & ATA_FLAG_SAS_HOST) {
 		tag = ata_sas_allocate_tag(ap);
 		if (tag < 0)
 			return NULL;
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	tag = qc->tag;
 	if (likely(ata_tag_valid(tag))) {
 		qc->tag = ATA_TAG_POISON;
-		if (!ap->scsi_host)
+		if (ap->flags & ATA_FLAG_SAS_HOST)
 			ata_sas_free_tag(tag, ap);
 	}
 }
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
 #endif
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index d453a2c98ad0..81751a49d8bf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (pos == 0) {
 		memmove(blk + offset * map->cache_word_size,
 			blk, rbnode->blklen * map->cache_word_size);
-		bitmap_shift_right(present, present, offset, blklen);
+		bitmap_shift_left(present, present, offset, blklen);
 	}
 
 	/* update the rbnode block, its size and the base register */
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f373c35f9e1d..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
 		ret = map->cache_ops->read(map, reg, value);
 
 		if (ret == 0)
-			trace_regmap_reg_read_cache(map->dev, reg, *value);
+			trace_regmap_reg_read_cache(map, reg, *value);
 
 		return ret;
 	}
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
 	dev_dbg(map->dev, "Syncing %s cache\n",
 		map->cache_ops->name);
 	name = map->cache_ops->name;
-	trace_regcache_sync(map->dev, name, "start");
+	trace_regcache_sync(map, name, "start");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -346,7 +346,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop");
+	trace_regcache_sync(map, name, "stop");
 
 	return ret;
 }
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	name = map->cache_ops->name;
 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-	trace_regcache_sync(map->dev, name, "start region");
+	trace_regcache_sync(map, name, "start region");
 
 	if (!map->cache_dirty)
 		goto out;
@@ -401,7 +401,7 @@ out:
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop region");
+	trace_regcache_sync(map, name, "stop region");
 
 	return ret;
 }
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
 	map->lock(map->lock_arg);
 
-	trace_regcache_drop_region(map->dev, min, max);
+	trace_regcache_drop_region(map, min, max);
 
 	ret = map->cache_ops->drop(map, min, max);
 
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
-	trace_regmap_cache_only(map->dev, enable);
+	trace_regmap_cache_only(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
-	trace_regmap_cache_bypass(map->dev, enable);
+	trace_regmap_cache_bypass(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i))
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp))
 			continue;
 
 		val = regcache_get_val(map, block, i);
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
 	for (i = start; i < end; i++) {
 		regtmp = block_base + (i * map->reg_stride);
 
-		if (!regcache_reg_present(cache_present, i)) {
+		if (!regcache_reg_present(cache_present, i) ||
+		    !regmap_writeable(map, regtmp)) {
 			ret = regcache_sync_block_raw_flush(map, &data,
 							    base, regtmp);
 			if (ret != 0)
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6299a50a5960..a6c3f75b4b01 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 		goto err_alloc;
 	}
 
-	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+				   irq_flags | IRQF_ONESHOT,
 				   chip->name, d);
 	if (ret != 0) {
 		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	if (map->async && map->bus->async_write) {
 		struct regmap_async *async;
 
-		trace_regmap_async_write_start(map->dev, reg, val_len);
+		trace_regmap_async_write_start(map, reg, val_len);
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		return ret;
 	}
 
-	trace_regmap_hw_write_start(map->dev, reg,
-				    val_len / map->format.val_bytes);
+	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
 	/* If we're doing a single register write we can probably just
 	 * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	}
 
-	trace_regmap_hw_write_done(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
 	map->format.format_write(map, reg, val);
 
-	trace_regmap_hw_write_start(map->dev, reg, 1);
+	trace_regmap_hw_write_start(map, reg, 1);
 
 	ret = map->bus->write(map->bus_context, map->work_buf,
 			      map->format.buf_size);
 
-	trace_regmap_hw_write_done(map->dev, reg, 1);
+	trace_regmap_hw_write_done(map, reg, 1);
 
 	return ret;
 }
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-	trace_regmap_reg_write(map->dev, reg, val);
+	trace_regmap_reg_write(map, reg, val);
 
 	return map->reg_write(context, reg, val);
 }
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
 		int val = regs[i].def;
-		trace_regmap_hw_write_start(map->dev, reg, 1);
+		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
 		map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
-		trace_regmap_hw_write_done(map->dev, reg, 1);
+		trace_regmap_hw_write_done(map, reg, 1);
 	}
 	return ret;
 }
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	 */
 	u8[0] |= map->read_flag_mask;
 
-	trace_regmap_hw_read_start(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
 	ret = map->bus->read(map->bus_context, map->work_buf,
 			     map->format.reg_bytes + map->format.pad_bytes,
 			     val, val_len);
 
-	trace_regmap_hw_read_done(map->dev, reg,
-				  val_len / map->format.val_bytes);
+	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-	trace_regmap_reg_read(map->dev, reg, *val);
+	trace_regmap_reg_read(map, reg, *val);
 
 	if (!map->cache_bypass)
 		regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
-	trace_regmap_async_io_complete(map->dev);
+	trace_regmap_async_io_complete(map);
 
 	spin_lock(&map->async_lock);
 	list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus || !map->bus->async_write)
 		return 0;
 
-	trace_regmap_async_complete_start(map->dev);
+	trace_regmap_async_complete_start(map);
 
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
-	trace_regmap_async_complete_done(map->dev);
+	trace_regmap_async_complete_done(map);
 
 	return ret;
 }
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fae2dbbf5745..72d7028f779b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -142,6 +142,7 @@ struct ports_device {
 	 * notification
 	 */
 	struct work_struct control_work;
+	struct work_struct config_work;
 
 	struct list_head ports;
 
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
 
 	portdev = vdev->priv;
 
+	if (!use_multiport(portdev))
+		schedule_work(&portdev->config_work);
+}
+
+static void config_work_handler(struct work_struct *work)
+{
+	struct ports_device *portdev;
+
+	portdev = container_of(work, struct ports_device, control_work);
 	if (!use_multiport(portdev)) {
+		struct virtio_device *vdev;
 		struct port *port;
 		u16 rows, cols;
 
+		vdev = portdev->vdev;
 		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
 		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(portdev->vdev);
 
+	INIT_WORK(&portdev->config_work, &config_work_handler);
+	INIT_WORK(&portdev->control_work, &control_work_handler);
+
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
 		spin_lock_init(&portdev->c_ivq_lock);
 		spin_lock_init(&portdev->c_ovq_lock);
-		INIT_WORK(&portdev->control_work, &control_work_handler);
 
 		nr_added_bufs = fill_queue(portdev->c_ivq,
 					   &portdev->c_ivq_lock);
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev)
 	/* Finish up work that's lined up */
 	if (use_multiport(portdev))
 		cancel_work_sync(&portdev->control_work);
+	else
+		cancel_work_sync(&portdev->config_work);
 
 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
 		unplug_port(port);
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
 	virtqueue_disable_cb(portdev->c_ivq);
 	cancel_work_sync(&portdev->control_work);
+	cancel_work_sync(&portdev->config_work);
 	/*
 	 * Once more: if control_work_handler() was running, it would
 	 * enable the cb as the last step.
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index db7f8bce7467..25006a8bb8e6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 				   divider->flags);
 }
 
-/*
- * The reverse of DIV_ROUND_UP: The maximum number which
- * divided by m is r
- */
-#define MULT_ROUND_UP(r, m)	((r) * (m) + (m) - 1)
-
 static bool _is_valid_table_div(const struct clk_div_table *table,
 				unsigned int div)
 {
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table,
 			      unsigned long parent_rate, unsigned long rate,
 			      unsigned long flags)
 {
-	int up, down, div;
+	int up, down;
+	unsigned long up_rate, down_rate;
 
-	up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+	up = DIV_ROUND_UP(parent_rate, rate);
+	down = parent_rate / rate;
 
 	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
-		up = __roundup_pow_of_two(div);
-		down = __rounddown_pow_of_two(div);
+		up = __roundup_pow_of_two(up);
+		down = __rounddown_pow_of_two(down);
 	} else if (table) {
-		up = _round_up_table(table, div);
-		down = _round_down_table(table, div);
+		up = _round_up_table(table, up);
+		down = _round_down_table(table, down);
 	}
 
-	return (up - div) <= (div - down) ? up : down;
+	up_rate = DIV_ROUND_UP(parent_rate, up);
+	down_rate = DIV_ROUND_UP(parent_rate, down);
+
+	return (rate - up_rate) <= (down_rate - rate) ? up : down;
 }
 
 static int _div_round(const struct clk_div_table *table,
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 			return i;
 		}
 		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
-					       MULT_ROUND_UP(rate, i));
+					       rate * i);
 		now = DIV_ROUND_UP(parent_rate, i);
 		if (_is_best_div(rate, now, best, flags)) {
 			bestdiv = i;
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
 		bestdiv = readl(divider->reg) >> divider->shift;
 		bestdiv &= div_mask(divider->width);
 		bestdiv = _get_div(divider->table, bestdiv, divider->flags);
-		return bestdiv;
+		return DIV_ROUND_UP(*prate, bestdiv);
 	}
 
 	return divider_round_rate(hw, rate, prate, divider->table,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb0152961d3c..237f23f68bfc 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk)
 
 	return rate;
 }
-EXPORT_SYMBOL_GPL(clk_core_get_rate);
 
 /**
  * clk_get_rate - return the rate of clk
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk)
 }
 
 /**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+	/* trivial case: identical struct clk's or both NULL */
+	if (p == q)
+		return true;
+
+	/* true if clk->core pointers match. Avoid derefing garbage */
+	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+		if (p->core == q->core)
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
+
+/**
  * __clk_init - initialize the data structures in a struct clk
  * @dev: device initializing this clk, placeholder for now
  * @clk: clk being initialized
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b9ce0e..e60feffc10a1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
 	},
 };
 
+static struct clk_regmap pll4_vote = {
+	.enable_reg = 0x34c0,
+	.enable_mask = BIT(4),
+	.hw.init = &(struct clk_init_data){
+		.name = "pll4_vote",
+		.parent_names = (const char *[]){ "pll4" },
+		.num_parents = 1,
+		.ops = &clk_pll_vote_ops,
+	},
+};
+
 static struct clk_pll pll8 = {
 	.l_reg = 0x3144,
 	.m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
 
 static struct clk_regmap *gcc_msm8960_clks[] = {
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
 	[PLL3] = &pll3.clkr,
+	[PLL4_VOTE] = &pll4_vote,
 	[PLL8] = &pll8.clkr,
 	[PLL8_VOTE] = &pll8_vote,
 	[PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 121ffde25dc3..c9ff27b4648b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = {
 	.remove = lcc_ipq806x_remove,
 	.driver = {
 		.name = "lcc-ipq806x",
-		.owner = THIS_MODULE,
 		.of_match_table = lcc_ipq806x_match_table,
 	},
 };
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index a75a408cfccd..e2c863295f00 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = {
 		.mnctr_en_bit = 8,
 		.mnctr_reset_bit = 7,
 		.mnctr_mode_shift = 5,
-		.n_val_shift = 16,
-		.m_val_shift = 16,
+		.n_val_shift = 24,
+		.m_val_shift = 8,
 		.width = 8,
 	},
 	.p = {
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
 		return PTR_ERR(regmap);
 
 	/* Use the correct frequency plan depending on speed of PLL4 */
-	val = regmap_read(regmap, 0x4, &val);
+	regmap_read(regmap, 0x4, &val);
 	if (val == 0x12) {
 		slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
 		mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = {
 	.remove = lcc_msm8960_remove,
 	.driver = {
 		.name = "lcc-msm8960",
-		.owner = THIS_MODULE,
 		.of_match_table = lcc_msm8960_match_table,
 	},
 };
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 6ef89639a9f6..d21640634adf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
 	struct fapll_data *fd = to_fapll(hw);
 	u32 v = readl_relaxed(fd->base);
 
-	v |= (1 << FAPLL_MAIN_PLLEN);
+	v |= FAPLL_MAIN_PLLEN;
 	writel_relaxed(v, fd->base);
 
 	return 0;
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw)
 	struct fapll_data *fd = to_fapll(hw);
 	u32 v = readl_relaxed(fd->base);
 
-	v &= ~(1 << FAPLL_MAIN_PLLEN);
+	v &= ~FAPLL_MAIN_PLLEN;
 	writel_relaxed(v, fd->base);
 }
 
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw)
 	struct fapll_data *fd = to_fapll(hw);
 	u32 v = readl_relaxed(fd->base);
 
-	return v & (1 << FAPLL_MAIN_PLLEN);
+	return v & FAPLL_MAIN_PLLEN;
 }
 
 static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index bba62f9deefb..ec57ba2bbd87 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
 	clock_event_ddata.base = base;
 	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
 
-	setup_irq(irq, &efm32_clock_event_irq);
-
 	clockevents_config_and_register(&clock_event_ddata.evtdev,
 					DIV_ROUND_CLOSEST(rate, 1024),
 					0xf, 0xffff);
 
+	setup_irq(irq, &efm32_clock_event_irq);
+
 	return 0;
 
 err_get_irq:
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 02268448dc85..5dcbf90b8015 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -178,10 +178,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 
 	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
-	ret = setup_irq(irq, &sun5i_timer_irq);
-	if (ret)
-		pr_warn("failed to setup irq %d\n", irq);
-
 	/* Enable timer0 interrupt */
 	val = readl(timer_base + TIMER_IRQ_EN_REG);
 	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
@@ -191,6 +187,10 @@ static void __init sun5i_timer_init(struct device_node *node)
 
 	clockevents_config_and_register(&sun5i_clockevent, rate,
 					TIMER_SYNC_TICKS, 0xffffffff);
+
+	ret = setup_irq(irq, &sun5i_timer_irq);
+	if (ret)
+		pr_warn("failed to setup irq %d\n", irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
 		       sun5i_timer_init);
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 38e68618513a..980151f34707 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
 		deepidle = true;
 
 	ret = mvebu_v7_cpu_suspend(deepidle);
+	cpu_pm_exit();
+
 	if (ret)
 		return ret;
 
-	cpu_pm_exit();
-
 	return index;
 }
 
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = {
 	.states[0] = ARM_CPUIDLE_WFI_STATE,
 	.states[1] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 10,
+		.exit_latency = 100,
 		.power_usage = 50,
-		.target_residency = 100,
+		.target_residency = 1000,
 		.name = "MV CPU IDLE",
 		.desc = "CPU power down",
 	},
 	.states[2] = {
 		.enter = mvebu_v7_enter_idle,
-		.exit_latency = 100,
+		.exit_latency = 1000,
 		.power_usage = 5,
-		.target_residency = 1000,
+		.target_residency = 10000,
 		.flags = MVEBU_V7_FLAG_DEEP_IDLE,
 		.name = "MV CPU DEEP IDLE",
 		.desc = "CPU and L2 Fabric power down",
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 4a5fd245014e..83aa55d6fa5d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -97,6 +97,12 @@
 
 #define DRIVER_NAME	"pl08xdmac"
 
+#define PL80X_DMA_BUSWIDTHS \
+	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
 static struct amba_driver pl08x_amba_driver;
 struct pl08x_driver_data;
 
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->memcpy.device_pause = pl08x_pause;
 	pl08x->memcpy.device_resume = pl08x_resume;
 	pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+	pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
+	pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Initialize slave engine */
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	pl08x->slave.device_pause = pl08x_pause;
 	pl08x->slave.device_resume = pl08x_resume;
 	pl08x->slave.device_terminate_all = pl08x_terminate_all;
+	pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+	pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1e1a4c567542..0b4fc6fb48ce 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
238} 238}
239 239
240/* 240/*
241 * atc_get_current_descriptors - 241 * atc_get_desc_by_cookie - get the descriptor of a cookie
242 * locate the descriptor which equal to physical address in DSCR 242 * @atchan: the DMA channel
243 * @atchan: the channel we want to start 243 * @cookie: the cookie to get the descriptor for
244 * @dscr_addr: physical descriptor address in DSCR
245 */ 244 */
246static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, 245static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
247 u32 dscr_addr) 246 dma_cookie_t cookie)
248{ 247{
249 struct at_desc *desc, *_desc, *child, *desc_cur = NULL; 248 struct at_desc *desc, *_desc;
250 249
251 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 250 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
252 if (desc->lli.dscr == dscr_addr) { 251 if (desc->txd.cookie == cookie)
253 desc_cur = desc; 252 return desc;
254 break; 253 }
255 }
256 254
257 list_for_each_entry(child, &desc->tx_list, desc_node) { 255 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
258 if (child->lli.dscr == dscr_addr) { 256 if (desc->txd.cookie == cookie)
259 desc_cur = child; 257 return desc;
260 break;
261 }
262 }
263 } 258 }
264 259
265 return desc_cur; 260 return NULL;
266} 261}
267 262
268/* 263/**
269 * atc_get_bytes_left - 264 * atc_calc_bytes_left - calculates the number of bytes left according to the
270 * Get the number of bytes residue in dma buffer, 265 * value read from CTRLA.
271 * @chan: the channel we want to start 266 *
267 * @current_len: the number of bytes left before reading CTRLA
268 * @ctrla: the value of CTRLA
269 * @desc: the descriptor containing the transfer width
270 */
271static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
272 struct at_desc *desc)
273{
274 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
275}
276
277/**
278 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
279 * to the current value of CTRLA.
280 *
281 * @current_len: the number of bytes left before reading CTRLA
282 * @atchan: the channel to read CTRLA for
283 * @desc: the descriptor containing the transfer width
284 */
285static inline int atc_calc_bytes_left_from_reg(int current_len,
286 struct at_dma_chan *atchan, struct at_desc *desc)
287{
288 u32 ctrla = channel_readl(atchan, CTRLA);
289
290 return atc_calc_bytes_left(current_len, ctrla, desc);
291}
292
293/**
294 * atc_get_bytes_left - get the number of bytes residue for a cookie
295 * @chan: DMA channel
296 * @cookie: transaction identifier to check status of
272 */ 297 */
273static int atc_get_bytes_left(struct dma_chan *chan) 298static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
274{ 299{
275 struct at_dma_chan *atchan = to_at_dma_chan(chan); 300 struct at_dma_chan *atchan = to_at_dma_chan(chan);
276 struct at_dma *atdma = to_at_dma(chan->device);
277 int chan_id = atchan->chan_common.chan_id;
278 struct at_desc *desc_first = atc_first_active(atchan); 301 struct at_desc *desc_first = atc_first_active(atchan);
279 struct at_desc *desc_cur; 302 struct at_desc *desc;
280 int ret = 0, count = 0; 303 int ret;
304 u32 ctrla, dscr;
281 305
282 /* 306 /*
283 * Initialize necessary values in the first time. 307 * If the cookie doesn't match to the currently running transfer then
284 * remain_desc record remain desc length. 308 * we can return the total length of the associated DMA transfer,
309 * because it is still queued.
285 */ 310 */
286 if (atchan->remain_desc == 0) 311 desc = atc_get_desc_by_cookie(atchan, cookie);
287 /* First descriptor embedds the transaction length */ 312 if (desc == NULL)
288 atchan->remain_desc = desc_first->len; 313 return -EINVAL;
314 else if (desc != desc_first)
315 return desc->total_len;
289 316
290 /* 317 /* cookie matches to the currently running transfer */
291 * This happens when current descriptor transfer complete. 318 ret = desc_first->total_len;
292 * The residual buffer size should reduce current descriptor length.
293 */
294 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
295 clear_bit(ATC_IS_BTC, &atchan->status);
296 desc_cur = atc_get_current_descriptors(atchan,
297 channel_readl(atchan, DSCR));
298 if (!desc_cur) {
299 ret = -EINVAL;
300 goto out;
301 }
302 319
303 count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) 320 if (desc_first->lli.dscr) {
304 << desc_first->tx_width; 321 /* hardware linked list transfer */
305 if (atchan->remain_desc < count) { 322
306 ret = -EINVAL; 323 /*
307 goto out; 324 * Calculate the residue by removing the length of the child
325 * descriptors already transferred from the total length.
326 * To get the current child descriptor we can use the value of
327 * the channel's DSCR register and compare it against the value
328 * of the hardware linked list structure of each child
329 * descriptor.
330 */
331
332 ctrla = channel_readl(atchan, CTRLA);
333 rmb(); /* ensure CTRLA is read before DSCR */
334 dscr = channel_readl(atchan, DSCR);
335
336 /* for the first descriptor we can be more accurate */
337 if (desc_first->lli.dscr == dscr)
338 return atc_calc_bytes_left(ret, ctrla, desc_first);
339
340 ret -= desc_first->len;
341 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
342 if (desc->lli.dscr == dscr)
343 break;
344
345 ret -= desc->len;
308 } 346 }
309 347
310 atchan->remain_desc -= count;
311 ret = atchan->remain_desc;
312 } else {
313 /* 348 /*
314 * Get residual bytes when current 349 * For the last descriptor in the chain we can calculate
315 * descriptor transfer in progress. 350 * the remaining bytes using the channel's register.
351 * Note that the transfer width of the first and last
352 * descriptor may differ.
316 */ 353 */
317 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) 354 if (!desc->lli.dscr)
318 << (desc_first->tx_width); 355 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
319 ret = atchan->remain_desc - count; 356 } else {
357 /* single transfer */
358 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
320 } 359 }
321 /*
322 * Check fifo empty.
323 */
324 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
325 atc_issue_pending(chan);
326 360
327out:
328 return ret; 361 return ret;
329} 362}
330 363
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
539 /* Give information to tasklet */ 572 /* Give information to tasklet */
540 set_bit(ATC_IS_ERROR, &atchan->status); 573 set_bit(ATC_IS_ERROR, &atchan->status);
541 } 574 }
542 if (pending & AT_DMA_BTC(i))
543 set_bit(ATC_IS_BTC, &atchan->status);
544 tasklet_schedule(&atchan->tasklet); 575 tasklet_schedule(&atchan->tasklet);
545 ret = IRQ_HANDLED; 576 ret = IRQ_HANDLED;
546 } 577 }
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
653 desc->lli.ctrlb = ctrlb; 684 desc->lli.ctrlb = ctrlb;
654 685
655 desc->txd.cookie = 0; 686 desc->txd.cookie = 0;
687 desc->len = xfer_count << src_width;
656 688
657 atc_desc_chain(&first, &prev, desc); 689 atc_desc_chain(&first, &prev, desc);
658 } 690 }
659 691
660 /* First descriptor of the chain embedds additional information */ 692 /* First descriptor of the chain embedds additional information */
661 first->txd.cookie = -EBUSY; 693 first->txd.cookie = -EBUSY;
662 first->len = len; 694 first->total_len = len;
695
696 /* set transfer width for the calculation of the residue */
663 first->tx_width = src_width; 697 first->tx_width = src_width;
698 prev->tx_width = src_width;
664 699
665 /* set end-of-link to the last link descriptor of list*/ 700 /* set end-of-link to the last link descriptor of list*/
666 set_desc_eol(desc); 701 set_desc_eol(desc);
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
752 | ATC_SRC_WIDTH(mem_width) 787 | ATC_SRC_WIDTH(mem_width)
753 | len >> mem_width; 788 | len >> mem_width;
754 desc->lli.ctrlb = ctrlb; 789 desc->lli.ctrlb = ctrlb;
790 desc->len = len;
755 791
756 atc_desc_chain(&first, &prev, desc); 792 atc_desc_chain(&first, &prev, desc);
757 total_len += len; 793 total_len += len;
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
792 | ATC_DST_WIDTH(mem_width) 828 | ATC_DST_WIDTH(mem_width)
793 | len >> reg_width; 829 | len >> reg_width;
794 desc->lli.ctrlb = ctrlb; 830 desc->lli.ctrlb = ctrlb;
831 desc->len = len;
795 832
796 atc_desc_chain(&first, &prev, desc); 833 atc_desc_chain(&first, &prev, desc);
797 total_len += len; 834 total_len += len;
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
806 843
807 /* First descriptor of the chain embedds additional information */ 844 /* First descriptor of the chain embedds additional information */
808 first->txd.cookie = -EBUSY; 845 first->txd.cookie = -EBUSY;
809 first->len = total_len; 846 first->total_len = total_len;
847
848 /* set transfer width for the calculation of the residue */
810 first->tx_width = reg_width; 849 first->tx_width = reg_width;
850 prev->tx_width = reg_width;
811 851
812 /* first link descriptor of list is responsible of flags */ 852 /* first link descriptor of list is responsible of flags */
813 first->txd.flags = flags; /* client is in control of this ack */ 853 first->txd.flags = flags; /* client is in control of this ack */
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
872 | ATC_FC_MEM2PER 912 | ATC_FC_MEM2PER
873 | ATC_SIF(atchan->mem_if) 913 | ATC_SIF(atchan->mem_if)
874 | ATC_DIF(atchan->per_if); 914 | ATC_DIF(atchan->per_if);
915 desc->len = period_len;
875 break; 916 break;
876 917
877 case DMA_DEV_TO_MEM: 918 case DMA_DEV_TO_MEM:
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
883 | ATC_FC_PER2MEM 924 | ATC_FC_PER2MEM
884 | ATC_SIF(atchan->per_if) 925 | ATC_SIF(atchan->per_if)
885 | ATC_DIF(atchan->mem_if); 926 | ATC_DIF(atchan->mem_if);
927 desc->len = period_len;
886 break; 928 break;
887 929
888 default: 930 default:
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
964 1006
965 /* First descriptor of the chain embedds additional information */ 1007 /* First descriptor of the chain embedds additional information */
966 first->txd.cookie = -EBUSY; 1008 first->txd.cookie = -EBUSY;
967 first->len = buf_len; 1009 first->total_len = buf_len;
968 first->tx_width = reg_width; 1010 first->tx_width = reg_width;
969 1011
970 return &first->txd; 1012 return &first->txd;
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan,
1118 spin_lock_irqsave(&atchan->lock, flags); 1160 spin_lock_irqsave(&atchan->lock, flags);
1119 1161
1120 /* Get number of bytes left in the active transactions */ 1162 /* Get number of bytes left in the active transactions */
1121 bytes = atc_get_bytes_left(chan); 1163 bytes = atc_get_bytes_left(chan, cookie);
1122 1164
1123 spin_unlock_irqrestore(&atchan->lock, flags); 1165 spin_unlock_irqrestore(&atchan->lock, flags);
1124 1166
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1214 1256
1215 spin_lock_irqsave(&atchan->lock, flags); 1257 spin_lock_irqsave(&atchan->lock, flags);
1216 atchan->descs_allocated = i; 1258 atchan->descs_allocated = i;
1217 atchan->remain_desc = 0;
1218 list_splice(&tmp_list, &atchan->free_list); 1259 list_splice(&tmp_list, &atchan->free_list);
1219 dma_cookie_init(chan); 1260 dma_cookie_init(chan);
1220 spin_unlock_irqrestore(&atchan->lock, flags); 1261 spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
1257 list_splice_init(&atchan->free_list, &list); 1298 list_splice_init(&atchan->free_list, &list);
1258 atchan->descs_allocated = 0; 1299 atchan->descs_allocated = 0;
1259 atchan->status = 0; 1300 atchan->status = 0;
1260 atchan->remain_desc = 0;
1261 1301
1262 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1302 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1263} 1303}
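The at_hdmac hunks above give every link descriptor its own `len` and reserve `total_len` for the chain-wide byte count, which is what lets `atc_tx_status()` call `atc_get_bytes_left(chan, cookie)` and report a residue for one transaction instead of the whole chain. A minimal stand-alone sketch of that bookkeeping, with invented names (`struct demo_desc`, `demo_residue()`) rather than the driver's structures:

/* Stand-alone sketch (not the driver code): how a per-descriptor length
 * lets a DMA driver report residue for the in-flight transaction. */
#include <stdio.h>
#include <stddef.h>

struct demo_desc {
    size_t len;               /* bytes covered by this link descriptor */
    size_t total_len;         /* meaningful only on the first descriptor */
    struct demo_desc *next;
};

/* Residue = chain total minus the bytes of every descriptor that has
 * already completed; "current" is the first descriptor the hardware has
 * not finished yet. */
static size_t demo_residue(const struct demo_desc *first,
                           const struct demo_desc *current)
{
    size_t done = 0;
    const struct demo_desc *d;

    for (d = first; d && d != current; d = d->next)
        done += d->len;

    return first->total_len - done;
}

int main(void)
{
    struct demo_desc d2 = { .len = 256, .next = NULL };
    struct demo_desc d1 = { .len = 512, .next = &d2 };
    struct demo_desc d0 = { .len = 512, .total_len = 1280, .next = &d1 };

    /* hardware is currently working on d1: 512 bytes already done */
    printf("residue = %zu bytes\n", demo_residue(&d0, &d1)); /* 768 */
    return 0;
}

The point is only the arithmetic: once each descriptor records how many bytes it covers, the residue is the chain total minus what has already completed, which is exactly the information the old single `len` field could not provide.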
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d6bba6c636c2..2727ca560572 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,8 +181,9 @@ struct at_lli {
181 * @at_lli: hardware lli structure 181 * @at_lli: hardware lli structure
182 * @txd: support for the async_tx api 182 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 183 * @desc_node: node on the channed descriptors list
184 * @len: total transaction bytecount 184 * @len: descriptor byte count
185 * @tx_width: transfer width 185 * @tx_width: transfer width
186 * @total_len: total transaction byte count
186 */ 187 */
187struct at_desc { 188struct at_desc {
188 /* FIRST values the hardware uses */ 189 /* FIRST values the hardware uses */
@@ -194,6 +195,7 @@ struct at_desc {
194 struct list_head desc_node; 195 struct list_head desc_node;
195 size_t len; 196 size_t len;
196 u32 tx_width; 197 u32 tx_width;
198 size_t total_len;
197}; 199};
198 200
199static inline struct at_desc * 201static inline struct at_desc *
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
213enum atc_status { 215enum atc_status {
214 ATC_IS_ERROR = 0, 216 ATC_IS_ERROR = 0,
215 ATC_IS_PAUSED = 1, 217 ATC_IS_PAUSED = 1,
216 ATC_IS_BTC = 2,
217 ATC_IS_CYCLIC = 24, 218 ATC_IS_CYCLIC = 24,
218}; 219};
219 220
@@ -231,7 +232,6 @@ enum atc_status {
231 * @save_cfg: configuration register that is saved on suspend/resume cycle 232 * @save_cfg: configuration register that is saved on suspend/resume cycle
232 * @save_dscr: for cyclic operations, preserve next descriptor address in 233 * @save_dscr: for cyclic operations, preserve next descriptor address in
233 * the cyclic list on suspend/resume cycle 234 * the cyclic list on suspend/resume cycle
234 * @remain_desc: to save remain desc length
235 * @dma_sconfig: configuration for slave transfers, passed via 235 * @dma_sconfig: configuration for slave transfers, passed via
236 * .device_config 236 * .device_config
237 * @lock: serializes enqueue/dequeue operations to descriptors lists 237 * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -251,7 +251,6 @@ struct at_dma_chan {
251 struct tasklet_struct tasklet; 251 struct tasklet_struct tasklet;
252 u32 save_cfg; 252 u32 save_cfg;
253 u32 save_dscr; 253 u32 save_dscr;
254 u32 remain_desc;
255 struct dma_slave_config dma_sconfig; 254 struct dma_slave_config dma_sconfig;
256 255
257 spinlock_t lock; 256 spinlock_t lock;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6565a361e7e5..b2c3ae071429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -26,6 +26,8 @@
26 26
27#include "internal.h" 27#include "internal.h"
28 28
29#define DRV_NAME "dw_dmac"
30
29static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, 31static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
30 struct of_dma *ofdma) 32 struct of_dma *ofdma)
31{ 33{
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
284 .remove = dw_remove, 286 .remove = dw_remove,
285 .shutdown = dw_shutdown, 287 .shutdown = dw_shutdown,
286 .driver = { 288 .driver = {
287 .name = "dw_dmac", 289 .name = DRV_NAME,
288 .pm = &dw_dev_pm_ops, 290 .pm = &dw_dev_pm_ops,
289 .of_match_table = of_match_ptr(dw_dma_of_id_table), 291 .of_match_table = of_match_ptr(dw_dma_of_id_table),
290 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), 292 .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -305,3 +307,4 @@ module_exit(dw_exit);
305 307
306MODULE_LICENSE("GPL v2"); 308MODULE_LICENSE("GPL v2");
307MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); 309MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
310MODULE_ALIAS("platform:" DRV_NAME);
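The dw_dmac change is purely about module autoloading: keeping the name in a single `DRV_NAME` macro guarantees that `MODULE_ALIAS("platform:" DRV_NAME)` matches the platform driver name, so userspace can modprobe the module from the device's modalias. A minimal sketch of the same pattern with an invented driver name (`demo_dmac`), not the dw_dmac code itself:

/* Pattern sketch: one macro for the name, one "platform:" alias. */
#include <linux/module.h>
#include <linux/platform_device.h>

#define DEMO_DRV_NAME "demo_dmac"   /* hypothetical name */

static int demo_probe(struct platform_device *pdev)  { return 0; }
static int demo_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver demo_driver = {
    .probe  = demo_probe,
    .remove = demo_remove,
    .driver = {
        .name = DEMO_DRV_NAME,      /* same string everywhere */
    },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DEMO_DRV_NAME);   /* enables autoloading */

Without the alias, a platform device registered by board code or firmware matches the driver when it is built in, but never triggers autoloading when the driver is built as a module.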
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 18c0a131e4e4..66a0efb9651d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); 531 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
532 } 532 }
533 533
534 /* Set bits of CONFIG register with dynamic context switching */
535 if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
536 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
537
534 return ret ? 0 : -ETIMEDOUT; 538 return ret ? 0 : -ETIMEDOUT;
535} 539}
536 540
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma)
1394 1398
1395 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1399 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1396 1400
1397 /* Set bits of CONFIG register with given context switching mode */
1398 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1399
1400 /* Initializes channel's priorities */ 1401 /* Initializes channel's priorities */
1401 sdma_set_channel_priority(&sdma->channel[0], 7); 1402 sdma_set_channel_priority(&sdma->channel[0], 7);
1402 1403
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 910ff8ab9c9c..d8135adb2238 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
645 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); 645 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
646 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); 646 pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
647 647
648 init_sdma_vm(dqm, q, qpd);
648 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, 649 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
649 &q->gart_mqd_addr, &q->properties); 650 &q->gart_mqd_addr, &q->properties);
650 if (retval != 0) { 651 if (retval != 0) {
@@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
652 return retval; 653 return retval;
653 } 654 }
654 655
655 init_sdma_vm(dqm, q, qpd); 656 retval = mqd->load_mqd(mqd, q->mqd, 0,
657 0, NULL);
658 if (retval != 0) {
659 deallocate_sdma_queue(dqm, q->sdma_id);
660 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
661 return retval;
662 }
663
656 return 0; 664 return 0;
657} 665}
658 666
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index e415a2a9207e..c7d298e62c96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
44 BUG_ON(!kq || !dev); 44 BUG_ON(!kq || !dev);
45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); 45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
46 46
47 pr_debug("kfd: In func %s initializing queue type %d size %d\n", 47 pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size); 48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
49 49
50 nop.opcode = IT_NOP; 50 nop.opcode = IT_NOP;
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
69 69
70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); 70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
71 71
72 if (prop.doorbell_ptr == NULL) 72 if (prop.doorbell_ptr == NULL) {
73 pr_err("amdkfd: error init doorbell");
73 goto err_get_kernel_doorbell; 74 goto err_get_kernel_doorbell;
75 }
74 76
75 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); 77 retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
76 if (retval != 0) 78 if (retval != 0) {
79 pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
77 goto err_pq_allocate_vidmem; 80 goto err_pq_allocate_vidmem;
81 }
78 82
79 kq->pq_kernel_addr = kq->pq->cpu_ptr; 83 kq->pq_kernel_addr = kq->pq->cpu_ptr;
80 kq->pq_gpu_addr = kq->pq->gpu_addr; 84 kq->pq_gpu_addr = kq->pq->gpu_addr;
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem:
165err_eop_allocate_vidmem: 169err_eop_allocate_vidmem:
166 kfd_gtt_sa_free(dev, kq->pq); 170 kfd_gtt_sa_free(dev, kq->pq);
167err_pq_allocate_vidmem: 171err_pq_allocate_vidmem:
168 pr_err("kfd: error init pq\n");
169 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); 172 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
170err_get_kernel_doorbell: 173err_get_kernel_doorbell:
171 pr_err("kfd: error init doorbell");
172 return false; 174 return false;
173 175
174} 176}
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq)
187 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) 189 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
188 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); 190 kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
189 191
192 kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
193
190 kfd_gtt_sa_free(kq->dev, kq->rptr_mem); 194 kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
191 kfd_gtt_sa_free(kq->dev, kq->wptr_mem); 195 kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
192 kq->ops_asic_specific.uninitialize(kq); 196 kq->ops_asic_specific.uninitialize(kq);
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
211 queue_address = (unsigned int *)kq->pq_kernel_addr; 215 queue_address = (unsigned int *)kq->pq_kernel_addr;
212 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); 216 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
213 217
214 pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", 218 pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
215 __func__, rptr, wptr, queue_address); 219 __func__, rptr, wptr, queue_address);
216 220
217 available_size = (rptr - 1 - wptr + queue_size_dwords) % 221 available_size = (rptr - 1 - wptr + queue_size_dwords) %
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
296 } 300 }
297 301
298 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { 302 if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
299 pr_err("kfd: failed to init kernel queue\n"); 303 pr_err("amdkfd: failed to init kernel queue\n");
300 kfree(kq); 304 kfree(kq);
301 return NULL; 305 return NULL;
302 } 306 }
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
319 323
320 BUG_ON(!dev); 324 BUG_ON(!dev);
321 325
322 pr_err("kfd: starting kernel queue test\n"); 326 pr_err("amdkfd: starting kernel queue test\n");
323 327
324 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); 328 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
325 BUG_ON(!kq); 329 BUG_ON(!kq);
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
330 buffer[i] = kq->nop_packet; 334 buffer[i] = kq->nop_packet;
331 kq->ops.submit_packet(kq); 335 kq->ops.submit_packet(kq);
332 336
333 pr_err("kfd: ending kernel queue test\n"); 337 pr_err("amdkfd: ending kernel queue test\n");
334} 338}
335 339
336 340
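Most of this file is message-prefix and error-path cleanup, but the context around `acquire_packet_buffer()` shows the ring arithmetic the debug print is reporting: free space is `(rptr - 1 - wptr + queue_size_dwords) % queue_size_dwords`, i.e. one slot is sacrificed so that `rptr == wptr` can only mean "empty". A small stand-alone check of that formula (plain C, invented numbers; a power-of-two size is assumed so the unsigned wrap-around at rptr == 0 stays harmless):

#include <stdio.h>

static unsigned int free_dwords(unsigned int rptr, unsigned int wptr,
                                unsigned int size_dwords)
{
    /* one slot is always kept empty to distinguish "empty" from "full" */
    return (rptr - 1 - wptr + size_dwords) % size_dwords;
}

int main(void)
{
    unsigned int size = 8;

    printf("%u\n", free_dwords(0, 0, size)); /* empty queue: 7 free */
    printf("%u\n", free_dwords(3, 2, size)); /* one behind rptr: 0 free */
    printf("%u\n", free_dwords(2, 5, size)); /* wrapped: 4 free */
    return 0;
}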
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6b6b07ff720b..f6d04c7b5115 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -43,9 +43,10 @@
43#include "drm_crtc_internal.h" 43#include "drm_crtc_internal.h"
44#include "drm_internal.h" 44#include "drm_internal.h"
45 45
46static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 46static struct drm_framebuffer *
47 struct drm_mode_fb_cmd2 *r, 47internal_framebuffer_create(struct drm_device *dev,
48 struct drm_file *file_priv); 48 struct drm_mode_fb_cmd2 *r,
49 struct drm_file *file_priv);
49 50
50/* Avoid boilerplate. I'm tired of typing. */ 51/* Avoid boilerplate. I'm tired of typing. */
51#define DRM_ENUM_NAME_FN(fnname, list) \ 52#define DRM_ENUM_NAME_FN(fnname, list) \
@@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2908 */ 2909 */
2909 if (req->flags & DRM_MODE_CURSOR_BO) { 2910 if (req->flags & DRM_MODE_CURSOR_BO) {
2910 if (req->handle) { 2911 if (req->handle) {
2911 fb = add_framebuffer_internal(dev, &fbreq, file_priv); 2912 fb = internal_framebuffer_create(dev, &fbreq, file_priv);
2912 if (IS_ERR(fb)) { 2913 if (IS_ERR(fb)) {
2913 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 2914 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2914 return PTR_ERR(fb); 2915 return PTR_ERR(fb);
2915 } 2916 }
2916
2917 drm_framebuffer_reference(fb);
2918 } else { 2917 } else {
2919 fb = NULL; 2918 fb = NULL;
2920 } 2919 }
@@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3267 return 0; 3266 return 0;
3268} 3267}
3269 3268
3270static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 3269static struct drm_framebuffer *
3271 struct drm_mode_fb_cmd2 *r, 3270internal_framebuffer_create(struct drm_device *dev,
3272 struct drm_file *file_priv) 3271 struct drm_mode_fb_cmd2 *r,
3272 struct drm_file *file_priv)
3273{ 3273{
3274 struct drm_mode_config *config = &dev->mode_config; 3274 struct drm_mode_config *config = &dev->mode_config;
3275 struct drm_framebuffer *fb; 3275 struct drm_framebuffer *fb;
@@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3301 return fb; 3301 return fb;
3302 } 3302 }
3303 3303
3304 mutex_lock(&file_priv->fbs_lock);
3305 r->fb_id = fb->base.id;
3306 list_add(&fb->filp_head, &file_priv->fbs);
3307 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3308 mutex_unlock(&file_priv->fbs_lock);
3309
3310 return fb; 3304 return fb;
3311} 3305}
3312 3306
@@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3328int drm_mode_addfb2(struct drm_device *dev, 3322int drm_mode_addfb2(struct drm_device *dev,
3329 void *data, struct drm_file *file_priv) 3323 void *data, struct drm_file *file_priv)
3330{ 3324{
3325 struct drm_mode_fb_cmd2 *r = data;
3331 struct drm_framebuffer *fb; 3326 struct drm_framebuffer *fb;
3332 3327
3333 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3328 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3334 return -EINVAL; 3329 return -EINVAL;
3335 3330
3336 fb = add_framebuffer_internal(dev, data, file_priv); 3331 fb = internal_framebuffer_create(dev, r, file_priv);
3337 if (IS_ERR(fb)) 3332 if (IS_ERR(fb))
3338 return PTR_ERR(fb); 3333 return PTR_ERR(fb);
3339 3334
3335 /* Transfer ownership to the filp for reaping on close */
3336
3337 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
3338 mutex_lock(&file_priv->fbs_lock);
3339 r->fb_id = fb->base.id;
3340 list_add(&fb->filp_head, &file_priv->fbs);
3341 mutex_unlock(&file_priv->fbs_lock);
3342
3340 return 0; 3343 return 0;
3341} 3344}
3342 3345
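The drm_crtc refactor is an ownership change more than a rename: `internal_framebuffer_create()` now only builds the framebuffer and hands its single reference back to the caller, while publishing it (assigning `fb_id`, adding to `file_priv->fbs`) moves into `drm_mode_addfb2()`. That is also why the cursor path can drop its extra `drm_framebuffer_reference()`: the returned reference now belongs to the caller instead of to the file's list. A toy sketch of that rule, with invented names and a bare integer refcount, just to illustrate who owns the reference returned by the constructor:

#include <stdio.h>
#include <stdlib.h>

struct toy_fb {
    int refcount;
    int id;
};

static struct toy_fb *toy_fb_create(int id)
{
    struct toy_fb *fb = malloc(sizeof(*fb));
    if (!fb)
        return NULL;
    fb->refcount = 1;     /* this reference belongs to the caller */
    fb->id = id;
    return fb;
}

static void toy_fb_put(struct toy_fb *fb)
{
    if (--fb->refcount == 0)
        free(fb);
}

int main(void)
{
    /* addfb-style caller: the single reference is transferred to the
     * "filp list"; no extra get/put dance is needed. */
    struct toy_fb *fb = toy_fb_create(42);
    if (!fb)
        return 1;
    printf("published FB:%d with refcount %d\n", fb->id, fb->refcount);
    toy_fb_put(fb);       /* the list owner eventually drops it on close */
    return 0;
}

In the addfb path that reference is transferred to the file's framebuffer list for reaping on close; in the cursor path it stays with the caller, which puts it when it is done.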
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 9a5b68717ec8..379ab4555756 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
733 struct drm_dp_sideband_msg_tx *txmsg) 733 struct drm_dp_sideband_msg_tx *txmsg)
734{ 734{
735 bool ret; 735 bool ret;
736 mutex_lock(&mgr->qlock); 736
737 /*
738 * All updates to txmsg->state are protected by mgr->qlock, and the two
739 * cases we check here are terminal states. For those the barriers
740 * provided by the wake_up/wait_event pair are enough.
741 */
737 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
738 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); 743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
739 mutex_unlock(&mgr->qlock);
740 return ret; 744 return ret;
741} 745}
742 746
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1363 return 0; 1367 return 0;
1364} 1368}
1365 1369
1366/* must be called holding qlock */
1367static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1370static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1368{ 1371{
1369 struct drm_dp_sideband_msg_tx *txmsg; 1372 struct drm_dp_sideband_msg_tx *txmsg;
1370 int ret; 1373 int ret;
1371 1374
1375 WARN_ON(!mutex_is_locked(&mgr->qlock));
1376
1372 /* construct a chunk from the first msg in the tx_msg queue */ 1377 /* construct a chunk from the first msg in the tx_msg queue */
1373 if (list_empty(&mgr->tx_msg_downq)) { 1378 if (list_empty(&mgr->tx_msg_downq)) {
1374 mgr->tx_down_in_progress = false; 1379 mgr->tx_down_in_progress = false;
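Two related locking changes here: `check_txmsg_state()` no longer takes `mgr->qlock` just to read two terminal states (the added comment argues the wake_up/wait_event barriers are sufficient), and the "must be called holding qlock" comment on `process_single_down_tx_qlock()` becomes a runtime `WARN_ON(!mutex_is_locked())`. A minimal kernel-style sketch of that second convention, with invented names (`demo_lock`, `demo_process_locked()`); it is a pattern sketch, not the MST code:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

/* The _locked suffix documents the contract; the WARN_ON enforces it. */
static void demo_process_locked(void)
{
    WARN_ON(!mutex_is_locked(&demo_lock));

    /* ... work that is only safe with demo_lock held ... */
}

static void demo_process(void)
{
    mutex_lock(&demo_lock);
    demo_process_locked();
    mutex_unlock(&demo_lock);
}

The check costs one read in the common case and prints a backtrace whenever a caller forgets the lock, which is far easier to debug than silent queue corruption.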
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7fc6f8bd4821..1134526286c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
403 unsigned rem; 403 unsigned rem;
404 404
405 rem = do_div(tmp, alignment); 405 rem = do_div(tmp, alignment);
406 if (tmp) 406 if (rem)
407 start += alignment - rem; 407 start += alignment - rem;
408 } 408 }
409 409
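The one-character drm_mm fix is easy to misread: `do_div(tmp, alignment)` divides `tmp` in place and returns the remainder, so testing `tmp` tests the quotient. The old code therefore bumped already-aligned holes up by a full `alignment` (and skipped the adjustment for unaligned starts below `alignment`); the remainder is the correct condition. A plain-C illustration of the two behaviours, using `/` and `%` instead of `do_div()`:

#include <stdio.h>
#include <stdint.h>

static uint64_t align_up_buggy(uint64_t start, unsigned alignment)
{
    uint64_t quotient = start / alignment;
    unsigned rem = start % alignment;
    if (quotient)               /* wrong test: fires for aligned starts too */
        start += alignment - rem;
    return start;
}

static uint64_t align_up_fixed(uint64_t start, unsigned alignment)
{
    unsigned rem = start % alignment;
    if (rem)                    /* only adjust when actually misaligned */
        start += alignment - rem;
    return start;
}

int main(void)
{
    printf("buggy(4096, 4096) = %llu\n",
           (unsigned long long)align_up_buggy(4096, 4096));  /* 8192: wasted space */
    printf("fixed(4096, 4096) = %llu\n",
           (unsigned long long)align_up_fixed(4096, 4096));  /* 4096 */
    printf("fixed(5000, 4096) = %llu\n",
           (unsigned long long)align_up_fixed(5000, 4096));  /* 8192 */
    return 0;
}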
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index a5e74612100e..0a6780367d28 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI
50 50
51config DRM_EXYNOS_DP 51config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 52 bool "EXYNOS DRM DP driver support"
53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 54 default DRM_EXYNOS
55 select DRM_PANEL 55 select DRM_PANEL
56 help 56 help
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 63f02e2380ae..970046199608 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev)
888 of_node_put(i80_if_timings); 888 of_node_put(i80_if_timings);
889 889
890 ctx->regs = of_iomap(dev->of_node, 0); 890 ctx->regs = of_iomap(dev->of_node, 0);
891 if (IS_ERR(ctx->regs)) { 891 if (!ctx->regs) {
892 ret = PTR_ERR(ctx->regs); 892 ret = -ENOMEM;
893 goto err_del_component; 893 goto err_del_component;
894 } 894 }
895 895
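The decon fix is about which failure convention `of_iomap()` uses: it returns NULL on failure, not an `ERR_PTR()`, so the old `IS_ERR()/PTR_ERR()` pair silently accepted the failure and the driver would later dereference a NULL mapping. A stand-alone sketch that re-implements just enough of the ERR_PTR machinery locally (simplified copies, not the kernel headers) to show why `IS_ERR(NULL)` is false:

#include <stdio.h>

#define MAX_ERRNO 4095
#define DEMO_IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *DEMO_ERR_PTR(long error)      { return (void *)error; }
static long  DEMO_PTR_ERR(const void *ptr) { return (long)ptr; }
static int   DEMO_IS_ERR(const void *ptr)  { return DEMO_IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
    void *err_style  = DEMO_ERR_PTR(-12);  /* helper that fails with ERR_PTR(-ENOMEM) */
    void *null_style = NULL;               /* helper that fails with NULL, like of_iomap() */

    printf("IS_ERR on ERR_PTR(-12): %d (PTR_ERR = %ld)\n",
           DEMO_IS_ERR(err_style), DEMO_PTR_ERR(err_style));
    printf("IS_ERR on NULL:         %d  -> failure would be missed\n",
           DEMO_IS_ERR(null_style));

    if (!null_style)
        printf("explicit NULL check catches it; return -ENOMEM\n");
    return 0;
}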
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
deleted file mode 100644
index ba9b3d5ed672..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc_helper.h>
16
17#include <drm/exynos_drm.h>
18#include "exynos_drm_drv.h"
19#include "exynos_drm_encoder.h"
20#include "exynos_drm_connector.h"
21
22#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
23 drm_connector)
24
25struct exynos_drm_connector {
26 struct drm_connector drm_connector;
27 uint32_t encoder_id;
28 struct exynos_drm_display *display;
29};
30
31static int exynos_drm_connector_get_modes(struct drm_connector *connector)
32{
33 struct exynos_drm_connector *exynos_connector =
34 to_exynos_connector(connector);
35 struct exynos_drm_display *display = exynos_connector->display;
36 struct edid *edid = NULL;
37 unsigned int count = 0;
38 int ret;
39
40 /*
41 * if get_edid() exists then get_edid() callback of hdmi side
42 * is called to get edid data through i2c interface else
43 * get timing from the FIMD driver(display controller).
44 *
45 * P.S. in case of lcd panel, count is always 1 if success
46 * because lcd panel has only one mode.
47 */
48 if (display->ops->get_edid) {
49 edid = display->ops->get_edid(display, connector);
50 if (IS_ERR_OR_NULL(edid)) {
51 ret = PTR_ERR(edid);
52 edid = NULL;
53 DRM_ERROR("Panel operation get_edid failed %d\n", ret);
54 goto out;
55 }
56
57 count = drm_add_edid_modes(connector, edid);
58 if (!count) {
59 DRM_ERROR("Add edid modes failed %d\n", count);
60 goto out;
61 }
62
63 drm_mode_connector_update_edid_property(connector, edid);
64 } else {
65 struct exynos_drm_panel_info *panel;
66 struct drm_display_mode *mode = drm_mode_create(connector->dev);
67 if (!mode) {
68 DRM_ERROR("failed to create a new display mode.\n");
69 return 0;
70 }
71
72 if (display->ops->get_panel)
73 panel = display->ops->get_panel(display);
74 else {
75 drm_mode_destroy(connector->dev, mode);
76 return 0;
77 }
78
79 drm_display_mode_from_videomode(&panel->vm, mode);
80 mode->width_mm = panel->width_mm;
81 mode->height_mm = panel->height_mm;
82 connector->display_info.width_mm = mode->width_mm;
83 connector->display_info.height_mm = mode->height_mm;
84
85 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
86 drm_mode_set_name(mode);
87 drm_mode_probed_add(connector, mode);
88
89 count = 1;
90 }
91
92out:
93 kfree(edid);
94 return count;
95}
96
97static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
98 struct drm_display_mode *mode)
99{
100 struct exynos_drm_connector *exynos_connector =
101 to_exynos_connector(connector);
102 struct exynos_drm_display *display = exynos_connector->display;
103 int ret = MODE_BAD;
104
105 DRM_DEBUG_KMS("%s\n", __FILE__);
106
107 if (display->ops->check_mode)
108 if (!display->ops->check_mode(display, mode))
109 ret = MODE_OK;
110
111 return ret;
112}
113
114static struct drm_encoder *exynos_drm_best_encoder(
115 struct drm_connector *connector)
116{
117 struct drm_device *dev = connector->dev;
118 struct exynos_drm_connector *exynos_connector =
119 to_exynos_connector(connector);
120 return drm_encoder_find(dev, exynos_connector->encoder_id);
121}
122
123static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
124 .get_modes = exynos_drm_connector_get_modes,
125 .mode_valid = exynos_drm_connector_mode_valid,
126 .best_encoder = exynos_drm_best_encoder,
127};
128
129static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
130 unsigned int max_width, unsigned int max_height)
131{
132 struct exynos_drm_connector *exynos_connector =
133 to_exynos_connector(connector);
134 struct exynos_drm_display *display = exynos_connector->display;
135 unsigned int width, height;
136
137 width = max_width;
138 height = max_height;
139
140 /*
141 * if specific driver want to find desired_mode using maxmum
142 * resolution then get max width and height from that driver.
143 */
144 if (display->ops->get_max_resol)
145 display->ops->get_max_resol(display, &width, &height);
146
147 return drm_helper_probe_single_connector_modes(connector, width,
148 height);
149}
150
151/* get detection status of display device. */
152static enum drm_connector_status
153exynos_drm_connector_detect(struct drm_connector *connector, bool force)
154{
155 struct exynos_drm_connector *exynos_connector =
156 to_exynos_connector(connector);
157 struct exynos_drm_display *display = exynos_connector->display;
158 enum drm_connector_status status = connector_status_disconnected;
159
160 if (display->ops->is_connected) {
161 if (display->ops->is_connected(display))
162 status = connector_status_connected;
163 else
164 status = connector_status_disconnected;
165 }
166
167 return status;
168}
169
170static void exynos_drm_connector_destroy(struct drm_connector *connector)
171{
172 struct exynos_drm_connector *exynos_connector =
173 to_exynos_connector(connector);
174
175 drm_connector_unregister(connector);
176 drm_connector_cleanup(connector);
177 kfree(exynos_connector);
178}
179
180static struct drm_connector_funcs exynos_connector_funcs = {
181 .dpms = drm_helper_connector_dpms,
182 .fill_modes = exynos_drm_connector_fill_modes,
183 .detect = exynos_drm_connector_detect,
184 .destroy = exynos_drm_connector_destroy,
185};
186
187struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
188 struct drm_encoder *encoder)
189{
190 struct exynos_drm_connector *exynos_connector;
191 struct exynos_drm_display *display = exynos_drm_get_display(encoder);
192 struct drm_connector *connector;
193 int type;
194 int err;
195
196 exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
197 if (!exynos_connector)
198 return NULL;
199
200 connector = &exynos_connector->drm_connector;
201
202 switch (display->type) {
203 case EXYNOS_DISPLAY_TYPE_HDMI:
204 type = DRM_MODE_CONNECTOR_HDMIA;
205 connector->interlace_allowed = true;
206 connector->polled = DRM_CONNECTOR_POLL_HPD;
207 break;
208 case EXYNOS_DISPLAY_TYPE_VIDI:
209 type = DRM_MODE_CONNECTOR_VIRTUAL;
210 connector->polled = DRM_CONNECTOR_POLL_HPD;
211 break;
212 default:
213 type = DRM_MODE_CONNECTOR_Unknown;
214 break;
215 }
216
217 drm_connector_init(dev, connector, &exynos_connector_funcs, type);
218 drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
219
220 err = drm_connector_register(connector);
221 if (err)
222 goto err_connector;
223
224 exynos_connector->encoder_id = encoder->base.id;
225 exynos_connector->display = display;
226 connector->dpms = DRM_MODE_DPMS_OFF;
227 connector->encoder = encoder;
228
229 err = drm_mode_connector_attach_encoder(connector, encoder);
230 if (err) {
231 DRM_ERROR("failed to attach a connector to a encoder\n");
232 goto err_sysfs;
233 }
234
235 DRM_DEBUG_KMS("connector has been created\n");
236
237 return connector;
238
239err_sysfs:
240 drm_connector_unregister(connector);
241err_connector:
242 drm_connector_cleanup(connector);
243 kfree(exynos_connector);
244 return NULL;
245}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
deleted file mode 100644
index 4eb20d78379a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * Authors:
4 * Inki Dae <inki.dae@samsung.com>
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_CONNECTOR_H_
15#define _EXYNOS_DRM_CONNECTOR_H_
16
17struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
18 struct drm_encoder *encoder);
19
20#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 925fc69af1a0..c300e22da8ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx)
284 } 284 }
285} 285}
286 286
287static int fimd_ctx_initialize(struct fimd_context *ctx, 287static int fimd_iommu_attach_devices(struct fimd_context *ctx,
288 struct drm_device *drm_dev) 288 struct drm_device *drm_dev)
289{ 289{
290 struct exynos_drm_private *priv;
291 priv = drm_dev->dev_private;
292
293 ctx->drm_dev = drm_dev;
294 ctx->pipe = priv->pipe++;
295 290
296 /* attach this sub driver to iommu mapping if supported. */ 291 /* attach this sub driver to iommu mapping if supported. */
297 if (is_drm_iommu_supported(ctx->drm_dev)) { 292 if (is_drm_iommu_supported(ctx->drm_dev)) {
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx,
313 return 0; 308 return 0;
314} 309}
315 310
316static void fimd_ctx_remove(struct fimd_context *ctx) 311static void fimd_iommu_detach_devices(struct fimd_context *ctx)
317{ 312{
318 /* detach this sub driver from iommu mapping if supported. */ 313 /* detach this sub driver from iommu mapping if supported. */
319 if (is_drm_iommu_supported(ctx->drm_dev)) 314 if (is_drm_iommu_supported(ctx->drm_dev))
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1056{ 1051{
1057 struct fimd_context *ctx = dev_get_drvdata(dev); 1052 struct fimd_context *ctx = dev_get_drvdata(dev);
1058 struct drm_device *drm_dev = data; 1053 struct drm_device *drm_dev = data;
1054 struct exynos_drm_private *priv = drm_dev->dev_private;
1059 int ret; 1055 int ret;
1060 1056
1061 ret = fimd_ctx_initialize(ctx, drm_dev); 1057 ctx->drm_dev = drm_dev;
1062 if (ret) { 1058 ctx->pipe = priv->pipe++;
1063 DRM_ERROR("fimd_ctx_initialize failed.\n");
1064 return ret;
1065 }
1066 1059
1067 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, 1060 ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
1068 EXYNOS_DISPLAY_TYPE_LCD, 1061 EXYNOS_DISPLAY_TYPE_LCD,
1069 &fimd_crtc_ops, ctx); 1062 &fimd_crtc_ops, ctx);
1070 if (IS_ERR(ctx->crtc)) {
1071 fimd_ctx_remove(ctx);
1072 return PTR_ERR(ctx->crtc);
1073 }
1074 1063
1075 if (ctx->display) 1064 if (ctx->display)
1076 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1065 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1077 1066
1067 ret = fimd_iommu_attach_devices(ctx, drm_dev);
1068 if (ret)
1069 return ret;
1070
1078 return 0; 1071 return 0;
1079 1072
1080} 1073}
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
1086 1079
1087 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); 1080 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
1088 1081
1082 fimd_iommu_detach_devices(ctx);
1083
1089 if (ctx->display) 1084 if (ctx->display)
1090 exynos_dpi_remove(ctx->display); 1085 exynos_dpi_remove(ctx->display);
1091
1092 fimd_ctx_remove(ctx);
1093} 1086}
1094 1087
1095static const struct component_ops fimd_component_ops = { 1088static const struct component_ops fimd_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a5616872eee7..8ad5b7294eb4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane)
175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); 176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
177 177
178 if (exynos_crtc->ops->win_disable) 178 if (exynos_crtc && exynos_crtc->ops->win_disable)
179 exynos_crtc->ops->win_disable(exynos_crtc, 179 exynos_crtc->ops->win_disable(exynos_crtc,
180 exynos_plane->zpos); 180 exynos_plane->zpos);
181 181
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5daad5f75fb..5b205863b659 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2936 req = obj->last_read_req; 2936 req = obj->last_read_req;
2937 2937
2938 /* Do this after OLR check to make sure we make forward progress polling 2938 /* Do this after OLR check to make sure we make forward progress polling
2939 * on this IOCTL with a timeout <=0 (like busy ioctl) 2939 * on this IOCTL with a timeout == 0 (like busy ioctl)
2940 */ 2940 */
2941 if (args->timeout_ns <= 0) { 2941 if (args->timeout_ns == 0) {
2942 ret = -ETIME; 2942 ret = -ETIME;
2943 goto out; 2943 goto out;
2944 } 2944 }
@@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2948 i915_gem_request_reference(req); 2948 i915_gem_request_reference(req);
2949 mutex_unlock(&dev->struct_mutex); 2949 mutex_unlock(&dev->struct_mutex);
2950 2950
2951 ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, 2951 ret = __i915_wait_request(req, reset_counter, true,
2952 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2952 file->driver_priv); 2953 file->driver_priv);
2953 mutex_lock(&dev->struct_mutex); 2954 mutex_lock(&dev->struct_mutex);
2954 i915_gem_request_unreference(req); 2955 i915_gem_request_unreference(req);
@@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev)
4792 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4793 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4793 return -EIO; 4794 return -EIO;
4794 4795
4796 /* Double layer security blanket, see i915_gem_init() */
4797 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4798
4795 if (dev_priv->ellc_size) 4799 if (dev_priv->ellc_size)
4796 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4800 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4797 4801
@@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev)
4824 for_each_ring(ring, dev_priv, i) { 4828 for_each_ring(ring, dev_priv, i) {
4825 ret = ring->init_hw(ring); 4829 ret = ring->init_hw(ring);
4826 if (ret) 4830 if (ret)
4827 return ret; 4831 goto out;
4828 } 4832 }
4829 4833
4830 for (i = 0; i < NUM_L3_SLICES(dev); i++) 4834 for (i = 0; i < NUM_L3_SLICES(dev); i++)
@@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev)
4841 DRM_ERROR("Context enable failed %d\n", ret); 4845 DRM_ERROR("Context enable failed %d\n", ret);
4842 i915_gem_cleanup_ringbuffer(dev); 4846 i915_gem_cleanup_ringbuffer(dev);
4843 4847
4844 return ret; 4848 goto out;
4845 } 4849 }
4846 4850
4851out:
4852 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4847 return ret; 4853 return ret;
4848} 4854}
4849 4855
@@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev)
4877 dev_priv->gt.stop_ring = intel_logical_ring_stop; 4883 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4878 } 4884 }
4879 4885
4886 /* This is just a security blanket to placate dragons.
4887 * On some systems, we very sporadically observe that the first TLBs
4888 * used by the CS may be stale, despite us poking the TLB reset. If
4889 * we hold the forcewake during initialisation these problems
4890 * just magically go away.
4891 */
4892 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4893
4880 ret = i915_gem_init_userptr(dev); 4894 ret = i915_gem_init_userptr(dev);
4881 if (ret) 4895 if (ret)
4882 goto out_unlock; 4896 goto out_unlock;
@@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev)
4903 } 4917 }
4904 4918
4905out_unlock: 4919out_unlock:
4920 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4906 mutex_unlock(&dev->struct_mutex); 4921 mutex_unlock(&dev->struct_mutex);
4907 4922
4908 return ret; 4923 return ret;
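The wait-ioctl hunks change the meaning of the timeout argument: only `timeout_ns == 0` now means "poll and fail with -ETIME if the request is not done", while a negative value is forwarded as a NULL timeout, i.e. wait indefinitely. A small stand-alone dispatcher showing that mapping; `wait_fn()` is merely a stand-in for `__i915_wait_request()` and every name here is invented:

#include <stdio.h>

#define DEMO_ETIME 62   /* stand-in for -ETIME */

/* Stand-in for __i915_wait_request(): a NULL timeout means "no deadline". */
static int wait_fn(long long *timeout_ns)
{
    if (timeout_ns)
        printf("bounded wait, budget %lld ns\n", *timeout_ns);
    else
        printf("unbounded wait\n");
    return 0;   /* pretend the request completed */
}

static int wait_ioctl_demo(long long timeout_ns, int already_signaled)
{
    if (already_signaled)
        return 0;
    if (timeout_ns == 0)        /* busy-ioctl style polling: fail right away */
        return -DEMO_ETIME;
    return wait_fn(timeout_ns > 0 ? &timeout_ns : NULL);
}

int main(void)
{
    printf("%d\n", wait_ioctl_demo(0, 0));    /* -62 */
    printf("%d\n", wait_ioctl_demo(-1, 0));   /* unbounded wait, then 0 */
    printf("%d\n", wait_ioctl_demo(1000, 0)); /* bounded wait, then 0 */
    return 0;
}

The remaining i915_gem hunks bracket `i915_gem_init_hw()` (and all of `i915_gem_init()`) in forcewake get/put, which is why the early `return ret` paths become `goto out`: the matching forcewake_put must never be skipped.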
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e730789b53b7..6d22128d97b1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_trace.h" 39#include "i915_trace.h"
40#include <drm/drm_atomic.h>
40#include <drm/drm_atomic_helper.h> 41#include <drm/drm_atomic_helper.h>
41#include <drm/drm_dp_helper.h> 42#include <drm/drm_dp_helper.h>
42#include <drm/drm_crtc_helper.h> 43#include <drm/drm_crtc_helper.h>
@@ -2416,6 +2417,14 @@ out_unref_obj:
2416 return false; 2417 return false;
2417} 2418}
2418 2419
2420/* Update plane->state->fb to match plane->fb after driver-internal updates */
2421static void
2422update_state_fb(struct drm_plane *plane)
2423{
2424 if (plane->fb != plane->state->fb)
2425 drm_atomic_set_fb_for_plane(plane->state, plane->fb);
2426}
2427
2419static void 2428static void
2420intel_find_plane_obj(struct intel_crtc *intel_crtc, 2429intel_find_plane_obj(struct intel_crtc *intel_crtc,
2421 struct intel_initial_plane_config *plane_config) 2430 struct intel_initial_plane_config *plane_config)
@@ -2462,6 +2471,8 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2462 break; 2471 break;
2463 } 2472 }
2464 } 2473 }
2474
2475 update_state_fb(intel_crtc->base.primary);
2465} 2476}
2466 2477
2467static void i9xx_update_primary_plane(struct drm_crtc *crtc, 2478static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -6602,6 +6613,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6602 struct drm_framebuffer *fb; 6613 struct drm_framebuffer *fb;
6603 struct intel_framebuffer *intel_fb; 6614 struct intel_framebuffer *intel_fb;
6604 6615
6616 val = I915_READ(DSPCNTR(plane));
6617 if (!(val & DISPLAY_PLANE_ENABLE))
6618 return;
6619
6605 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 6620 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6606 if (!intel_fb) { 6621 if (!intel_fb) {
6607 DRM_DEBUG_KMS("failed to alloc fb\n"); 6622 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -6610,8 +6625,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6610 6625
6611 fb = &intel_fb->base; 6626 fb = &intel_fb->base;
6612 6627
6613 val = I915_READ(DSPCNTR(plane));
6614
6615 if (INTEL_INFO(dev)->gen >= 4) 6628 if (INTEL_INFO(dev)->gen >= 4)
6616 if (val & DISPPLANE_TILED) 6629 if (val & DISPPLANE_TILED)
6617 plane_config->tiling = I915_TILING_X; 6630 plane_config->tiling = I915_TILING_X;
@@ -6650,6 +6663,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6650 plane_config->size); 6663 plane_config->size);
6651 6664
6652 crtc->base.primary->fb = fb; 6665 crtc->base.primary->fb = fb;
6666 update_state_fb(crtc->base.primary);
6653} 6667}
6654 6668
6655static void chv_crtc_clock_get(struct intel_crtc *crtc, 6669static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7643,6 +7657,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7643 fb = &intel_fb->base; 7657 fb = &intel_fb->base;
7644 7658
7645 val = I915_READ(PLANE_CTL(pipe, 0)); 7659 val = I915_READ(PLANE_CTL(pipe, 0));
7660 if (!(val & PLANE_CTL_ENABLE))
7661 goto error;
7662
7646 if (val & PLANE_CTL_TILED_MASK) 7663 if (val & PLANE_CTL_TILED_MASK)
7647 plane_config->tiling = I915_TILING_X; 7664 plane_config->tiling = I915_TILING_X;
7648 7665
@@ -7687,6 +7704,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7687 plane_config->size); 7704 plane_config->size);
7688 7705
7689 crtc->base.primary->fb = fb; 7706 crtc->base.primary->fb = fb;
7707 update_state_fb(crtc->base.primary);
7690 return; 7708 return;
7691 7709
7692error: 7710error:
@@ -7730,6 +7748,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7730 struct drm_framebuffer *fb; 7748 struct drm_framebuffer *fb;
7731 struct intel_framebuffer *intel_fb; 7749 struct intel_framebuffer *intel_fb;
7732 7750
7751 val = I915_READ(DSPCNTR(pipe));
7752 if (!(val & DISPLAY_PLANE_ENABLE))
7753 return;
7754
7733 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7755 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7734 if (!intel_fb) { 7756 if (!intel_fb) {
7735 DRM_DEBUG_KMS("failed to alloc fb\n"); 7757 DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -7738,8 +7760,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7738 7760
7739 fb = &intel_fb->base; 7761 fb = &intel_fb->base;
7740 7762
7741 val = I915_READ(DSPCNTR(pipe));
7742
7743 if (INTEL_INFO(dev)->gen >= 4) 7763 if (INTEL_INFO(dev)->gen >= 4)
7744 if (val & DISPPLANE_TILED) 7764 if (val & DISPPLANE_TILED)
7745 plane_config->tiling = I915_TILING_X; 7765 plane_config->tiling = I915_TILING_X;
@@ -7778,6 +7798,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7778 plane_config->size); 7798 plane_config->size);
7779 7799
7780 crtc->base.primary->fb = fb; 7800 crtc->base.primary->fb = fb;
7801 update_state_fb(crtc->base.primary);
7781} 7802}
7782 7803
7783static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 7804static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -9716,7 +9737,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9716 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9737 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9717 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9738 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9718 9739
9719 WARN_ON(!in_irq()); 9740 WARN_ON(!in_interrupt());
9720 9741
9721 if (crtc == NULL) 9742 if (crtc == NULL)
9722 return; 9743 return;
@@ -9816,6 +9837,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9816 drm_gem_object_reference(&obj->base); 9837 drm_gem_object_reference(&obj->base);
9817 9838
9818 crtc->primary->fb = fb; 9839 crtc->primary->fb = fb;
9840 update_state_fb(crtc->primary);
9819 9841
9820 work->pending_flip_obj = obj; 9842 work->pending_flip_obj = obj;
9821 9843
@@ -9884,6 +9906,7 @@ cleanup_unpin:
9884cleanup_pending: 9906cleanup_pending:
9885 atomic_dec(&intel_crtc->unpin_work_count); 9907 atomic_dec(&intel_crtc->unpin_work_count);
9886 crtc->primary->fb = old_fb; 9908 crtc->primary->fb = old_fb;
9909 update_state_fb(crtc->primary);
9887 drm_gem_object_unreference(&work->old_fb_obj->base); 9910 drm_gem_object_unreference(&work->old_fb_obj->base);
9888 drm_gem_object_unreference(&obj->base); 9911 drm_gem_object_unreference(&obj->base);
9889 mutex_unlock(&dev->struct_mutex); 9912 mutex_unlock(&dev->struct_mutex);
@@ -13718,6 +13741,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
13718 to_intel_crtc(c)->pipe); 13741 to_intel_crtc(c)->pipe);
13719 drm_framebuffer_unreference(c->primary->fb); 13742 drm_framebuffer_unreference(c->primary->fb);
13720 c->primary->fb = NULL; 13743 c->primary->fb = NULL;
13744 update_state_fb(c->primary);
13721 } 13745 }
13722 } 13746 }
13723 mutex_unlock(&dev->struct_mutex); 13747 mutex_unlock(&dev->struct_mutex);
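Every call site that still writes the legacy `plane->fb` (or `crtc->primary->fb`) pointer now follows it with `update_state_fb()`, which re-points `plane->state->fb` through `drm_atomic_set_fb_for_plane()` so the atomic state does not go stale during the transition. A toy model of what keeping the two pointers in sync means for reference counts; the names and the bare integer refcount are invented, the real helper uses drm_framebuffer reference/unreference:

#include <stdio.h>

struct toy_fb    { int refcount; };
struct toy_state { struct toy_fb *fb; };
struct toy_plane { struct toy_fb *fb; struct toy_state *state; };

/* Mirrors the idea of drm_atomic_set_fb_for_plane(): take a reference on
 * the new fb, drop the one held by the state, then swap the pointer. */
static void toy_set_fb_for_state(struct toy_state *state, struct toy_fb *fb)
{
    if (fb)
        fb->refcount++;
    if (state->fb)
        state->fb->refcount--;
    state->fb = fb;
}

static void toy_update_state_fb(struct toy_plane *plane)
{
    if (plane->fb != plane->state->fb)
        toy_set_fb_for_state(plane->state, plane->fb);
}

int main(void)
{
    struct toy_fb ofb = { .refcount = 2 };   /* legacy pointer + state */
    struct toy_fb nfb = { .refcount = 1 };   /* legacy pointer only, so far */
    struct toy_state st = { .fb = &ofb };
    struct toy_plane pl = { .fb = &nfb, .state = &st };

    toy_update_state_fb(&pl);
    printf("old fb refs: %d, new fb refs: %d, state in sync: %d\n",
           ofb.refcount, nfb.refcount, st.fb == pl.fb);   /* 1, 2, 1 */
    return 0;
}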
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c47a3baa53d5..4e8fb891d4ea 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1048 1048
1049 /* We need to init first for ECOBUS access and then 1049 /* We need to init first for ECOBUS access and then
1050 * determine later if we want to reinit, in case of MT access is 1050 * determine later if we want to reinit, in case of MT access is
1051 * not working 1051 * not working. In this stage we don't know which flavour this
1052 * ivb is, so it is better to reset also the gen6 fw registers
1053 * before the ecobus check.
1052 */ 1054 */
1055
1056 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1057 __raw_posting_read(dev_priv, ECOBUS);
1058
1053 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1059 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1054 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1060 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1055 1061
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 29bd539af183..6efa8f38ff54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
340 340
341 /* switch mmio to cpu's native endianness */ 341 /* switch mmio to cpu's native endianness */
342#ifndef __BIG_ENDIAN 342#ifndef __BIG_ENDIAN
343 if (ioread32_native(map + 0x000004) != 0x00000000) 343 if (ioread32_native(map + 0x000004) != 0x00000000) {
344#else 344#else
345 if (ioread32_native(map + 0x000004) == 0x00000000) 345 if (ioread32_native(map + 0x000004) == 0x00000000) {
346#endif 346#endif
347 iowrite32_native(0x01000001, map + 0x000004); 347 iowrite32_native(0x01000001, map + 0x000004);
348 ioread32_native(map);
349 }
348 350
349 /* read boot0 and strapping information */ 351 /* read boot0 and strapping information */
350 boot0 = ioread32_native(map + 0x000000); 352 boot0 = ioread32_native(map + 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
index 539561ed3281..108d048da764 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device)
142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; 142 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
143#endif 143#endif
144 break; 144 break;
145 case 0x126:
146 device->cname = "GM206";
147 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass;
148 device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass;
149 device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
150 device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
151#if 0
152 /* looks to be some non-trivial changes */
153 device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass;
154 /* priv ring says no to 0x10eb14 writes */
155 device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
156#endif
157 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
158 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
159 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
160 device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
164 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass;
168 device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass;
169#if 0
170 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
171#endif
172 device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass;
173#if 0
174 device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass;
177#endif
178 device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
179#if 0
180 device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass;
181 device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass;
182 device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass;
183 device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass;
184 device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
185 device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass;
186#endif
187 break;
145 default: 188 default:
146 nv_fatal(device, "unknown Maxwell chipset\n"); 189 nv_fatal(device, "unknown Maxwell chipset\n");
147 return -EINVAL; 190 return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index b038b6eb51db..043e4296084c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev)
502{ 502{
503 struct nvkm_device *device = nv_device(subdev); 503 struct nvkm_device *device = nv_device(subdev);
504 struct nv04_fifo_priv *priv = (void *)subdev; 504 struct nv04_fifo_priv *priv = (void *)subdev;
505 uint32_t status, reassign; 505 u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
506 int cnt = 0; 506 u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
507 u32 reassign, chid, get, sem;
507 508
508 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; 509 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
509 while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { 510 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
510 uint32_t chid, get;
511
512 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
513
514 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
515 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
516 511
517 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 512 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
518 nv04_fifo_cache_error(device, priv, chid, get); 513 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
519 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
520 }
521 514
522 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 515 if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
523 nv04_fifo_dma_pusher(device, priv, chid); 516 nv04_fifo_cache_error(device, priv, chid, get);
524 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 517 stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
525 } 518 }
526 519
527 if (status & NV_PFIFO_INTR_SEMAPHORE) { 520 if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
528 uint32_t sem; 521 nv04_fifo_dma_pusher(device, priv, chid);
522 stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
523 }
529 524
530 status &= ~NV_PFIFO_INTR_SEMAPHORE; 525 if (stat & NV_PFIFO_INTR_SEMAPHORE) {
531 nv_wr32(priv, NV03_PFIFO_INTR_0, 526 stat &= ~NV_PFIFO_INTR_SEMAPHORE;
532 NV_PFIFO_INTR_SEMAPHORE); 527 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
533 528
534 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); 529 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
535 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); 530 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
536 531
537 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); 532 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
538 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); 533 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
539 } 534 }
540 535
541 if (device->card_type == NV_50) { 536 if (device->card_type == NV_50) {
542 if (status & 0x00000010) { 537 if (stat & 0x00000010) {
543 status &= ~0x00000010; 538 stat &= ~0x00000010;
544 nv_wr32(priv, 0x002100, 0x00000010); 539 nv_wr32(priv, 0x002100, 0x00000010);
545 }
546
547 if (status & 0x40000000) {
548 nv_wr32(priv, 0x002100, 0x40000000);
549 nvkm_fifo_uevent(&priv->base);
550 status &= ~0x40000000;
551 }
552 } 540 }
553 541
554 if (status) { 542 if (stat & 0x40000000) {
555 nv_warn(priv, "unknown intr 0x%08x, ch %d\n", 543 nv_wr32(priv, 0x002100, 0x40000000);
556 status, chid); 544 nvkm_fifo_uevent(&priv->base);
557 nv_wr32(priv, NV03_PFIFO_INTR_0, status); 545 stat &= ~0x40000000;
558 status = 0;
559 } 546 }
560
561 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
562 } 547 }
563 548
564 if (status) { 549 if (stat) {
565 nv_error(priv, "still angry after %d spins, halt\n", cnt); 550 nv_warn(priv, "unknown intr 0x%08x\n", stat);
566 nv_wr32(priv, 0x002140, 0); 551 nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
567 nv_wr32(priv, 0x000140, 0); 552 nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
568 } 553 }
569 554
570 nv_wr32(priv, 0x000100, 0x00000100); 555 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
571} 556}
572 557
573static int 558static int
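The nv04 PFIFO interrupt handler stops looping "until quiet or 100 spins, then halt the engine" and instead takes a single pass: read the enable mask, AND it with the status, handle the bits it knows, and for anything left over mask that source off in `INTR_EN_0` and ack it, so one stuck bit can no longer wedge the whole engine. A stand-alone sketch of that strategy with fake registers and invented bit values (nothing here is nvkm API):

#include <stdio.h>
#include <stdint.h>

#define IRQ_CACHE_ERROR 0x00000001u   /* invented "known" sources */
#define IRQ_DMA_PUSHER  0x00000010u

/* Fake registers standing in for the hardware status/enable registers. */
static uint32_t reg_intr    = IRQ_CACHE_ERROR | 0x40000000u;
static uint32_t reg_intr_en = IRQ_CACHE_ERROR | IRQ_DMA_PUSHER | 0x40000000u;

static void demo_fifo_intr(void)
{
    uint32_t stat = reg_intr & reg_intr_en;   /* only enabled sources */

    if (stat & IRQ_CACHE_ERROR) {
        printf("handle cache error\n");
        reg_intr &= ~IRQ_CACHE_ERROR;         /* ack */
        stat     &= ~IRQ_CACHE_ERROR;
    }
    if (stat & IRQ_DMA_PUSHER) {
        printf("handle dma pusher\n");
        reg_intr &= ~IRQ_DMA_PUSHER;
        stat     &= ~IRQ_DMA_PUSHER;
    }
    if (stat) {
        printf("unknown intr 0x%08x: mask it off and ack it\n", stat);
        reg_intr_en &= ~stat;                 /* this source can never storm again */
        reg_intr    &= ~stat;
    }
}

int main(void)
{
    demo_fifo_intr();       /* handles the known bit, masks the unknown one */
    reg_intr |= 0x40000000u;
    demo_fifo_intr();       /* raised again, but now filtered by the mask */
    return 0;
}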
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 2e7ec389eea7..57e2c5b13123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info)
1032 const int s = 8; 1032 const int s = 8;
1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 1033 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
1034 mmio_refn(info, 0x408004, 0x00000000, s, b); 1034 mmio_refn(info, 0x408004, 0x00000000, s, b);
1035 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 1035 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
1036 mmio_refn(info, 0x418808, 0x00000000, s, b); 1036 mmio_refn(info, 0x418808, 0x00000000, s, b);
1037 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 1037 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
1038} 1038}
1039 1039
1040void 1040void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b52300d8861a..5e9454ba158f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
851 const int s = 8; 851 const int s = 8;
852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 852 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
853 mmio_refn(info, 0x408004, 0x00000000, s, b); 853 mmio_refn(info, 0x408004, 0x00000000, s, b);
854 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 854 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
855 mmio_refn(info, 0x418808, 0x00000000, s, b); 855 mmio_refn(info, 0x418808, 0x00000000, s, b);
856 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); 856 mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 857 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
858} 858}
859 859
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 956f4dce960c..b2fae6e389e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
871 const int s = 8; 871 const int s = 8;
872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); 872 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
873 mmio_refn(info, 0x408004, 0x00000000, s, b); 873 mmio_refn(info, 0x408004, 0x00000000, s, b);
874 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); 874 mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
875 mmio_refn(info, 0x418e24, 0x00000000, s, b); 875 mmio_refn(info, 0x418e24, 0x00000000, s, b);
876 mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); 876 mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); 877 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
878} 878}
879 879
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index d1a89b2bd5c1..c4e1f085ee10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 74 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
75 if (ent) { 75 if (ent) {
76 if (ver >= 0x41) { 76 if (ver >= 0x41) {
77 if (!(nv_ro32(bios, ent) & 0x80000000)) 77 u32 ent_value = nv_ro32(bios, ent);
78 u8 i2c_port = (ent_value >> 27) & 0x1f;
79 u8 dpaux_port = (ent_value >> 22) & 0x1f;
80 /* value 0x1f means unused according to DCB 4.x spec */
81 if (i2c_port == 0x1f && dpaux_port == 0x1f)
78 info->type = DCB_I2C_UNUSED; 82 info->type = DCB_I2C_UNUSED;
79 else 83 else
80 info->type = DCB_I2C_PMGR; 84 info->type = DCB_I2C_PMGR;
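For DCB 4.1+ entries the old test keyed on bit 31; the fix instead extracts the two 5-bit port fields and only marks the entry unused when both read 0x1f, the "unused" sentinel the comment attributes to the DCB 4.x spec. A stand-alone sketch of exactly that field extraction (the shift amounts are taken from the hunk above; everything else is invented):

#include <stdio.h>
#include <stdint.h>

static void parse_entry(uint32_t ent)
{
    uint8_t i2c_port   = (ent >> 27) & 0x1f;
    uint8_t dpaux_port = (ent >> 22) & 0x1f;

    if (i2c_port == 0x1f && dpaux_port == 0x1f)
        printf("0x%08x: unused entry\n", ent);
    else
        printf("0x%08x: i2c port %u, aux port %u\n",
               ent, i2c_port, dpaux_port);
}

int main(void)
{
    parse_entry(0xffffffff);                 /* both fields 0x1f: unused */
    parse_entry((2u << 27) | (1u << 22));    /* i2c port 2, aux port 1 */
    return 0;
}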
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d13d1b5a859f..df09ca7c4889 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
1030 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); 1030 return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1031} 1031}
1032 1032
1033struct radeon_wait_cb {
1034 struct fence_cb base;
1035 struct task_struct *task;
1036};
1037
1038static void
1039radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
1040{
1041 struct radeon_wait_cb *wait =
1042 container_of(cb, struct radeon_wait_cb, base);
1043
1044 wake_up_process(wait->task);
1045}
1046
1033static signed long radeon_fence_default_wait(struct fence *f, bool intr, 1047static signed long radeon_fence_default_wait(struct fence *f, bool intr,
1034 signed long t) 1048 signed long t)
1035{ 1049{
1036 struct radeon_fence *fence = to_radeon_fence(f); 1050 struct radeon_fence *fence = to_radeon_fence(f);
1037 struct radeon_device *rdev = fence->rdev; 1051 struct radeon_device *rdev = fence->rdev;
1038 bool signaled; 1052 struct radeon_wait_cb cb;
1039 1053
1040 fence_enable_sw_signaling(&fence->base); 1054 cb.task = current;
1041 1055
1042 /* 1056 if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
1043 * This function has to return -EDEADLK, but cannot hold 1057 return t;
1044 * exclusive_lock during the wait because some callers 1058
1045 * may already hold it. This means checking needs_reset without 1059 while (t > 0) {
1046 * lock, and not fiddling with any gpu internals. 1060 if (intr)
1047 * 1061 set_current_state(TASK_INTERRUPTIBLE);
1048 * The callback installed with fence_enable_sw_signaling will 1062 else
1049 * run before our wait_event_*timeout call, so we will see 1063 set_current_state(TASK_UNINTERRUPTIBLE);
1050 * both the signaled fence and the changes to needs_reset. 1064
1051 */ 1065 /*
1066 * radeon_test_signaled must be called after
1067 * set_current_state to prevent a race with wake_up_process
1068 */
1069 if (radeon_test_signaled(fence))
1070 break;
1071
1072 if (rdev->needs_reset) {
1073 t = -EDEADLK;
1074 break;
1075 }
1076
1077 t = schedule_timeout(t);
1078
1079 if (t > 0 && intr && signal_pending(current))
1080 t = -ERESTARTSYS;
1081 }
1082
1083 __set_current_state(TASK_RUNNING);
1084 fence_remove_callback(f, &cb.base);
1052 1085
1053 if (intr)
1054 t = wait_event_interruptible_timeout(rdev->fence_queue,
1055 ((signaled = radeon_test_signaled(fence)) ||
1056 rdev->needs_reset), t);
1057 else
1058 t = wait_event_timeout(rdev->fence_queue,
1059 ((signaled = radeon_test_signaled(fence)) ||
1060 rdev->needs_reset), t);
1061
1062 if (t > 0 && !signaled)
1063 return -EDEADLK;
1064 return t; 1086 return t;
1065} 1087}
1066 1088
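The rewritten wait above replaces wait_event_*timeout() with an open-coded loop: a fence callback wakes the waiting task, and the task state is set before the signalled/needs_reset checks so a wake_up_process() arriving between the check and schedule_timeout() is not lost. A condensed kernel-style sketch of that ordering, using a hypothetical 'done' flag in place of radeon_test_signaled() (illustration only, not the driver's structures):

#include <linux/sched.h>
#include <linux/atomic.h>

/* Sketch of the wait-loop pattern used above. 'done' stands in for
 * radeon_test_signaled(); the needs_reset handling is omitted.
 */
static signed long wait_for_flag(atomic_t *done, bool intr, signed long t)
{
	while (t > 0) {
		set_current_state(intr ? TASK_INTERRUPTIBLE
				       : TASK_UNINTERRUPTIBLE);

		/* test only after changing state, to close the race with
		 * the waker's wake_up_process() */
		if (atomic_read(done))
			break;

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}
	__set_current_state(TASK_RUNNING);
	return t;
}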
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 061eaa9c19c7..122eb5693ba1 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
153 .compute_vmid_bitmap = 0xFF00, 153 .compute_vmid_bitmap = 0xFF00,
154 154
155 .first_compute_pipe = 1, 155 .first_compute_pipe = 1,
156 .compute_pipe_count = 8 - 1, 156 .compute_pipe_count = 4 - 1,
157 }; 157 };
158 158
159 radeon_doorbell_get_kfd_info(rdev, 159 radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
173 else 173 else
174 rbo->placements[i].lpfn = 0; 174 rbo->placements[i].lpfn = 0;
175 } 175 }
176
177 /*
178 * Use two-ended allocation depending on the buffer size to
179 * improve fragmentation quality.
180 * 512kb was measured as the most optimal number.
181 */
182 if (rbo->tbo.mem.size > 512 * 1024) {
183 for (i = 0; i < c; i++) {
184 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
185 }
186 }
187} 176}
188 177
189int radeon_bo_create(struct radeon_device *rdev, 178int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e088e5558da0..a7fb2735d4a9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7130 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); 7130 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
7131 7131
7132 if (!vclk || !dclk) { 7132 if (!vclk || !dclk) {
7133 /* keep the Bypass mode, put PLL to sleep */ 7133 /* keep the Bypass mode */
7134 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
7135 return 0; 7134 return 0;
7136 } 7135 }
7137 7136
@@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7147 /* set VCO_MODE to 1 */ 7146 /* set VCO_MODE to 1 */
7148 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); 7147 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
7149 7148
7150 /* toggle UPLL_SLEEP to 1 then back to 0 */ 7149 /* disable sleep mode */
7151 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
7152 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); 7150 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
7153 7151
7154 /* deassert UPLL_RESET */ 7152 /* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
725 goto out_err1; 725 goto out_err1;
726 } 726 }
727 727
728 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
729 (dev_priv->vram_size >> PAGE_SHIFT));
730 if (unlikely(ret != 0)) {
731 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
732 goto out_err2;
733 }
734
735 dev_priv->has_gmr = true;
736 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
737 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
738 VMW_PL_GMR) != 0) {
739 DRM_INFO("No GMR memory available. "
740 "Graphics memory resources are very limited.\n");
741 dev_priv->has_gmr = false;
742 }
743
744 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
745 dev_priv->has_mob = true;
746 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
747 VMW_PL_MOB) != 0) {
748 DRM_INFO("No MOB memory available. "
749 "3D will be disabled.\n");
750 dev_priv->has_mob = false;
751 }
752 }
753
754 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 728 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
755 dev_priv->mmio_size); 729 dev_priv->mmio_size);
756 730
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
813 goto out_no_fman; 787 goto out_no_fman;
814 } 788 }
815 789
790
791 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
792 (dev_priv->vram_size >> PAGE_SHIFT));
793 if (unlikely(ret != 0)) {
794 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
795 goto out_no_vram;
796 }
797
798 dev_priv->has_gmr = true;
799 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
800 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
801 VMW_PL_GMR) != 0) {
802 DRM_INFO("No GMR memory available. "
803 "Graphics memory resources are very limited.\n");
804 dev_priv->has_gmr = false;
805 }
806
807 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
808 dev_priv->has_mob = true;
809 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
810 VMW_PL_MOB) != 0) {
811 DRM_INFO("No MOB memory available. "
812 "3D will be disabled.\n");
813 dev_priv->has_mob = false;
814 }
815 }
816
816 vmw_kms_save_vga(dev_priv); 817 vmw_kms_save_vga(dev_priv);
817 818
818 /* Start kms and overlay systems, needs fifo. */ 819 /* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
838 vmw_kms_close(dev_priv); 839 vmw_kms_close(dev_priv);
839out_no_kms: 840out_no_kms:
840 vmw_kms_restore_vga(dev_priv); 841 vmw_kms_restore_vga(dev_priv);
842 if (dev_priv->has_mob)
843 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
844 if (dev_priv->has_gmr)
845 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
846 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
847out_no_vram:
841 vmw_fence_manager_takedown(dev_priv->fman); 848 vmw_fence_manager_takedown(dev_priv->fman);
842out_no_fman: 849out_no_fman:
843 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 850 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
853 iounmap(dev_priv->mmio_virt); 860 iounmap(dev_priv->mmio_virt);
854out_err3: 861out_err3:
855 arch_phys_wc_del(dev_priv->mmio_mtrr); 862 arch_phys_wc_del(dev_priv->mmio_mtrr);
856 if (dev_priv->has_mob)
857 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
858 if (dev_priv->has_gmr)
859 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
860 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
861out_err2:
862 (void)ttm_bo_device_release(&dev_priv->bdev); 863 (void)ttm_bo_device_release(&dev_priv->bdev);
863out_err1: 864out_err1:
864 vmw_ttm_global_release(dev_priv); 865 vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
887 } 888 }
888 vmw_kms_close(dev_priv); 889 vmw_kms_close(dev_priv);
889 vmw_overlay_close(dev_priv); 890 vmw_overlay_close(dev_priv);
891
892 if (dev_priv->has_mob)
893 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
894 if (dev_priv->has_gmr)
895 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
897
890 vmw_fence_manager_takedown(dev_priv->fman); 898 vmw_fence_manager_takedown(dev_priv->fman);
891 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 899 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
892 drm_irq_uninstall(dev_priv->dev); 900 drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
898 ttm_object_device_release(&dev_priv->tdev); 906 ttm_object_device_release(&dev_priv->tdev);
899 iounmap(dev_priv->mmio_virt); 907 iounmap(dev_priv->mmio_virt);
900 arch_phys_wc_del(dev_priv->mmio_mtrr); 908 arch_phys_wc_del(dev_priv->mmio_mtrr);
901 if (dev_priv->has_mob)
902 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
903 if (dev_priv->has_gmr)
904 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
905 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
906 (void)ttm_bo_device_release(&dev_priv->bdev); 909 (void)ttm_bo_device_release(&dev_priv->bdev);
907 vmw_ttm_global_release(dev_priv); 910 vmw_ttm_global_release(dev_priv);
908 911
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
1235{ 1238{
1236 struct drm_device *dev = pci_get_drvdata(pdev); 1239 struct drm_device *dev = pci_get_drvdata(pdev);
1237 1240
1241 pci_disable_device(pdev);
1238 drm_put_dev(dev); 1242 drm_put_dev(dev);
1239} 1243}
1240 1244
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
890 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 890 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
891 if (unlikely(ret != 0)) { 891 if (unlikely(ret != 0)) {
892 DRM_ERROR("Could not find or use MOB buffer.\n"); 892 DRM_ERROR("Could not find or use MOB buffer.\n");
893 return -EINVAL; 893 ret = -EINVAL;
894 goto out_no_reloc;
894 } 895 }
895 bo = &vmw_bo->base; 896 bo = &vmw_bo->base;
896 897
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
914 915
915out_no_reloc: 916out_no_reloc:
916 vmw_dmabuf_unreference(&vmw_bo); 917 vmw_dmabuf_unreference(&vmw_bo);
917 vmw_bo_p = NULL; 918 *vmw_bo_p = NULL;
918 return ret; 919 return ret;
919} 920}
920 921
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
951 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 952 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
952 if (unlikely(ret != 0)) { 953 if (unlikely(ret != 0)) {
953 DRM_ERROR("Could not find or use GMR region.\n"); 954 DRM_ERROR("Could not find or use GMR region.\n");
954 return -EINVAL; 955 ret = -EINVAL;
956 goto out_no_reloc;
955 } 957 }
956 bo = &vmw_bo->base; 958 bo = &vmw_bo->base;
957 959
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
974 976
975out_no_reloc: 977out_no_reloc:
976 vmw_dmabuf_unreference(&vmw_bo); 978 vmw_dmabuf_unreference(&vmw_bo);
977 vmw_bo_p = NULL; 979 *vmw_bo_p = NULL;
978 return ret; 980 return ret;
979} 981}
980 982
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2780 NULL, arg->command_size, arg->throttle_us, 2782 NULL, arg->command_size, arg->throttle_us,
2781 (void __user *)(unsigned long)arg->fence_rep, 2783 (void __user *)(unsigned long)arg->fence_rep,
2782 NULL); 2784 NULL);
2783 2785 ttm_read_unlock(&dev_priv->reservation_sem);
2784 if (unlikely(ret != 0)) 2786 if (unlikely(ret != 0))
2785 goto out_unlock; 2787 return ret;
2786 2788
2787 vmw_kms_cursor_post_execbuf(dev_priv); 2789 vmw_kms_cursor_post_execbuf(dev_priv);
2788 2790
2789out_unlock: 2791 return 0;
2790 ttm_read_unlock(&dev_priv->reservation_sem);
2791 return ret;
2792} 2792}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2033 int i; 2033 int i;
2034 struct drm_mode_config *mode_config = &dev->mode_config; 2034 struct drm_mode_config *mode_config = &dev->mode_config;
2035 2035
2036 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2037 if (unlikely(ret != 0))
2038 return ret;
2039
2040 if (!arg->num_outputs) { 2036 if (!arg->num_outputs) {
2041 struct drm_vmw_rect def_rect = {0, 0, 800, 600}; 2037 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
2042 vmw_du_update_layout(dev_priv, 1, &def_rect); 2038 vmw_du_update_layout(dev_priv, 1, &def_rect);
2043 goto out_unlock; 2039 return 0;
2044 } 2040 }
2045 2041
2046 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 2042 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2047 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), 2043 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2048 GFP_KERNEL); 2044 GFP_KERNEL);
2049 if (unlikely(!rects)) { 2045 if (unlikely(!rects))
2050 ret = -ENOMEM; 2046 return -ENOMEM;
2051 goto out_unlock;
2052 }
2053 2047
2054 user_rects = (void __user *)(unsigned long)arg->rects; 2048 user_rects = (void __user *)(unsigned long)arg->rects;
2055 ret = copy_from_user(rects, user_rects, rects_size); 2049 ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2074 2068
2075out_free: 2069out_free:
2076 kfree(rects); 2070 kfree(rects);
2077out_unlock:
2078 ttm_read_unlock(&dev_priv->reservation_sem);
2079 return ret; 2071 return ret;
2080} 2072}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7c669c328c4c..56ce8c2b5530 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 1959 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 1960 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 1961 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
1962 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1963 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 1964 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
1964 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1965 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 204312bfab2c..9c4786759f16 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -586,6 +586,7 @@
586#define USB_VENDOR_ID_LOGITECH 0x046d 586#define USB_VENDOR_ID_LOGITECH 0x046d
587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 587#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c 588#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
589#define USB_DEVICE_ID_LOGITECH_C077 0xc007
589#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 590#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
590#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 591#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
591#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 592#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -898,6 +899,7 @@
898#define USB_VENDOR_ID_TIVO 0x150a 899#define USB_VENDOR_ID_TIVO 0x150a
899#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 900#define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200
900#define USB_DEVICE_ID_TIVO_SLIDE 0x1201 901#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
902#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
901 903
902#define USB_VENDOR_ID_TOPSEED 0x0766 904#define USB_VENDOR_ID_TOPSEED 0x0766
903#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 905#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index d790d8d71f7f..d98696927453 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = {
64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ 64 /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 65 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
67 { } 68 { }
68}; 69};
69MODULE_DEVICE_TABLE(hid, tivo_devices); 70MODULE_DEVICE_TABLE(hid, tivo_devices);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 9be99a67bfe2..a82127753461 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,7 @@ static const struct hid_blacklist {
78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
81 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 82 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 046351cf17f3..bbe32d66e500 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
551 (features->type == CINTIQ && !(data[1] & 0x40))) 551 (features->type == CINTIQ && !(data[1] & 0x40)))
552 return 1; 552 return 1;
553 553
554 if (features->quirks & WACOM_QUIRK_MULTI_INPUT) 554 if (wacom->shared) {
555 wacom->shared->stylus_in_proximity = true; 555 wacom->shared->stylus_in_proximity = true;
556 556
557 if (wacom->shared->touch_down)
558 return 1;
559 }
560
557 /* in Range while exiting */ 561 /* in Range while exiting */
558 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { 562 if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
559 input_report_key(input, BTN_TOUCH, 0); 563 input_report_key(input, BTN_TOUCH, 0);
@@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1043 struct input_dev *input = wacom->input; 1047 struct input_dev *input = wacom->input;
1044 unsigned char *data = wacom->data; 1048 unsigned char *data = wacom->data;
1045 int i; 1049 int i;
1046 int current_num_contacts = 0; 1050 int current_num_contacts = data[61];
1047 int contacts_to_send = 0; 1051 int contacts_to_send = 0;
1048 int num_contacts_left = 4; /* maximum contacts per packet */ 1052 int num_contacts_left = 4; /* maximum contacts per packet */
1049 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; 1053 int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET;
1050 int y_offset = 2; 1054 int y_offset = 2;
1055 static int contact_with_no_pen_down_count = 0;
1051 1056
1052 if (wacom->features.type == WACOM_27QHDT) { 1057 if (wacom->features.type == WACOM_27QHDT) {
1053 current_num_contacts = data[63]; 1058 current_num_contacts = data[63];
1054 num_contacts_left = 10; 1059 num_contacts_left = 10;
1055 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; 1060 byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET;
1056 y_offset = 0; 1061 y_offset = 0;
1057 } else {
1058 current_num_contacts = data[61];
1059 } 1062 }
1060 1063
1061 /* 1064 /*
1062 * First packet resets the counter since only the first 1065 * First packet resets the counter since only the first
1063 * packet in series will have non-zero current_num_contacts. 1066 * packet in series will have non-zero current_num_contacts.
1064 */ 1067 */
1065 if (current_num_contacts) 1068 if (current_num_contacts) {
1066 wacom->num_contacts_left = current_num_contacts; 1069 wacom->num_contacts_left = current_num_contacts;
1070 contact_with_no_pen_down_count = 0;
1071 }
1067 1072
1068 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); 1073 contacts_to_send = min(num_contacts_left, wacom->num_contacts_left);
1069 1074
@@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
1096 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); 1101 input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
1097 input_report_abs(input, ABS_MT_ORIENTATION, w > h); 1102 input_report_abs(input, ABS_MT_ORIENTATION, w > h);
1098 } 1103 }
1104 contact_with_no_pen_down_count++;
1099 } 1105 }
1100 } 1106 }
1101 input_mt_report_pointer_emulation(input, true); 1107 input_mt_report_pointer_emulation(input, true);
1102 1108
1103 wacom->num_contacts_left -= contacts_to_send; 1109 wacom->num_contacts_left -= contacts_to_send;
1104 if (wacom->num_contacts_left <= 0) 1110 if (wacom->num_contacts_left <= 0) {
1105 wacom->num_contacts_left = 0; 1111 wacom->num_contacts_left = 0;
1106 1112 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1107 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1113 }
1108 return 1; 1114 return 1;
1109} 1115}
1110 1116
@@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1116 int current_num_contacts = data[2]; 1122 int current_num_contacts = data[2];
1117 int contacts_to_send = 0; 1123 int contacts_to_send = 0;
1118 int x_offset = 0; 1124 int x_offset = 0;
1125 static int contact_with_no_pen_down_count = 0;
1119 1126
1120 /* MTTPC does not support Height and Width */ 1127 /* MTTPC does not support Height and Width */
1121 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) 1128 if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B)
@@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1125 * First packet resets the counter since only the first 1132 * First packet resets the counter since only the first
1126 * packet in series will have non-zero current_num_contacts. 1133 * packet in series will have non-zero current_num_contacts.
1127 */ 1134 */
1128 if (current_num_contacts) 1135 if (current_num_contacts) {
1129 wacom->num_contacts_left = current_num_contacts; 1136 wacom->num_contacts_left = current_num_contacts;
1137 contact_with_no_pen_down_count = 0;
1138 }
1130 1139
1131 /* There are at most 5 contacts per packet */ 1140 /* There are at most 5 contacts per packet */
1132 contacts_to_send = min(5, wacom->num_contacts_left); 1141 contacts_to_send = min(5, wacom->num_contacts_left);
@@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
1147 int y = get_unaligned_le16(&data[offset + x_offset + 9]); 1156 int y = get_unaligned_le16(&data[offset + x_offset + 9]);
1148 input_report_abs(input, ABS_MT_POSITION_X, x); 1157 input_report_abs(input, ABS_MT_POSITION_X, x);
1149 input_report_abs(input, ABS_MT_POSITION_Y, y); 1158 input_report_abs(input, ABS_MT_POSITION_Y, y);
1159 contact_with_no_pen_down_count++;
1150 } 1160 }
1151 } 1161 }
1152 input_mt_report_pointer_emulation(input, true); 1162 input_mt_report_pointer_emulation(input, true);
1153 1163
1154 wacom->num_contacts_left -= contacts_to_send; 1164 wacom->num_contacts_left -= contacts_to_send;
1155 if (wacom->num_contacts_left < 0) 1165 if (wacom->num_contacts_left <= 0) {
1156 wacom->num_contacts_left = 0; 1166 wacom->num_contacts_left = 0;
1157 1167 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1158 wacom->shared->touch_down = (wacom->num_contacts_left > 0); 1168 }
1159 return 1; 1169 return 1;
1160} 1170}
1161 1171
@@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1193{ 1203{
1194 unsigned char *data = wacom->data; 1204 unsigned char *data = wacom->data;
1195 struct input_dev *input = wacom->input; 1205 struct input_dev *input = wacom->input;
1196 bool prox; 1206 bool prox = !wacom->shared->stylus_in_proximity;
1197 int x = 0, y = 0; 1207 int x = 0, y = 0;
1198 1208
1199 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) 1209 if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
1200 return 0; 1210 return 0;
1201 1211
1202 if (!wacom->shared->stylus_in_proximity) { 1212 if (len == WACOM_PKGLEN_TPC1FG) {
1203 if (len == WACOM_PKGLEN_TPC1FG) { 1213 prox = prox && (data[0] & 0x01);
1204 prox = data[0] & 0x01; 1214 x = get_unaligned_le16(&data[1]);
1205 x = get_unaligned_le16(&data[1]); 1215 y = get_unaligned_le16(&data[3]);
1206 y = get_unaligned_le16(&data[3]); 1216 } else if (len == WACOM_PKGLEN_TPC1FG_B) {
1207 } else if (len == WACOM_PKGLEN_TPC1FG_B) { 1217 prox = prox && (data[2] & 0x01);
1208 prox = data[2] & 0x01; 1218 x = get_unaligned_le16(&data[3]);
1209 x = get_unaligned_le16(&data[3]); 1219 y = get_unaligned_le16(&data[5]);
1210 y = get_unaligned_le16(&data[5]); 1220 } else {
1211 } else { 1221 prox = prox && (data[1] & 0x01);
1212 prox = data[1] & 0x01; 1222 x = le16_to_cpup((__le16 *)&data[2]);
1213 x = le16_to_cpup((__le16 *)&data[2]); 1223 y = le16_to_cpup((__le16 *)&data[4]);
1214 y = le16_to_cpup((__le16 *)&data[4]); 1224 }
1215 }
1216 } else
1217 /* force touch out when pen is in prox */
1218 prox = 0;
1219 1225
1220 if (prox) { 1226 if (prox) {
1221 input_report_abs(input, ABS_X, x); 1227 input_report_abs(input, ABS_X, x);
@@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1613 struct input_dev *pad_input = wacom->pad_input; 1619 struct input_dev *pad_input = wacom->pad_input;
1614 unsigned char *data = wacom->data; 1620 unsigned char *data = wacom->data;
1615 int i; 1621 int i;
1622 int contact_with_no_pen_down_count = 0;
1616 1623
1617 if (data[0] != 0x02) 1624 if (data[0] != 0x02)
1618 return 0; 1625 return 0;
@@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1640 } 1647 }
1641 input_report_abs(input, ABS_MT_POSITION_X, x); 1648 input_report_abs(input, ABS_MT_POSITION_X, x);
1642 input_report_abs(input, ABS_MT_POSITION_Y, y); 1649 input_report_abs(input, ABS_MT_POSITION_Y, y);
1650 contact_with_no_pen_down_count++;
1643 } 1651 }
1644 } 1652 }
1645 1653
@@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
1649 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); 1657 input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
1650 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); 1658 input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
1651 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); 1659 input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
1660 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1652 1661
1653 return 1; 1662 return 1;
1654} 1663}
1655 1664
1656static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) 1665static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count)
1657{ 1666{
1658 struct wacom_features *features = &wacom->features; 1667 struct wacom_features *features = &wacom->features;
1659 struct input_dev *input = wacom->input; 1668 struct input_dev *input = wacom->input;
@@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1661 int slot = input_mt_get_slot_by_key(input, data[0]); 1670 int slot = input_mt_get_slot_by_key(input, data[0]);
1662 1671
1663 if (slot < 0) 1672 if (slot < 0)
1664 return; 1673 return 0;
1665 1674
1666 touch = touch && !wacom->shared->stylus_in_proximity; 1675 touch = touch && !wacom->shared->stylus_in_proximity;
1667 1676
@@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1693 input_report_abs(input, ABS_MT_POSITION_Y, y); 1702 input_report_abs(input, ABS_MT_POSITION_Y, y);
1694 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); 1703 input_report_abs(input, ABS_MT_TOUCH_MAJOR, width);
1695 input_report_abs(input, ABS_MT_TOUCH_MINOR, height); 1704 input_report_abs(input, ABS_MT_TOUCH_MINOR, height);
1705 last_touch_count++;
1696 } 1706 }
1707 return last_touch_count;
1697} 1708}
1698 1709
1699static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) 1710static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1718 unsigned char *data = wacom->data; 1729 unsigned char *data = wacom->data;
1719 int count = data[1] & 0x07; 1730 int count = data[1] & 0x07;
1720 int i; 1731 int i;
1732 int contact_with_no_pen_down_count = 0;
1721 1733
1722 if (data[0] != 0x02) 1734 if (data[0] != 0x02)
1723 return 0; 1735 return 0;
@@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
1728 int msg_id = data[offset]; 1740 int msg_id = data[offset];
1729 1741
1730 if (msg_id >= 2 && msg_id <= 17) 1742 if (msg_id >= 2 && msg_id <= 17)
1731 wacom_bpt3_touch_msg(wacom, data + offset); 1743 contact_with_no_pen_down_count =
1744 wacom_bpt3_touch_msg(wacom, data + offset,
1745 contact_with_no_pen_down_count);
1732 else if (msg_id == 128) 1746 else if (msg_id == 128)
1733 wacom_bpt3_button_msg(wacom, data + offset); 1747 wacom_bpt3_button_msg(wacom, data + offset);
1734 1748
1735 } 1749 }
1736 input_mt_report_pointer_emulation(input, true); 1750 input_mt_report_pointer_emulation(input, true);
1751 wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
1737 1752
1738 return 1; 1753 return 1;
1739} 1754}
@@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
1759 return 0; 1774 return 0;
1760 } 1775 }
1761 1776
1777 if (wacom->shared->touch_down)
1778 return 0;
1779
1762 prox = (data[1] & 0x20) == 0x20; 1780 prox = (data[1] & 0x20) == 0x20;
1763 1781
1764 /* 1782 /*
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 210cf4874cb7..edf274cabe81 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
679 status = driver->remove(client); 679 status = driver->remove(client);
680 } 680 }
681 681
682 if (dev->of_node)
683 irq_dispose_mapping(client->irq);
684
685 dev_pm_domain_detach(&client->dev, true); 682 dev_pm_domain_detach(&client->dev, true);
686 return status; 683 return status;
687} 684}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1793aea4a7d2..6eb738ca6d2f 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, 1793 tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN,
1794 IDETAPE_DSC_RW_MAX); 1794 IDETAPE_DSC_RW_MAX);
1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " 1795 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
1796 "%lums tDSC%s\n", 1796 "%ums tDSC%s\n",
1797 drive->name, tape->name, *(u16 *)&tape->caps[14], 1797 drive->name, tape->name, *(u16 *)&tape->caps[14],
1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, 1798 (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
1799 tape->buffer_size / 1024, 1799 tape->buffer_size / 1024,
1800 tape->best_dsc_rw_freq * 1000 / HZ, 1800 jiffies_to_msecs(tape->best_dsc_rw_freq),
1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); 1801 (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : "");
1802 1802
1803 ide_proc_register_driver(drive, tape->driver); 1803 ide_proc_register_driver(drive, tape->driver);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c7619716c31d..59040265e361 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,6 +64,14 @@ enum {
64#define GUID_TBL_BLK_NUM_ENTRIES 8 64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) 65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66 66
 67/* Counters should saturate once they reach their maximum value */
68#define ASSIGN_32BIT_COUNTER(counter, value) do {\
69 if ((value) > U32_MAX) \
70 counter = cpu_to_be32(U32_MAX); \
71 else \
72 counter = cpu_to_be32(value); \
73} while (0)
74
67struct mlx4_mad_rcv_buf { 75struct mlx4_mad_rcv_buf {
68 struct ib_grh grh; 76 struct ib_grh grh;
69 u8 payload[256]; 77 u8 payload[256];
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
806static void edit_counter(struct mlx4_counter *cnt, 814static void edit_counter(struct mlx4_counter *cnt,
807 struct ib_pma_portcounters *pma_cnt) 815 struct ib_pma_portcounters *pma_cnt)
808{ 816{
809 pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); 817 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
810 pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); 818 (be64_to_cpu(cnt->tx_bytes) >> 2));
811 pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); 819 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
812 pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); 820 (be64_to_cpu(cnt->rx_bytes) >> 2));
821 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
822 be64_to_cpu(cnt->tx_frames));
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
824 be64_to_cpu(cnt->rx_frames));
813} 825}
814 826
815static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 827static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
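The ASSIGN_32BIT_COUNTER() macro introduced above clamps a 64-bit hardware counter to U32_MAX before converting it for the 32-bit PMA field, so an overflowing counter saturates rather than wrapping. A self-contained illustration of the saturation (plain C; the cpu_to_be32() byte-order step is left out to keep the example host-independent):

#include <stdint.h>
#include <stdio.h>

/* Saturating narrow from 64 to 32 bits, mirroring the clamp performed by
 * ASSIGN_32BIT_COUNTER() before the byte-order conversion.
 */
static uint32_t saturate_u32(uint64_t value)
{
	return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
}

int main(void)
{
	printf("%u\n", saturate_u32(1000));        /* 1000 */
	printf("%u\n", saturate_u32(1ULL << 40));  /* 4294967295, clamped */
	return 0;
}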
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ac6e2b710ea6..b972c0b41799 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work)
2697 spin_lock_bh(&ibdev->iboe.lock); 2697 spin_lock_bh(&ibdev->iboe.lock);
2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) { 2698 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; 2699 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2700 enum ib_port_state curr_port_state;
2700 2701
2701 enum ib_port_state curr_port_state = 2702 if (!curr_netdev)
2703 continue;
2704
2705 curr_port_state =
2702 (netif_running(curr_netdev) && 2706 (netif_running(curr_netdev) &&
2703 netif_carrier_ok(curr_netdev)) ? 2707 netif_carrier_ok(curr_netdev)) ?
2704 IB_PORT_ACTIVE : IB_PORT_DOWN; 2708 IB_PORT_ACTIVE : IB_PORT_DOWN;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f2cceb6493a0..dda605836546 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -67,9 +67,6 @@
67#define X_MAX_POSITIVE 8176 67#define X_MAX_POSITIVE 8176
68#define Y_MAX_POSITIVE 8176 68#define Y_MAX_POSITIVE 8176
69 69
70/* maximum ABS_MT_POSITION displacement (in mm) */
71#define DMAX 10
72
73/***************************************************************************** 70/*****************************************************************************
74 * Stuff we need even when we do not want native Synaptics support 71 * Stuff we need even when we do not want native Synaptics support
75 ****************************************************************************/ 72 ****************************************************************************/
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
123 120
124static bool cr48_profile_sensor; 121static bool cr48_profile_sensor;
125 122
123#define ANY_BOARD_ID 0
126struct min_max_quirk { 124struct min_max_quirk {
127 const char * const *pnp_ids; 125 const char * const *pnp_ids;
126 struct {
127 unsigned long int min, max;
128 } board_id;
128 int x_min, x_max, y_min, y_max; 129 int x_min, x_max, y_min, y_max;
129}; 130};
130 131
131static const struct min_max_quirk min_max_pnpid_table[] = { 132static const struct min_max_quirk min_max_pnpid_table[] = {
132 { 133 {
133 (const char * const []){"LEN0033", NULL}, 134 (const char * const []){"LEN0033", NULL},
135 {ANY_BOARD_ID, ANY_BOARD_ID},
134 1024, 5052, 2258, 4832 136 1024, 5052, 2258, 4832
135 }, 137 },
136 { 138 {
137 (const char * const []){"LEN0035", "LEN0042", NULL}, 139 (const char * const []){"LEN0042", NULL},
140 {ANY_BOARD_ID, ANY_BOARD_ID},
138 1232, 5710, 1156, 4696 141 1232, 5710, 1156, 4696
139 }, 142 },
140 { 143 {
141 (const char * const []){"LEN0034", "LEN0036", "LEN0037", 144 (const char * const []){"LEN0034", "LEN0036", "LEN0037",
142 "LEN0039", "LEN2002", "LEN2004", 145 "LEN0039", "LEN2002", "LEN2004",
143 NULL}, 146 NULL},
147 {ANY_BOARD_ID, 2961},
144 1024, 5112, 2024, 4832 148 1024, 5112, 2024, 4832
145 }, 149 },
146 { 150 {
147 (const char * const []){"LEN2001", NULL}, 151 (const char * const []){"LEN2001", NULL},
152 {ANY_BOARD_ID, ANY_BOARD_ID},
148 1024, 5022, 2508, 4832 153 1024, 5022, 2508, 4832
149 }, 154 },
150 { 155 {
151 (const char * const []){"LEN2006", NULL}, 156 (const char * const []){"LEN2006", NULL},
157 {ANY_BOARD_ID, ANY_BOARD_ID},
152 1264, 5675, 1171, 4688 158 1264, 5675, 1171, 4688
153 }, 159 },
154 { } 160 { }
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
175 "LEN0041", 181 "LEN0041",
176 "LEN0042", /* Yoga */ 182 "LEN0042", /* Yoga */
177 "LEN0045", 183 "LEN0045",
178 "LEN0046",
179 "LEN0047", 184 "LEN0047",
180 "LEN0048",
181 "LEN0049", 185 "LEN0049",
182 "LEN2000", 186 "LEN2000",
183 "LEN2001", /* Edge E431 */ 187 "LEN2001", /* Edge E431 */
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse)
235 return 0; 239 return 0;
236} 240}
237 241
242static int synaptics_more_extended_queries(struct psmouse *psmouse)
243{
244 struct synaptics_data *priv = psmouse->private;
245 unsigned char buf[3];
246
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf))
248 return -1;
249
250 priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2];
251
252 return 0;
253}
254
238/* 255/*
239 * Read the board id from the touchpad 256 * Read the board id and the "More Extended Queries" from the touchpad
240 * The board id is encoded in the "QUERY MODES" response 257 * The board id is encoded in the "QUERY MODES" response
241 */ 258 */
242static int synaptics_board_id(struct psmouse *psmouse) 259static int synaptics_query_modes(struct psmouse *psmouse)
243{ 260{
244 struct synaptics_data *priv = psmouse->private; 261 struct synaptics_data *priv = psmouse->private;
245 unsigned char bid[3]; 262 unsigned char bid[3];
246 263
 264	/* firmwares prior to 7.5 have no board_id encoded */
265 if (SYN_ID_FULL(priv->identity) < 0x705)
266 return 0;
267
247 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) 268 if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
248 return -1; 269 return -1;
249 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; 270 priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
271
272 if (SYN_MEXT_CAP_BIT(bid[0]))
273 return synaptics_more_extended_queries(psmouse);
274
250 return 0; 275 return 0;
251} 276}
252 277
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
346{ 371{
347 struct synaptics_data *priv = psmouse->private; 372 struct synaptics_data *priv = psmouse->private;
348 unsigned char resp[3]; 373 unsigned char resp[3];
349 int i;
350 374
351 if (SYN_ID_MAJOR(priv->identity) < 4) 375 if (SYN_ID_MAJOR(priv->identity) < 4)
352 return 0; 376 return 0;
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
358 } 382 }
359 } 383 }
360 384
361 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
362 if (psmouse_matches_pnp_id(psmouse,
363 min_max_pnpid_table[i].pnp_ids)) {
364 priv->x_min = min_max_pnpid_table[i].x_min;
365 priv->x_max = min_max_pnpid_table[i].x_max;
366 priv->y_min = min_max_pnpid_table[i].y_min;
367 priv->y_max = min_max_pnpid_table[i].y_max;
368 return 0;
369 }
370 }
371
372 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && 385 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
373 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { 386 SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
374 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { 387 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
377 } else { 390 } else {
378 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 391 priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
379 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 392 priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
393 psmouse_info(psmouse,
394 "queried max coordinates: x [..%d], y [..%d]\n",
395 priv->x_max, priv->y_max);
380 } 396 }
381 } 397 }
382 398
383 if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && 399 if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
384 SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { 400 (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
401 /*
402 * Firmware v8.1 does not report proper number of extended
403 * capabilities, but has been proven to report correct min
404 * coordinates.
405 */
406 SYN_ID_FULL(priv->identity) == 0x801)) {
385 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { 407 if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
386 psmouse_warn(psmouse, 408 psmouse_warn(psmouse,
387 "device claims to have min coordinates query, but I'm not able to read it.\n"); 409 "device claims to have min coordinates query, but I'm not able to read it.\n");
388 } else { 410 } else {
389 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); 411 priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
390 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); 412 priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
413 psmouse_info(psmouse,
414 "queried min coordinates: x [%d..], y [%d..]\n",
415 priv->x_min, priv->y_min);
391 } 416 }
392 } 417 }
393 418
394 return 0; 419 return 0;
395} 420}
396 421
422/*
423 * Apply quirk(s) if the hardware matches
424 */
425
426static void synaptics_apply_quirks(struct psmouse *psmouse)
427{
428 struct synaptics_data *priv = psmouse->private;
429 int i;
430
431 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
432 if (!psmouse_matches_pnp_id(psmouse,
433 min_max_pnpid_table[i].pnp_ids))
434 continue;
435
436 if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
437 priv->board_id < min_max_pnpid_table[i].board_id.min)
438 continue;
439
440 if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
441 priv->board_id > min_max_pnpid_table[i].board_id.max)
442 continue;
443
444 priv->x_min = min_max_pnpid_table[i].x_min;
445 priv->x_max = min_max_pnpid_table[i].x_max;
446 priv->y_min = min_max_pnpid_table[i].y_min;
447 priv->y_max = min_max_pnpid_table[i].y_max;
448 psmouse_info(psmouse,
449 "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
450 priv->x_min, priv->x_max,
451 priv->y_min, priv->y_max);
452 break;
453 }
454}
455
397static int synaptics_query_hardware(struct psmouse *psmouse) 456static int synaptics_query_hardware(struct psmouse *psmouse)
398{ 457{
399 if (synaptics_identify(psmouse)) 458 if (synaptics_identify(psmouse))
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
402 return -1; 461 return -1;
403 if (synaptics_firmware_id(psmouse)) 462 if (synaptics_firmware_id(psmouse))
404 return -1; 463 return -1;
405 if (synaptics_board_id(psmouse)) 464 if (synaptics_query_modes(psmouse))
406 return -1; 465 return -1;
407 if (synaptics_capability(psmouse)) 466 if (synaptics_capability(psmouse))
408 return -1; 467 return -1;
409 if (synaptics_resolution(psmouse)) 468 if (synaptics_resolution(psmouse))
410 return -1; 469 return -1;
411 470
471 synaptics_apply_quirks(psmouse);
472
412 return 0; 473 return 0;
413} 474}
414 475
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf)
516 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; 577 return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4;
517} 578}
518 579
519static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) 580static void synaptics_pass_pt_packet(struct psmouse *psmouse,
581 struct serio *ptport,
582 unsigned char *packet)
520{ 583{
584 struct synaptics_data *priv = psmouse->private;
521 struct psmouse *child = serio_get_drvdata(ptport); 585 struct psmouse *child = serio_get_drvdata(ptport);
522 586
523 if (child && child->state == PSMOUSE_ACTIVATED) { 587 if (child && child->state == PSMOUSE_ACTIVATED) {
524 serio_interrupt(ptport, packet[1], 0); 588 serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0);
525 serio_interrupt(ptport, packet[4], 0); 589 serio_interrupt(ptport, packet[4], 0);
526 serio_interrupt(ptport, packet[5], 0); 590 serio_interrupt(ptport, packet[5], 0);
527 if (child->pktsize == 4) 591 if (child->pktsize == 4)
528 serio_interrupt(ptport, packet[2], 0); 592 serio_interrupt(ptport, packet[2], 0);
529 } else 593 } else {
530 serio_interrupt(ptport, packet[1], 0); 594 serio_interrupt(ptport, packet[1], 0);
595 }
531} 596}
532 597
533static void synaptics_pt_activate(struct psmouse *psmouse) 598static void synaptics_pt_activate(struct psmouse *psmouse)
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
605 } 670 }
606} 671}
607 672
673static void synaptics_parse_ext_buttons(const unsigned char buf[],
674 struct synaptics_data *priv,
675 struct synaptics_hw_state *hw)
676{
677 unsigned int ext_bits =
678 (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
679 unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
680
681 hw->ext_buttons = buf[4] & ext_mask;
682 hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
683}
684
608static bool is_forcepad; 685static bool is_forcepad;
609 686
610static int synaptics_parse_hw_state(const unsigned char buf[], 687static int synaptics_parse_hw_state(const unsigned char buf[],
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
691 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; 768 hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
692 } 769 }
693 770
694 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && 771 if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
695 ((buf[0] ^ buf[3]) & 0x02)) { 772 ((buf[0] ^ buf[3]) & 0x02)) {
696 switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { 773 synaptics_parse_ext_buttons(buf, priv, hw);
697 default:
698 /*
699 * if nExtBtn is greater than 8 it should be
700 * considered invalid and treated as 0
701 */
702 break;
703 case 8:
704 hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
705 hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
706 case 6:
707 hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
708 hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
709 case 4:
710 hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
711 hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
712 case 2:
713 hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
714 hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
715 }
716 } 774 }
717 } else { 775 } else {
718 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); 776 hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
774 } 832 }
775} 833}
776 834
835static void synaptics_report_ext_buttons(struct psmouse *psmouse,
836 const struct synaptics_hw_state *hw)
837{
838 struct input_dev *dev = psmouse->dev;
839 struct synaptics_data *priv = psmouse->private;
840 int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
841 char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
842 int i;
843
844 if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
845 return;
846
847 /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
848 if (SYN_ID_FULL(priv->identity) == 0x801 &&
849 !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
850 return;
851
852 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) {
853 for (i = 0; i < ext_bits; i++) {
854 input_report_key(dev, BTN_0 + 2 * i,
855 hw->ext_buttons & (1 << i));
856 input_report_key(dev, BTN_1 + 2 * i,
857 hw->ext_buttons & (1 << (i + ext_bits)));
858 }
859 return;
860 }
861
862 /*
863 * This generation of touchpads has the trackstick buttons
864 * physically wired to the touchpad. Re-route them through
865 * the pass-through interface.
866 */
867 if (!priv->pt_port)
868 return;
869
870 /* The trackstick expects at most 3 buttons */
871 priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) |
872 SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 |
873 SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2;
874
875 synaptics_pass_pt_packet(psmouse, priv->pt_port, buf);
876}
877
777static void synaptics_report_buttons(struct psmouse *psmouse, 878static void synaptics_report_buttons(struct psmouse *psmouse,
778 const struct synaptics_hw_state *hw) 879 const struct synaptics_hw_state *hw)
779{ 880{
780 struct input_dev *dev = psmouse->dev; 881 struct input_dev *dev = psmouse->dev;
781 struct synaptics_data *priv = psmouse->private; 882 struct synaptics_data *priv = psmouse->private;
782 int i;
783 883
784 input_report_key(dev, BTN_LEFT, hw->left); 884 input_report_key(dev, BTN_LEFT, hw->left);
785 input_report_key(dev, BTN_RIGHT, hw->right); 885 input_report_key(dev, BTN_RIGHT, hw->right);
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
792 input_report_key(dev, BTN_BACK, hw->down); 892 input_report_key(dev, BTN_BACK, hw->down);
793 } 893 }
794 894
795 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 895 synaptics_report_ext_buttons(psmouse, hw);
796 input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
797} 896}
798 897
799static void synaptics_report_mt_data(struct psmouse *psmouse, 898static void synaptics_report_mt_data(struct psmouse *psmouse,
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
813 pos[i].y = synaptics_invert_y(hw[i]->y); 912 pos[i].y = synaptics_invert_y(hw[i]->y);
814 } 913 }
815 914
816 input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); 915 input_mt_assign_slots(dev, slot, pos, nsemi, 0);
817 916
818 for (i = 0; i < nsemi; i++) { 917 for (i = 0; i < nsemi; i++) {
819 input_mt_slot(dev, slot[i]); 918 input_mt_slot(dev, slot[i]);
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse)
1014 if (SYN_CAP_PASS_THROUGH(priv->capabilities) && 1113 if (SYN_CAP_PASS_THROUGH(priv->capabilities) &&
1015 synaptics_is_pt_packet(psmouse->packet)) { 1114 synaptics_is_pt_packet(psmouse->packet)) {
1016 if (priv->pt_port) 1115 if (priv->pt_port)
1017 synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); 1116 synaptics_pass_pt_packet(psmouse, priv->pt_port,
1117 psmouse->packet);
1018 } else 1118 } else
1019 synaptics_process_packet(psmouse); 1119 synaptics_process_packet(psmouse);
1020 1120
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse,
1116 __set_bit(BTN_BACK, dev->keybit); 1216 __set_bit(BTN_BACK, dev->keybit);
1117 } 1217 }
1118 1218
1119 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) 1219 if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1120 __set_bit(BTN_0 + i, dev->keybit); 1220 for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
1221 __set_bit(BTN_0 + i, dev->keybit);
1121 1222
1122 __clear_bit(EV_REL, dev->evbit); 1223 __clear_bit(EV_REL, dev->evbit);
1123 __clear_bit(REL_X, dev->relbit); 1224 __clear_bit(REL_X, dev->relbit);
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse,
1125 1226
1126 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1227 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1127 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1228 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1128 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) 1229 if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
1230 !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10))
1129 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); 1231 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1130 /* Clickpads report only left button */ 1232 /* Clickpads report only left button */
1131 __clear_bit(BTN_RIGHT, dev->keybit); 1233 __clear_bit(BTN_RIGHT, dev->keybit);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index aedc3299b14e..ee4bd0d12b26 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -22,6 +22,7 @@
22#define SYN_QUE_EXT_CAPAB_0C 0x0c 22#define SYN_QUE_EXT_CAPAB_0C 0x0c
23#define SYN_QUE_EXT_MAX_COORDS 0x0d 23#define SYN_QUE_EXT_MAX_COORDS 0x0d
24#define SYN_QUE_EXT_MIN_COORDS 0x0f 24#define SYN_QUE_EXT_MIN_COORDS 0x0f
25#define SYN_QUE_MEXT_CAPAB_10 0x10
25 26
 26/* synaptics modes */ 27/* synaptics modes */
27#define SYN_BIT_ABSOLUTE_MODE (1 << 7) 28#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -53,6 +54,7 @@
53#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) 54#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
54#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) 55#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
55#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) 56#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
57#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1))
56 58
57/* 59/*
58 * The following describes response for the 0x0c query. 60 * The following describes response for the 0x0c query.
@@ -89,6 +91,30 @@
89#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) 91#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
90#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) 92#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
91 93
94/*
 95 * The following describes response for the 0x10 query.
96 *
97 * byte mask name meaning
98 * ---- ---- ------- ------------
99 * 1 0x01 ext buttons are stick buttons exported in the extended
100 * capability are actually meant to be used
 101 *			by the trackstick (pass-through).
102 * 1 0x02 SecurePad the touchpad is a SecurePad, so it
103 * contains a built-in fingerprint reader.
 104 * 1	0xe0	more ext	count how many more extended queries are
105 * available after this one.
106 * 2 0xff SecurePad width the width of the SecurePad fingerprint
107 * reader.
108 * 3 0xff SecurePad height the height of the SecurePad fingerprint
109 * reader.
110 */
111#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000)
112#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000)
113
114#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01))
115#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02))
116#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04))
117
92/* synaptics modes query bits */ 118/* synaptics modes query bits */
93#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) 119#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
94#define SYN_MODE_RATE(m) ((m) & (1 << 6)) 120#define SYN_MODE_RATE(m) ((m) & (1 << 6))
@@ -143,6 +169,7 @@ struct synaptics_data {
143 unsigned long int capabilities; /* Capabilities */ 169 unsigned long int capabilities; /* Capabilities */
144 unsigned long int ext_cap; /* Extended Capabilities */ 170 unsigned long int ext_cap; /* Extended Capabilities */
145 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ 171 unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
172 unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */
146 unsigned long int identity; /* Identification */ 173 unsigned long int identity; /* Identification */
147 unsigned int x_res, y_res; /* X/Y resolution in units/mm */ 174 unsigned int x_res, y_res; /* X/Y resolution in units/mm */
148 unsigned int x_max, y_max; /* Max coordinates (from FW) */ 175 unsigned int x_max, y_max; /* Max coordinates (from FW) */
@@ -156,6 +183,7 @@ struct synaptics_data {
156 bool disable_gesture; /* disable gestures */ 183 bool disable_gesture; /* disable gestures */
157 184
158 struct serio *pt_port; /* Pass-through serio port */ 185 struct serio *pt_port; /* Pass-through serio port */
186 unsigned char pt_buttons; /* Pass-through buttons */
159 187
160 /* 188 /*
161 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet 189 * Last received Advanced Gesture Mode (AGM) packet. An AGM packet
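
The new SYN_CAP_* macros above pull individual bits out of the 0x10 query response and out of the extended-button state byte. A minimal userspace sketch of the same masking; the query value and button byte are invented for illustration, not taken from any real touchpad:

#include <stdio.h>

/* Same masks as the synaptics.h additions above. */
#define SYN_CAP_EXT_BUTTONS_STICK(ex10)  ((ex10) & 0x010000)
#define SYN_CAP_SECUREPAD(ex10)          ((ex10) & 0x020000)
#define SYN_CAP_EXT_BUTTON_STICK_L(eb)   (!!((eb) & 0x01))
#define SYN_CAP_EXT_BUTTON_STICK_M(eb)   (!!((eb) & 0x02))
#define SYN_CAP_EXT_BUTTON_STICK_R(eb)   (!!((eb) & 0x04))

int main(void)
{
	/* Hypothetical 0x10 response: byte 1 = 0x03 (stick buttons + SecurePad). */
	unsigned long ext_cap_10 = 0x030000;
	/* Hypothetical extended-button byte: left and right pressed. */
	unsigned char eb = 0x05;

	printf("ext buttons routed to stick: %s\n",
	       SYN_CAP_EXT_BUTTONS_STICK(ext_cap_10) ? "yes" : "no");
	printf("SecurePad present:           %s\n",
	       SYN_CAP_SECUREPAD(ext_cap_10) ? "yes" : "no");
	printf("stick buttons L/M/R:         %d/%d/%d\n",
	       SYN_CAP_EXT_BUTTON_STICK_L(eb),
	       SYN_CAP_EXT_BUTTON_STICK_M(eb),
	       SYN_CAP_EXT_BUTTON_STICK_R(eb));
	return 0;
}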
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index baa0d9786f50..1ae4e547b419 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE
23config IOMMU_IO_PGTABLE_LPAE 23config IOMMU_IO_PGTABLE_LPAE
24 bool "ARMv7/v8 Long Descriptor Format" 24 bool "ARMv7/v8 Long Descriptor Format"
25 select IOMMU_IO_PGTABLE 25 select IOMMU_IO_PGTABLE
26 depends on ARM || ARM64 || COMPILE_TEST
26 help 27 help
27 Enable support for the ARM long descriptor pagetable format. 28 Enable support for the ARM long descriptor pagetable format.
28 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page 29 This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -63,6 +64,7 @@ config MSM_IOMMU
63 bool "MSM IOMMU Support" 64 bool "MSM IOMMU Support"
64 depends on ARM 65 depends on ARM
65 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST 66 depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
67 depends on BROKEN
66 select IOMMU_API 68 select IOMMU_API
67 help 69 help
68 Support for the IOMMUs found on certain Qualcomm SOCs. 70 Support for the IOMMUs found on certain Qualcomm SOCs.
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 7ce52737c7a1..dc14fec4ede1 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = {
1186 1186
1187static int __init exynos_iommu_init(void) 1187static int __init exynos_iommu_init(void)
1188{ 1188{
1189 struct device_node *np;
1189 int ret; 1190 int ret;
1190 1191
1192 np = of_find_matching_node(NULL, sysmmu_of_match);
1193 if (!np)
1194 return 0;
1195
1196 of_node_put(np);
1197
1191 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", 1198 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1192 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); 1199 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1193 if (!lv2table_kmem_cache) { 1200 if (!lv2table_kmem_cache) {
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 5a500edf00cc..b610a8dee238 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -56,7 +56,8 @@
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ 56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift) 57 * (d)->bits_per_level) + (d)->pg_shift)
58 58
59#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) 59#define ARM_LPAE_PAGES_PER_PGD(d) \
60 DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)
60 61
61/* 62/*
62 * Calculate the index at level l used to map virtual address a using the 63 * Calculate the index at level l used to map virtual address a using the
@@ -66,7 +67,7 @@
66 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) 67 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
67 68
68#define ARM_LPAE_LVL_IDX(a,l,d) \ 69#define ARM_LPAE_LVL_IDX(a,l,d) \
69 (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ 70 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
70 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) 71 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
71 72
72/* Calculate the block/page mapping size at level l for pagetable in d. */ 73/* Calculate the block/page mapping size at level l for pagetable in d. */
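
The two io-pgtable-arm changes above are purely arithmetic: ARM_LPAE_PAGES_PER_PGD now rounds up, so a PGD smaller than one granule still counts as one page, and ARM_LPAE_LVL_IDX widens the address to u64 before shifting so the index math stays well defined when the level shift reaches the width of the address type. A standalone sketch of both effects, with made-up sizes that are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pgd_size = 256;   /* hypothetical: PGD of 32 8-byte entries */
	unsigned long pg_shift = 12;    /* 4K granule */

	/* Old formula: 256 >> 12 == 0 pages; rounding up gives the one page
	 * that was actually allocated. */
	printf("pages per pgd, shift: %lu, round-up: %lu\n",
	       pgd_size >> pg_shift,
	       DIV_ROUND_UP(pgd_size, 1UL << pg_shift));

	/* Widening before the shift keeps the index well defined even when
	 * the level shift is >= the width of the iova type (shifting a
	 * 32-bit value by 32 or more is undefined behaviour). */
	uint32_t iova = 0xffffffffu;
	unsigned int lvl_shift = 30, idx_mask = 0x3;   /* illustrative values */
	printf("level index: %llu\n",
	       (unsigned long long)(((uint64_t)iova >> lvl_shift) & idx_mask));
	return 0;
}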
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index f59f857b702e..a4ba851825c2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void)
1376 struct kmem_cache *p; 1376 struct kmem_cache *p;
1377 const unsigned long flags = SLAB_HWCACHE_ALIGN; 1377 const unsigned long flags = SLAB_HWCACHE_ALIGN;
1378 size_t align = 1 << 10; /* L2 pagetable alignement */ 1378 size_t align = 1 << 10; /* L2 pagetable alignement */
1379 struct device_node *np;
1380
1381 np = of_find_matching_node(NULL, omap_iommu_of_match);
1382 if (!np)
1383 return 0;
1384
1385 of_node_put(np);
1379 1386
1380 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, 1387 p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1381 iopte_cachep_ctor); 1388 iopte_cachep_ctor);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 6a8b1ec4a48a..9f74fddcd304 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = {
1015 1015
1016static int __init rk_iommu_init(void) 1016static int __init rk_iommu_init(void)
1017{ 1017{
1018 struct device_node *np;
1018 int ret; 1019 int ret;
1019 1020
1021 np = of_find_matching_node(NULL, rk_iommu_dt_ids);
1022 if (!np)
1023 return 0;
1024
1025 of_node_put(np);
1026
1020 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); 1027 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1021 if (ret) 1028 if (ret)
1022 return ret; 1029 return ret;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 463c235acbdc..4387dae14e45 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
69static void __iomem *main_int_base; 69static void __iomem *main_int_base;
70static struct irq_domain *armada_370_xp_mpic_domain; 70static struct irq_domain *armada_370_xp_mpic_domain;
71static u32 doorbell_mask_reg; 71static u32 doorbell_mask_reg;
72static int parent_irq;
72#ifdef CONFIG_PCI_MSI 73#ifdef CONFIG_PCI_MSI
73static struct irq_domain *armada_370_xp_msi_domain; 74static struct irq_domain *armada_370_xp_msi_domain;
74static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); 75static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
356{ 357{
357 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 358 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
358 armada_xp_mpic_smp_cpu_init(); 359 armada_xp_mpic_smp_cpu_init();
360
359 return NOTIFY_OK; 361 return NOTIFY_OK;
360} 362}
361 363
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
364 .priority = 100, 366 .priority = 100,
365}; 367};
366 368
369static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
370 unsigned long action, void *hcpu)
371{
372 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
373 enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
374
375 return NOTIFY_OK;
376}
377
378static struct notifier_block mpic_cascaded_cpu_notifier = {
379 .notifier_call = mpic_cascaded_secondary_init,
380 .priority = 100,
381};
382
367#endif /* CONFIG_SMP */ 383#endif /* CONFIG_SMP */
368 384
369static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { 385static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
539 struct device_node *parent) 555 struct device_node *parent)
540{ 556{
541 struct resource main_int_res, per_cpu_int_res; 557 struct resource main_int_res, per_cpu_int_res;
542 int parent_irq, nr_irqs, i; 558 int nr_irqs, i;
543 u32 control; 559 u32 control;
544 560
545 BUG_ON(of_address_to_resource(node, 0, &main_int_res)); 561 BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
587 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); 603 register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
588#endif 604#endif
589 } else { 605 } else {
606#ifdef CONFIG_SMP
607 register_cpu_notifier(&mpic_cascaded_cpu_notifier);
608#endif
590 irq_set_chained_handler(parent_irq, 609 irq_set_chained_handler(parent_irq,
591 armada_370_xp_mpic_handle_cascade_irq); 610 armada_370_xp_mpic_handle_cascade_irq);
592 } 611 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8996bdf0f61..596b0a9eee99 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its,
416{ 416{
417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 417 struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
418 struct its_collection *sync_col; 418 struct its_collection *sync_col;
419 unsigned long flags;
419 420
420 raw_spin_lock(&its->lock); 421 raw_spin_lock_irqsave(&its->lock, flags);
421 422
422 cmd = its_allocate_entry(its); 423 cmd = its_allocate_entry(its);
423 if (!cmd) { /* We're soooooo screewed... */ 424 if (!cmd) { /* We're soooooo screewed... */
424 pr_err_ratelimited("ITS can't allocate, dropping command\n"); 425 pr_err_ratelimited("ITS can't allocate, dropping command\n");
425 raw_spin_unlock(&its->lock); 426 raw_spin_unlock_irqrestore(&its->lock, flags);
426 return; 427 return;
427 } 428 }
428 sync_col = builder(cmd, desc); 429 sync_col = builder(cmd, desc);
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its,
442 443
443post: 444post:
444 next_cmd = its_post_commands(its); 445 next_cmd = its_post_commands(its);
445 raw_spin_unlock(&its->lock); 446 raw_spin_unlock_irqrestore(&its->lock, flags);
446 447
447 its_wait_for_range_completion(its, cmd, next_cmd); 448 its_wait_for_range_completion(its, cmd, next_cmd);
448} 449}
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its)
799{ 800{
800 int err; 801 int err;
801 int i; 802 int i;
802 int psz = PAGE_SIZE; 803 int psz = SZ_64K;
803 u64 shr = GITS_BASER_InnerShareable; 804 u64 shr = GITS_BASER_InnerShareable;
804 805
805 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 806 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
806 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 807 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
807 u64 type = GITS_BASER_TYPE(val); 808 u64 type = GITS_BASER_TYPE(val);
808 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 809 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
810 int order = get_order(psz);
811 int alloc_size;
809 u64 tmp; 812 u64 tmp;
810 void *base; 813 void *base;
811 814
812 if (type == GITS_BASER_TYPE_NONE) 815 if (type == GITS_BASER_TYPE_NONE)
813 continue; 816 continue;
814 817
815 /* We're lazy and only allocate a single page for now */ 818 /*
816 base = (void *)get_zeroed_page(GFP_KERNEL); 819 * Allocate as many entries as required to fit the
820 * range of device IDs that the ITS can grok... The ID
821 * space being incredibly sparse, this results in a
822 * massive waste of memory.
823 *
824 * For other tables, only allocate a single page.
825 */
826 if (type == GITS_BASER_TYPE_DEVICE) {
827 u64 typer = readq_relaxed(its->base + GITS_TYPER);
828 u32 ids = GITS_TYPER_DEVBITS(typer);
829
830 order = get_order((1UL << ids) * entry_size);
831 if (order >= MAX_ORDER) {
832 order = MAX_ORDER - 1;
833 pr_warn("%s: Device Table too large, reduce its page order to %u\n",
834 its->msi_chip.of_node->full_name, order);
835 }
836 }
837
838 alloc_size = (1 << order) * PAGE_SIZE;
839 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
817 if (!base) { 840 if (!base) {
818 err = -ENOMEM; 841 err = -ENOMEM;
819 goto out_free; 842 goto out_free;
@@ -841,7 +864,7 @@ retry_baser:
841 break; 864 break;
842 } 865 }
843 866
844 val |= (PAGE_SIZE / psz) - 1; 867 val |= (alloc_size / psz) - 1;
845 868
846 writeq_relaxed(val, its->base + GITS_BASER + i * 8); 869 writeq_relaxed(val, its->base + GITS_BASER + i * 8);
847 tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 870 tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -882,7 +905,7 @@ retry_baser:
882 } 905 }
883 906
884 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 907 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
885 (int)(PAGE_SIZE / entry_size), 908 (int)(alloc_size / entry_size),
886 its_base_type_string[type], 909 its_base_type_string[type],
887 (unsigned long)virt_to_phys(base), 910 (unsigned long)virt_to_phys(base),
888 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 911 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
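
The Device Table hunk above sizes the allocation from the number of DeviceID bits instead of a single page: order = get_order((1UL << ids) * entry_size), clamped at MAX_ORDER - 1, and the BASER "number of pages" field becomes alloc_size / psz - 1. A userspace sketch of that sizing arithmetic; get_order() is reimplemented here for illustration, and the example values (16 DeviceID bits, 8-byte entries, MAX_ORDER 11) are assumptions, not read from real hardware:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define MAX_ORDER  11            /* a typical kernel default, used as an example */
#define SZ_64K     0x10000UL

/* Smallest order such that (PAGE_SIZE << order) >= size, like get_order(). */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long ids = 16;        /* hypothetical DeviceID width */
	unsigned long entry_size = 8;  /* hypothetical GITS_BASER entry size */
	unsigned long psz = SZ_64K;

	int order = get_order((1UL << ids) * entry_size);
	if (order >= MAX_ORDER)
		order = MAX_ORDER - 1;

	unsigned long alloc_size = (1UL << order) * PAGE_SIZE;
	printf("order %d -> %lu KiB, BASER pages field = %lu\n",
	       order, alloc_size / 1024, alloc_size / psz - 1);
	return 0;
}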
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void)
1020static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 1043static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1021{ 1044{
1022 struct its_device *its_dev = NULL, *tmp; 1045 struct its_device *its_dev = NULL, *tmp;
1046 unsigned long flags;
1023 1047
1024 raw_spin_lock(&its->lock); 1048 raw_spin_lock_irqsave(&its->lock, flags);
1025 1049
1026 list_for_each_entry(tmp, &its->its_device_list, entry) { 1050 list_for_each_entry(tmp, &its->its_device_list, entry) {
1027 if (tmp->device_id == dev_id) { 1051 if (tmp->device_id == dev_id) {
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1030 } 1054 }
1031 } 1055 }
1032 1056
1033 raw_spin_unlock(&its->lock); 1057 raw_spin_unlock_irqrestore(&its->lock, flags);
1034 1058
1035 return its_dev; 1059 return its_dev;
1036} 1060}
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1040{ 1064{
1041 struct its_device *dev; 1065 struct its_device *dev;
1042 unsigned long *lpi_map; 1066 unsigned long *lpi_map;
1067 unsigned long flags;
1043 void *itt; 1068 void *itt;
1044 int lpi_base; 1069 int lpi_base;
1045 int nr_lpis; 1070 int nr_lpis;
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1056 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 1081 nr_ites = max(2UL, roundup_pow_of_two(nvecs));
1057 sz = nr_ites * its->ite_size; 1082 sz = nr_ites * its->ite_size;
1058 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1083 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1059 itt = kmalloc(sz, GFP_KERNEL); 1084 itt = kzalloc(sz, GFP_KERNEL);
1060 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1085 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1061 1086
1062 if (!dev || !itt || !lpi_map) { 1087 if (!dev || !itt || !lpi_map) {
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1075 dev->device_id = dev_id; 1100 dev->device_id = dev_id;
1076 INIT_LIST_HEAD(&dev->entry); 1101 INIT_LIST_HEAD(&dev->entry);
1077 1102
1078 raw_spin_lock(&its->lock); 1103 raw_spin_lock_irqsave(&its->lock, flags);
1079 list_add(&dev->entry, &its->its_device_list); 1104 list_add(&dev->entry, &its->its_device_list);
1080 raw_spin_unlock(&its->lock); 1105 raw_spin_unlock_irqrestore(&its->lock, flags);
1081 1106
1082 /* Bind the device to the first possible CPU */ 1107 /* Bind the device to the first possible CPU */
1083 cpu = cpumask_first(cpu_online_mask); 1108 cpu = cpumask_first(cpu_online_mask);
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1091 1116
1092static void its_free_device(struct its_device *its_dev) 1117static void its_free_device(struct its_device *its_dev)
1093{ 1118{
1094 raw_spin_lock(&its_dev->its->lock); 1119 unsigned long flags;
1120
1121 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
1095 list_del(&its_dev->entry); 1122 list_del(&its_dev->entry);
1096 raw_spin_unlock(&its_dev->its->lock); 1123 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
1097 kfree(its_dev->itt); 1124 kfree(its_dev->itt);
1098 kfree(its_dev); 1125 kfree(its_dev);
1099} 1126}
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1112 return 0; 1139 return 0;
1113} 1140}
1114 1141
1142struct its_pci_alias {
1143 struct pci_dev *pdev;
1144 u32 dev_id;
1145 u32 count;
1146};
1147
1148static int its_pci_msi_vec_count(struct pci_dev *pdev)
1149{
1150 int msi, msix;
1151
1152 msi = max(pci_msi_vec_count(pdev), 0);
1153 msix = max(pci_msix_vec_count(pdev), 0);
1154
1155 return max(msi, msix);
1156}
1157
1158static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
1159{
1160 struct its_pci_alias *dev_alias = data;
1161
1162 dev_alias->dev_id = alias;
1163 if (pdev != dev_alias->pdev)
1164 dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
1165
1166 return 0;
1167}
1168
1115static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 1169static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
1116 int nvec, msi_alloc_info_t *info) 1170 int nvec, msi_alloc_info_t *info)
1117{ 1171{
1118 struct pci_dev *pdev; 1172 struct pci_dev *pdev;
1119 struct its_node *its; 1173 struct its_node *its;
1120 u32 dev_id;
1121 struct its_device *its_dev; 1174 struct its_device *its_dev;
1175 struct its_pci_alias dev_alias;
1122 1176
1123 if (!dev_is_pci(dev)) 1177 if (!dev_is_pci(dev))
1124 return -EINVAL; 1178 return -EINVAL;
1125 1179
1126 pdev = to_pci_dev(dev); 1180 pdev = to_pci_dev(dev);
1127 dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); 1181 dev_alias.pdev = pdev;
1182 dev_alias.count = nvec;
1183
1184 pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
1128 its = domain->parent->host_data; 1185 its = domain->parent->host_data;
1129 1186
1130 its_dev = its_find_device(its, dev_id); 1187 its_dev = its_find_device(its, dev_alias.dev_id);
1131 if (WARN_ON(its_dev)) 1188 if (its_dev) {
1132 return -EINVAL; 1189 /*
1190 * We already have seen this ID, probably through
1191 * another alias (PCI bridge of some sort). No need to
1192 * create the device.
1193 */
1194 dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
1195 goto out;
1196 }
1133 1197
1134 its_dev = its_create_device(its, dev_id, nvec); 1198 its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
1135 if (!its_dev) 1199 if (!its_dev)
1136 return -ENOMEM; 1200 return -ENOMEM;
1137 1201
1138 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 1202 dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
1139 1203 dev_alias.count, ilog2(dev_alias.count));
1204out:
1140 info->scratchpad[0].ptr = its_dev; 1205 info->scratchpad[0].ptr = its_dev;
1141 info->scratchpad[1].ptr = dev; 1206 info->scratchpad[1].ptr = dev;
1142 return 0; 1207 return 0;
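
With the hunk above, its_msi_prepare() sizes the ITT for the whole DMA-alias group of the device rather than for a single function, and reuses an existing its_device when another alias has already created one for the same DeviceID. A toy userspace model of summing vector demand across an alias group; the device names, the shared DeviceID and the per-function vector counts are invented for illustration:

#include <stdio.h>

struct fake_dev {
	const char *name;
	unsigned int devid;   /* RequesterID as seen by the ITS */
	int nvec;             /* MSI vectors this function wants */
};

int main(void)
{
	/* Three functions behind a hypothetical bridge, all aliased to 0x0800. */
	struct fake_dev group[] = {
		{ "fn A", 0x0800, 4 },
		{ "fn B", 0x0800, 2 },
		{ "fn C", 0x0800, 8 },
	};
	unsigned int dev_id = 0;
	int count = 0;
	int i;

	/* Simplified version of what the alias walk accumulates: one shared
	 * DeviceID and the total vector demand of the group. */
	for (i = 0; i < 3; i++) {
		dev_id = group[i].devid;
		count += group[i].nvec;
	}

	printf("ITT for devID %#x sized for %d vectors\n", dev_id, count);
	return 0;
}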
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = {
1255 .deactivate = its_irq_domain_deactivate, 1320 .deactivate = its_irq_domain_deactivate,
1256}; 1321};
1257 1322
1323static int its_force_quiescent(void __iomem *base)
1324{
1325 u32 count = 1000000; /* 1s */
1326 u32 val;
1327
1328 val = readl_relaxed(base + GITS_CTLR);
1329 if (val & GITS_CTLR_QUIESCENT)
1330 return 0;
1331
1332 /* Disable the generation of all interrupts to this ITS */
1333 val &= ~GITS_CTLR_ENABLE;
1334 writel_relaxed(val, base + GITS_CTLR);
1335
1336 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
1337 while (1) {
1338 val = readl_relaxed(base + GITS_CTLR);
1339 if (val & GITS_CTLR_QUIESCENT)
1340 return 0;
1341
1342 count--;
1343 if (!count)
1344 return -EBUSY;
1345
1346 cpu_relax();
1347 udelay(1);
1348 }
1349}
1350
1258static int its_probe(struct device_node *node, struct irq_domain *parent) 1351static int its_probe(struct device_node *node, struct irq_domain *parent)
1259{ 1352{
1260 struct resource res; 1353 struct resource res;
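
its_force_quiescent() above is a bounded poll: clear the enable bit, then spin on GITS_CTLR until the Quiescent bit comes back or a budget of roughly one million 1µs delays runs out. The same shape, stripped of the MMIO, as a userspace sketch; the register is simulated and the bit positions are placeholders chosen for illustration:

#include <stdio.h>
#include <unistd.h>

#define CTLR_ENABLE     (1u << 0)
#define CTLR_QUIESCENT  (1u << 31)

static unsigned int fake_ctlr = CTLR_ENABLE;   /* stand-in for the MMIO register */

static unsigned int read_ctlr(void)
{
	/* Pretend the hardware quiesces as soon as ENABLE is cleared. */
	if (!(fake_ctlr & CTLR_ENABLE))
		fake_ctlr |= CTLR_QUIESCENT;
	return fake_ctlr;
}

static int force_quiescent(void)
{
	unsigned int count = 1000000;   /* ~1s worth of 1us polls */
	unsigned int val = read_ctlr();

	if (val & CTLR_QUIESCENT)
		return 0;

	fake_ctlr = val & ~CTLR_ENABLE; /* "write" of the disabled value */

	while (1) {
		if (read_ctlr() & CTLR_QUIESCENT)
			return 0;
		if (!--count)
			return -1;      /* -EBUSY in the kernel version */
		usleep(1);
	}
}

int main(void)
{
	printf("force_quiescent() = %d\n", force_quiescent());
	return 0;
}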
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1283 goto out_unmap; 1376 goto out_unmap;
1284 } 1377 }
1285 1378
1379 err = its_force_quiescent(its_base);
1380 if (err) {
1381 pr_warn("%s: failed to quiesce, giving up\n",
1382 node->full_name);
1383 goto out_unmap;
1384 }
1385
1286 pr_info("ITS: %s\n", node->full_name); 1386 pr_info("ITS: %s\n", node->full_name);
1287 1387
1288 its = kzalloc(sizeof(*its), GFP_KERNEL); 1388 its = kzalloc(sizeof(*its), GFP_KERNEL);
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1323 writeq_relaxed(baser, its->base + GITS_CBASER); 1423 writeq_relaxed(baser, its->base + GITS_CBASER);
1324 tmp = readq_relaxed(its->base + GITS_CBASER); 1424 tmp = readq_relaxed(its->base + GITS_CBASER);
1325 writeq_relaxed(0, its->base + GITS_CWRITER); 1425 writeq_relaxed(0, its->base + GITS_CWRITER);
1326 writel_relaxed(1, its->base + GITS_CTLR); 1426 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1327 1427
1328 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { 1428 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
1329 pr_info("ITS: using cache flushing for cmd queue\n"); 1429 pr_info("ITS: using cache flushing for cmd queue\n");
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void)
1382 1482
1383int its_cpu_init(void) 1483int its_cpu_init(void)
1384{ 1484{
1385 if (!gic_rdists_supports_plpis()) {
1386 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1387 return -ENXIO;
1388 }
1389
1390 if (!list_empty(&its_nodes)) { 1485 if (!list_empty(&its_nodes)) {
1486 if (!gic_rdists_supports_plpis()) {
1487 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
1488 return -ENXIO;
1489 }
1391 its_cpu_init_lpis(); 1490 its_cpu_init_lpis();
1392 its_cpu_init_collection(); 1491 its_cpu_init_collection();
1393 } 1492 }
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34..fd8850def1b8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
466 tlist |= 1 << (mpidr & 0xf); 466 tlist |= 1 << (mpidr & 0xf);
467 467
468 cpu = cpumask_next(cpu, mask); 468 cpu = cpumask_next(cpu, mask);
469 if (cpu == nr_cpu_ids) 469 if (cpu >= nr_cpu_ids)
470 goto out; 470 goto out;
471 471
472 mpidr = cpu_logical_map(cpu); 472 mpidr = cpu_logical_map(cpu);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4634cf7d0ec3..471e1cdc1933 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d)
154static void gic_mask_irq(struct irq_data *d) 154static void gic_mask_irq(struct irq_data *d)
155{ 155{
156 u32 mask = 1 << (gic_irq(d) % 32); 156 u32 mask = 1 << (gic_irq(d) % 32);
157 unsigned long flags;
157 158
158 raw_spin_lock(&irq_controller_lock); 159 raw_spin_lock_irqsave(&irq_controller_lock, flags);
159 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); 160 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
160 if (gic_arch_extn.irq_mask) 161 if (gic_arch_extn.irq_mask)
161 gic_arch_extn.irq_mask(d); 162 gic_arch_extn.irq_mask(d);
162 raw_spin_unlock(&irq_controller_lock); 163 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
163} 164}
164 165
165static void gic_unmask_irq(struct irq_data *d) 166static void gic_unmask_irq(struct irq_data *d)
166{ 167{
167 u32 mask = 1 << (gic_irq(d) % 32); 168 u32 mask = 1 << (gic_irq(d) % 32);
169 unsigned long flags;
168 170
169 raw_spin_lock(&irq_controller_lock); 171 raw_spin_lock_irqsave(&irq_controller_lock, flags);
170 if (gic_arch_extn.irq_unmask) 172 if (gic_arch_extn.irq_unmask)
171 gic_arch_extn.irq_unmask(d); 173 gic_arch_extn.irq_unmask(d);
172 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); 174 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
173 raw_spin_unlock(&irq_controller_lock); 175 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
174} 176}
175 177
176static void gic_eoi_irq(struct irq_data *d) 178static void gic_eoi_irq(struct irq_data *d)
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
188{ 190{
189 void __iomem *base = gic_dist_base(d); 191 void __iomem *base = gic_dist_base(d);
190 unsigned int gicirq = gic_irq(d); 192 unsigned int gicirq = gic_irq(d);
193 unsigned long flags;
191 int ret; 194 int ret;
192 195
193 /* Interrupt configuration for SGIs can't be changed */ 196 /* Interrupt configuration for SGIs can't be changed */
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
199 type != IRQ_TYPE_EDGE_RISING) 202 type != IRQ_TYPE_EDGE_RISING)
200 return -EINVAL; 203 return -EINVAL;
201 204
202 raw_spin_lock(&irq_controller_lock); 205 raw_spin_lock_irqsave(&irq_controller_lock, flags);
203 206
204 if (gic_arch_extn.irq_set_type) 207 if (gic_arch_extn.irq_set_type)
205 gic_arch_extn.irq_set_type(d, type); 208 gic_arch_extn.irq_set_type(d, type);
206 209
207 ret = gic_configure_irq(gicirq, type, base, NULL); 210 ret = gic_configure_irq(gicirq, type, base, NULL);
208 211
209 raw_spin_unlock(&irq_controller_lock); 212 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
210 213
211 return ret; 214 return ret;
212} 215}
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
227 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 230 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
228 unsigned int cpu, shift = (gic_irq(d) % 4) * 8; 231 unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
229 u32 val, mask, bit; 232 u32 val, mask, bit;
233 unsigned long flags;
230 234
231 if (!force) 235 if (!force)
232 cpu = cpumask_any_and(mask_val, cpu_online_mask); 236 cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
236 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 240 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
237 return -EINVAL; 241 return -EINVAL;
238 242
239 raw_spin_lock(&irq_controller_lock); 243 raw_spin_lock_irqsave(&irq_controller_lock, flags);
240 mask = 0xff << shift; 244 mask = 0xff << shift;
241 bit = gic_cpu_map[cpu] << shift; 245 bit = gic_cpu_map[cpu] << shift;
242 val = readl_relaxed(reg) & ~mask; 246 val = readl_relaxed(reg) & ~mask;
243 writel_relaxed(val | bit, reg); 247 writel_relaxed(val | bit, reg);
244 raw_spin_unlock(&irq_controller_lock); 248 raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
245 249
246 return IRQ_SET_MASK_OK; 250 return IRQ_SET_MASK_OK;
247} 251}
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 6a7447c304ac..358a574d9e8b 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1609,7 +1609,7 @@ icn_setup(char *line)
1609 if (ints[0] > 1) 1609 if (ints[0] > 1)
1610 membase = (unsigned long)ints[2]; 1610 membase = (unsigned long)ints[2];
1611 if (str && *str) { 1611 if (str && *str) {
1612 strcpy(sid, str); 1612 strlcpy(sid, str, sizeof(sid));
1613 icn_id = sid; 1613 icn_id = sid;
1614 if ((p = strchr(sid, ','))) { 1614 if ((p = strchr(sid, ','))) {
1615 *p++ = 0; 1615 *p++ = 0;
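
The icn_setup() fix above replaces an unbounded strcpy() into the fixed-size sid[] with strlcpy(), which always NUL-terminates and never writes past the destination. strlcpy() is a kernel/BSD helper, so here is a portable userspace equivalent of the same bounded-copy idea; the buffer size is an arbitrary example:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy lookalike: copy at most size-1 bytes, always terminate. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;   /* like strlcpy: length the caller tried to copy */
}

int main(void)
{
	char sid[8];  /* pretend this is the driver's fixed-size id buffer */

	bounded_copy(sid, "this-id-is-much-too-long", sizeof(sid));
	printf("sid = \"%s\"\n", sid);   /* truncated, but no overflow */
	return 0;
}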
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 37de0173b6d2..74adcd2c967e 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
289 struct request_queue *q = bdev_get_queue(where->bdev); 289 struct request_queue *q = bdev_get_queue(where->bdev);
290 unsigned short logical_block_size = queue_logical_block_size(q); 290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors; 291 sector_t num_sectors;
292 unsigned int uninitialized_var(special_cmd_max_sectors);
292 293
293 /* Reject unsupported discard requests */ 294 /*
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { 295 * Reject unsupported discard and write same requests.
296 */
297 if (rw & REQ_DISCARD)
298 special_cmd_max_sectors = q->limits.max_discard_sectors;
299 else if (rw & REQ_WRITE_SAME)
300 special_cmd_max_sectors = q->limits.max_write_same_sectors;
301 if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
295 dec_count(io, region, -EOPNOTSUPP); 302 dec_count(io, region, -EOPNOTSUPP);
296 return; 303 return;
297 } 304 }
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
317 store_io_and_region_in_bio(bio, io, region); 324 store_io_and_region_in_bio(bio, io, region);
318 325
319 if (rw & REQ_DISCARD) { 326 if (rw & REQ_DISCARD) {
320 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 327 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
321 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
322 remaining -= num_sectors; 329 remaining -= num_sectors;
323 } else if (rw & REQ_WRITE_SAME) { 330 } else if (rw & REQ_WRITE_SAME) {
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
326 */ 333 */
327 dp->get_page(dp, &page, &len, &offset); 334 dp->get_page(dp, &page, &len, &offset);
328 bio_add_page(bio, page, logical_block_size, offset); 335 bio_add_page(bio, page, logical_block_size, offset);
329 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 336 num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
330 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; 337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
331 338
332 offset = 0; 339 offset = 0;
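
With the change above, do_region() picks the per-command limit once (max_discard_sectors or max_write_same_sectors), rejects the request when that limit is zero, and then carves the remaining sectors into bios of at most that many sectors each. The chunking itself is just a min() loop; a small standalone sketch with invented limits:

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t min_sect(sector_t a, sector_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	sector_t remaining = 10000;              /* hypothetical 10000-sector discard */
	sector_t special_cmd_max_sectors = 4096; /* hypothetical queue limit */
	int bios = 0;

	if (special_cmd_max_sectors == 0) {
		puts("unsupported: would complete with -EOPNOTSUPP");
		return 0;
	}

	while (remaining) {
		sector_t num_sectors = min_sect(special_cmd_max_sectors, remaining);

		bios++;
		printf("bio %d: %llu sectors\n", bios, num_sectors);
		remaining -= num_sectors;
	}
	return 0;
}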
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 8b204ae216ab..f83a0f3fc365 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -20,6 +20,8 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h> 21#include <linux/dm-kcopyd.h>
22 22
23#include "dm.h"
24
23#include "dm-exception-store.h" 25#include "dm-exception-store.h"
24 26
25#define DM_MSG_PREFIX "snapshots" 27#define DM_MSG_PREFIX "snapshots"
@@ -291,12 +293,23 @@ struct origin {
291}; 293};
292 294
293/* 295/*
296 * This structure is allocated for each origin target
297 */
298struct dm_origin {
299 struct dm_dev *dev;
300 struct dm_target *ti;
301 unsigned split_boundary;
302 struct list_head hash_list;
303};
304
305/*
294 * Size of the hash table for origin volumes. If we make this 306 * Size of the hash table for origin volumes. If we make this
295 * the size of the minors list then it should be nearly perfect 307 * the size of the minors list then it should be nearly perfect
296 */ 308 */
297#define ORIGIN_HASH_SIZE 256 309#define ORIGIN_HASH_SIZE 256
298#define ORIGIN_MASK 0xFF 310#define ORIGIN_MASK 0xFF
299static struct list_head *_origins; 311static struct list_head *_origins;
312static struct list_head *_dm_origins;
300static struct rw_semaphore _origins_lock; 313static struct rw_semaphore _origins_lock;
301 314
302static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); 315static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
310 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), 323 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
311 GFP_KERNEL); 324 GFP_KERNEL);
312 if (!_origins) { 325 if (!_origins) {
313 DMERR("unable to allocate memory"); 326 DMERR("unable to allocate memory for _origins");
314 return -ENOMEM; 327 return -ENOMEM;
315 } 328 }
316
317 for (i = 0; i < ORIGIN_HASH_SIZE; i++) 329 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
318 INIT_LIST_HEAD(_origins + i); 330 INIT_LIST_HEAD(_origins + i);
331
332 _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
333 GFP_KERNEL);
334 if (!_dm_origins) {
335 DMERR("unable to allocate memory for _dm_origins");
336 kfree(_origins);
337 return -ENOMEM;
338 }
339 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
340 INIT_LIST_HEAD(_dm_origins + i);
341
319 init_rwsem(&_origins_lock); 342 init_rwsem(&_origins_lock);
320 343
321 return 0; 344 return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
324static void exit_origin_hash(void) 347static void exit_origin_hash(void)
325{ 348{
326 kfree(_origins); 349 kfree(_origins);
350 kfree(_dm_origins);
327} 351}
328 352
329static unsigned origin_hash(struct block_device *bdev) 353static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
350 list_add_tail(&o->hash_list, sl); 374 list_add_tail(&o->hash_list, sl);
351} 375}
352 376
377static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
378{
379 struct list_head *ol;
380 struct dm_origin *o;
381
382 ol = &_dm_origins[origin_hash(origin)];
383 list_for_each_entry (o, ol, hash_list)
384 if (bdev_equal(o->dev->bdev, origin))
385 return o;
386
387 return NULL;
388}
389
390static void __insert_dm_origin(struct dm_origin *o)
391{
392 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
393 list_add_tail(&o->hash_list, sl);
394}
395
396static void __remove_dm_origin(struct dm_origin *o)
397{
398 list_del(&o->hash_list);
399}
400
353/* 401/*
354 * _origins_lock must be held when calling this function. 402 * _origins_lock must be held when calling this function.
355 * Returns number of snapshots registered using the supplied cow device, plus: 403 * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
1840static void snapshot_resume(struct dm_target *ti) 1888static void snapshot_resume(struct dm_target *ti)
1841{ 1889{
1842 struct dm_snapshot *s = ti->private; 1890 struct dm_snapshot *s = ti->private;
1843 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1891 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1892 struct dm_origin *o;
1893 struct mapped_device *origin_md = NULL;
1894 bool must_restart_merging = false;
1844 1895
1845 down_read(&_origins_lock); 1896 down_read(&_origins_lock);
1897
1898 o = __lookup_dm_origin(s->origin->bdev);
1899 if (o)
1900 origin_md = dm_table_get_md(o->ti->table);
1901 if (!origin_md) {
1902 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1903 if (snap_merging)
1904 origin_md = dm_table_get_md(snap_merging->ti->table);
1905 }
1906 if (origin_md == dm_table_get_md(ti->table))
1907 origin_md = NULL;
1908 if (origin_md) {
1909 if (dm_hold(origin_md))
1910 origin_md = NULL;
1911 }
1912
1913 up_read(&_origins_lock);
1914
1915 if (origin_md) {
1916 dm_internal_suspend_fast(origin_md);
1917 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1918 must_restart_merging = true;
1919 stop_merge(snap_merging);
1920 }
1921 }
1922
1923 down_read(&_origins_lock);
1924
1846 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1925 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1847 if (snap_src && snap_dest) { 1926 if (snap_src && snap_dest) {
1848 down_write(&snap_src->lock); 1927 down_write(&snap_src->lock);
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
1851 up_write(&snap_dest->lock); 1930 up_write(&snap_dest->lock);
1852 up_write(&snap_src->lock); 1931 up_write(&snap_src->lock);
1853 } 1932 }
1933
1854 up_read(&_origins_lock); 1934 up_read(&_origins_lock);
1855 1935
1936 if (origin_md) {
1937 if (must_restart_merging)
1938 start_merge(snap_merging);
1939 dm_internal_resume_fast(origin_md);
1940 dm_put(origin_md);
1941 }
1942
1856 /* Now we have correct chunk size, reregister */ 1943 /* Now we have correct chunk size, reregister */
1857 reregister_snapshot(s); 1944 reregister_snapshot(s);
1858 1945
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
2133 * Origin: maps a linear range of a device, with hooks for snapshotting. 2220 * Origin: maps a linear range of a device, with hooks for snapshotting.
2134 */ 2221 */
2135 2222
2136struct dm_origin {
2137 struct dm_dev *dev;
2138 unsigned split_boundary;
2139};
2140
2141/* 2223/*
2142 * Construct an origin mapping: <dev_path> 2224 * Construct an origin mapping: <dev_path>
2143 * The context for an origin is merely a 'struct dm_dev *' 2225 * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2166 goto bad_open; 2248 goto bad_open;
2167 } 2249 }
2168 2250
2251 o->ti = ti;
2169 ti->private = o; 2252 ti->private = o;
2170 ti->num_flush_bios = 1; 2253 ti->num_flush_bios = 1;
2171 2254
@@ -2180,6 +2263,7 @@ bad_alloc:
2180static void origin_dtr(struct dm_target *ti) 2263static void origin_dtr(struct dm_target *ti)
2181{ 2264{
2182 struct dm_origin *o = ti->private; 2265 struct dm_origin *o = ti->private;
2266
2183 dm_put_device(ti, o->dev); 2267 dm_put_device(ti, o->dev);
2184 kfree(o); 2268 kfree(o);
2185} 2269}
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
2216 struct dm_origin *o = ti->private; 2300 struct dm_origin *o = ti->private;
2217 2301
2218 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); 2302 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2303
2304 down_write(&_origins_lock);
2305 __insert_dm_origin(o);
2306 up_write(&_origins_lock);
2307}
2308
2309static void origin_postsuspend(struct dm_target *ti)
2310{
2311 struct dm_origin *o = ti->private;
2312
2313 down_write(&_origins_lock);
2314 __remove_dm_origin(o);
2315 up_write(&_origins_lock);
2219} 2316}
2220 2317
2221static void origin_status(struct dm_target *ti, status_type_t type, 2318static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
2258 2355
2259static struct target_type origin_target = { 2356static struct target_type origin_target = {
2260 .name = "snapshot-origin", 2357 .name = "snapshot-origin",
2261 .version = {1, 8, 1}, 2358 .version = {1, 9, 0},
2262 .module = THIS_MODULE, 2359 .module = THIS_MODULE,
2263 .ctr = origin_ctr, 2360 .ctr = origin_ctr,
2264 .dtr = origin_dtr, 2361 .dtr = origin_dtr,
2265 .map = origin_map, 2362 .map = origin_map,
2266 .resume = origin_resume, 2363 .resume = origin_resume,
2364 .postsuspend = origin_postsuspend,
2267 .status = origin_status, 2365 .status = origin_status,
2268 .merge = origin_merge, 2366 .merge = origin_merge,
2269 .iterate_devices = origin_iterate_devices, 2367 .iterate_devices = origin_iterate_devices,
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
2271 2369
2272static struct target_type snapshot_target = { 2370static struct target_type snapshot_target = {
2273 .name = "snapshot", 2371 .name = "snapshot",
2274 .version = {1, 12, 0}, 2372 .version = {1, 13, 0},
2275 .module = THIS_MODULE, 2373 .module = THIS_MODULE,
2276 .ctr = snapshot_ctr, 2374 .ctr = snapshot_ctr,
2277 .dtr = snapshot_dtr, 2375 .dtr = snapshot_dtr,
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
2285 2383
2286static struct target_type merge_target = { 2384static struct target_type merge_target = {
2287 .name = dm_snapshot_merge_target_name, 2385 .name = dm_snapshot_merge_target_name,
2288 .version = {1, 2, 0}, 2386 .version = {1, 3, 0},
2289 .module = THIS_MODULE, 2387 .module = THIS_MODULE,
2290 .ctr = snapshot_ctr, 2388 .ctr = snapshot_ctr,
2291 .dtr = snapshot_dtr, 2389 .dtr = snapshot_dtr,
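
The new _dm_origins table mirrors _origins: ORIGIN_HASH_SIZE buckets of list heads, indexed by hashing the origin block device, with __insert/__lookup/__remove helpers called under _origins_lock. A tiny userspace model of that buckets-of-lists layout; the integer "device" stands in for a bdev and the hash is the same mask the driver uses on the device number:

#include <stdio.h>
#include <stdint.h>

#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

struct dm_origin {
	uintptr_t dev;              /* stand-in for the origin bdev */
	struct dm_origin *next;     /* stand-in for the hash_list node */
};

static struct dm_origin *buckets[ORIGIN_HASH_SIZE];

static unsigned origin_hash(uintptr_t dev)
{
	return (unsigned)(dev & ORIGIN_MASK);
}

static void insert_origin(struct dm_origin *o)
{
	unsigned h = origin_hash(o->dev);

	o->next = buckets[h];
	buckets[h] = o;
}

static struct dm_origin *lookup_origin(uintptr_t dev)
{
	struct dm_origin *o;

	for (o = buckets[origin_hash(dev)]; o; o = o->next)
		if (o->dev == dev)
			return o;
	return NULL;
}

int main(void)
{
	struct dm_origin a = { .dev = 0x1234 };

	insert_origin(&a);
	printf("lookup 0x1234: %s\n", lookup_origin(0x1234) ? "found" : "missing");
	printf("lookup 0x5678: %s\n", lookup_origin(0x5678) ? "found" : "missing");
	return 0;
}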
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 654773cb1eee..921aafd12aee 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2358 return DM_MAPIO_REMAPPED; 2358 return DM_MAPIO_REMAPPED;
2359 2359
2360 case -ENODATA: 2360 case -ENODATA:
2361 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2362 /*
2363 * This block isn't provisioned, and we have no way
2364 * of doing so.
2365 */
2366 handle_unserviceable_bio(tc->pool, bio);
2367 cell_defer_no_holder(tc, virt_cell);
2368 return DM_MAPIO_SUBMITTED;
2369 }
2370 /* fall through */
2371
2372 case -EWOULDBLOCK: 2361 case -EWOULDBLOCK:
2373 thin_defer_cell(tc, virt_cell); 2362 thin_defer_cell(tc, virt_cell);
2374 return DM_MAPIO_SUBMITTED; 2363 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 73f28802dc7a..9b641b38b857 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2616,6 +2616,19 @@ void dm_get(struct mapped_device *md)
2616 BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2616 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2617} 2617}
2618 2618
2619int dm_hold(struct mapped_device *md)
2620{
2621 spin_lock(&_minor_lock);
2622 if (test_bit(DMF_FREEING, &md->flags)) {
2623 spin_unlock(&_minor_lock);
2624 return -EBUSY;
2625 }
2626 dm_get(md);
2627 spin_unlock(&_minor_lock);
2628 return 0;
2629}
2630EXPORT_SYMBOL_GPL(dm_hold);
2631
2619const char *dm_device_name(struct mapped_device *md) 2632const char *dm_device_name(struct mapped_device *md)
2620{ 2633{
2621 return md->name; 2634 return md->name;
@@ -2638,10 +2651,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2638 if (dm_request_based(md)) 2651 if (dm_request_based(md))
2639 flush_kthread_worker(&md->kworker); 2652 flush_kthread_worker(&md->kworker);
2640 2653
2654 /*
2655 * Take suspend_lock so that presuspend and postsuspend methods
2656 * do not race with internal suspend.
2657 */
2658 mutex_lock(&md->suspend_lock);
2641 if (!dm_suspended_md(md)) { 2659 if (!dm_suspended_md(md)) {
2642 dm_table_presuspend_targets(map); 2660 dm_table_presuspend_targets(map);
2643 dm_table_postsuspend_targets(map); 2661 dm_table_postsuspend_targets(map);
2644 } 2662 }
2663 mutex_unlock(&md->suspend_lock);
2645 2664
2646 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2665 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2647 dm_put_live_table(md, srcu_idx); 2666 dm_put_live_table(md, srcu_idx);
@@ -3115,6 +3134,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
3115 flush_workqueue(md->wq); 3134 flush_workqueue(md->wq);
3116 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); 3135 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3117} 3136}
3137EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3118 3138
3119void dm_internal_resume_fast(struct mapped_device *md) 3139void dm_internal_resume_fast(struct mapped_device *md)
3120{ 3140{
@@ -3126,6 +3146,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
3126done: 3146done:
3127 mutex_unlock(&md->suspend_lock); 3147 mutex_unlock(&md->suspend_lock);
3128} 3148}
3149EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3129 3150
3130/*----------------------------------------------------------------- 3151/*-----------------------------------------------------------------
3131 * Event notification. 3152 * Event notification.
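
dm_hold() above is "take a reference only if the object is not already being torn down": test DMF_FREEING under _minor_lock and either grab a reference or fail with -EBUSY. A standalone sketch of that conditional-get pattern using a plain flag and counter; it is single-threaded for brevity, whereas the kernel keeps the test and the get atomic under a spinlock:

#include <stdio.h>

struct obj {
	int freeing;   /* stand-in for DMF_FREEING */
	int refcount;
};

/* Returns 0 and takes a reference, or -1 (-EBUSY in the kernel) if the
 * object is already on its way out. In the driver, _minor_lock is held
 * across the test and the get so the two cannot race. */
static int obj_hold(struct obj *o)
{
	if (o->freeing)
		return -1;
	o->refcount++;
	return 0;
}

int main(void)
{
	struct obj md = { .freeing = 0, .refcount = 1 };

	printf("hold while live:    %d (refcount %d)\n", obj_hold(&md), md.refcount);
	md.freeing = 1;
	printf("hold while freeing: %d (refcount %d)\n", obj_hold(&md), md.refcount);
	return 0;
}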
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cadf9cc02b25..717daad71fb1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev)
5080 } 5080 }
5081 if (err) { 5081 if (err) {
5082 mddev_detach(mddev); 5082 mddev_detach(mddev);
5083 pers->free(mddev, mddev->private); 5083 if (mddev->private)
5084 pers->free(mddev, mddev->private);
5084 module_put(pers->owner); 5085 module_put(pers->owner);
5085 bitmap_destroy(mddev); 5086 bitmap_destroy(mddev);
5086 return err; 5087 return err;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a13f738a7b39..3ed9f42ddca6 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev)
467 dump_zones(mddev); 467 dump_zones(mddev);
468 468
469 ret = md_integrity_register(mddev); 469 ret = md_integrity_register(mddev);
470 if (ret)
471 raid0_free(mddev, conf);
472 470
473 return ret; 471 return ret;
474} 472}
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
739 for (id = kempld_dmi_table; 739 for (id = kempld_dmi_table;
740 id->matches[0].slot != DMI_NONE; id++) 740 id->matches[0].slot != DMI_NONE; id++)
741 if (strstr(id->ident, force_device_id)) 741 if (strstr(id->ident, force_device_id))
742 if (id->callback && id->callback(id)) 742 if (id->callback && !id->callback(id))
743 break; 743 break;
744 if (id->matches[0].slot == DMI_NONE) 744 if (id->matches[0].slot == DMI_NONE)
745 return -ENODEV; 745 return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) 196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
197{ 197{
198 u16 value; 198 u16 value;
199 u8 *buf;
200 int ret;
199 201
200 if (!data) 202 if (!data)
201 return -EINVAL; 203 return -EINVAL;
202 *data = 0; 204
205 buf = kzalloc(sizeof(u8), GFP_KERNEL);
206 if (!buf)
207 return -ENOMEM;
203 208
204 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; 209 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
205 value = swab16(addr); 210 value = swab16(addr);
206 211
207 return usb_control_msg(ucr->pusb_dev, 212 ret = usb_control_msg(ucr->pusb_dev,
208 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, 213 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
209 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 214 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
210 value, 0, data, 1, 100); 215 value, 0, buf, 1, 100);
216 *data = *buf;
217
218 kfree(buf);
219 return ret;
211} 220}
212EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); 221EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
213 222
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
288int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) 297int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
289{ 298{
290 int ret; 299 int ret;
300 u16 *buf;
291 301
292 if (!status) 302 if (!status)
293 return -EINVAL; 303 return -EINVAL;
294 304
295 if (polling_pipe == 0) 305 if (polling_pipe == 0) {
306 buf = kzalloc(sizeof(u16), GFP_KERNEL);
307 if (!buf)
308 return -ENOMEM;
309
296 ret = usb_control_msg(ucr->pusb_dev, 310 ret = usb_control_msg(ucr->pusb_dev,
297 usb_rcvctrlpipe(ucr->pusb_dev, 0), 311 usb_rcvctrlpipe(ucr->pusb_dev, 0),
298 RTSX_USB_REQ_POLL, 312 RTSX_USB_REQ_POLL,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 313 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, 0, status, 2, 100); 314 0, 0, buf, 2, 100);
301 else 315 *status = *buf;
316
317 kfree(buf);
318 } else {
302 ret = rtsx_usb_get_status_with_bulk(ucr, status); 319 ret = rtsx_usb_get_status_with_bulk(ucr, status);
320 }
303 321
304 /* usb_control_msg may return positive when success */ 322 /* usb_control_msg may return positive when success */
305 if (ret < 0) 323 if (ret < 0)
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index e9f1d8d84613..c53f14a7ce54 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev)
124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { 124 PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
125 ret = PTR_ERR(pwrseq->reset_gpios[i]); 125 ret = PTR_ERR(pwrseq->reset_gpios[i]);
126 126
127 while (--i) 127 while (i--)
128 gpiod_put(pwrseq->reset_gpios[i]); 128 gpiod_put(pwrseq->reset_gpios[i]);
129 129
130 goto clk_put; 130 goto clk_put;
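
The pwrseq_simple fix is a classic unwind off-by-one: with while (--i) the cleanup loop never releases index 0 and, if the very first gpiod_get() fails (i == 0), the pre-decrement wraps and the loop runs far past the array; while (i--) walks back over exactly the descriptors that were acquired. A tiny demonstration of the two loop shapes (the wrapping case is described but deliberately not executed):

#include <stdio.h>

static void unwind(const char *label, int failed_at, int old_style)
{
	int i = failed_at;   /* index of the element whose acquisition failed */

	printf("%s, failure at index %d, releasing:", label, failed_at);
	if (old_style) {
		while (--i)             /* buggy: skips index 0, wraps when i == 0 */
			printf(" %d", i);
	} else {
		while (i--)             /* correct: releases failed_at-1 .. 0 */
			printf(" %d", i);
	}
	printf("\n");
}

int main(void)
{
	unwind("old", 3, 1);   /* releases 2 1   (leaks index 0) */
	unwind("new", 3, 0);   /* releases 2 1 0 */
	unwind("new", 0, 0);   /* nothing acquired, nothing released */
	return 0;
}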
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b76a173cd95..5897d8d8fa5a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI
526 526
527config MTD_NAND_HISI504 527config MTD_NAND_HISI504
528 tristate "Support for NAND controller on Hisilicon SoC Hip04" 528 tristate "Support for NAND controller on Hisilicon SoC Hip04"
529 depends on HAS_DMA
529 help 530 help
530 Enables support for NAND controller on Hisilicon SoC Hip04. 531 Enables support for NAND controller on Hisilicon SoC Hip04.
531 532
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 96b0b1d27df1..10b1f7a4fe50 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
480 nand_writel(info, NDCR, ndcr | int_mask); 480 nand_writel(info, NDCR, ndcr | int_mask);
481} 481}
482 482
483static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
484{
485 if (info->ecc_bch) {
486 int timeout;
487
488 /*
489 * According to the datasheet, when reading from NDDB
 490 * with BCH enabled, after each 32-byte read, we
491 * have to make sure that the NDSR.RDDREQ bit is set.
492 *
 493 * Drain the FIFO eight 32-bit reads at a time, and skip
494 * the polling on the last read.
495 */
496 while (len > 8) {
497 __raw_readsl(info->mmio_base + NDDB, data, 8);
498
499 for (timeout = 0;
500 !(nand_readl(info, NDSR) & NDSR_RDDREQ);
501 timeout++) {
502 if (timeout >= 5) {
503 dev_err(&info->pdev->dev,
504 "Timeout on RDDREQ while draining the FIFO\n");
505 return;
506 }
507
508 mdelay(1);
509 }
510
511 data += 32;
512 len -= 8;
513 }
514 }
515
516 __raw_readsl(info->mmio_base + NDDB, data, len);
517}
518
483static void handle_data_pio(struct pxa3xx_nand_info *info) 519static void handle_data_pio(struct pxa3xx_nand_info *info)
484{ 520{
485 unsigned int do_bytes = min(info->data_size, info->chunk_size); 521 unsigned int do_bytes = min(info->data_size, info->chunk_size);
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
496 DIV_ROUND_UP(info->oob_size, 4)); 532 DIV_ROUND_UP(info->oob_size, 4));
497 break; 533 break;
498 case STATE_PIO_READING: 534 case STATE_PIO_READING:
499 __raw_readsl(info->mmio_base + NDDB, 535 drain_fifo(info,
500 info->data_buff + info->data_buff_pos, 536 info->data_buff + info->data_buff_pos,
501 DIV_ROUND_UP(do_bytes, 4)); 537 DIV_ROUND_UP(do_bytes, 4));
502 538
503 if (info->oob_size > 0) 539 if (info->oob_size > 0)
504 __raw_readsl(info->mmio_base + NDDB, 540 drain_fifo(info,
505 info->oob_buff + info->oob_buff_pos, 541 info->oob_buff + info->oob_buff_pos,
506 DIV_ROUND_UP(info->oob_size, 4)); 542 DIV_ROUND_UP(info->oob_size, 4));
507 break; 543 break;
508 default: 544 default:
509 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 545 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev)
1572 int ret, irq, cs; 1608 int ret, irq, cs;
1573 1609
1574 pdata = dev_get_platdata(&pdev->dev); 1610 pdata = dev_get_platdata(&pdev->dev);
1611 if (pdata->num_cs <= 0)
1612 return -ENODEV;
1575 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + 1613 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1576 sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 1614 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1577 if (!info) 1615 if (!info)
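
drain_fifo() above reads the NAND data FIFO in bursts of eight 32-bit words and, with BCH enabled, re-checks the RDDREQ status bit between bursts with a small bounded timeout, skipping the check after the final burst. A userspace sketch of the same chunk-poll-chunk loop over a plain buffer; the FIFO and the status bit are simulated here, not real registers:

#include <stdio.h>
#include <string.h>

#define CHUNK_WORDS 8   /* 8 x 32-bit reads = 32 bytes per burst */

static unsigned int fifo[64];
static int fifo_ready(void) { return 1; }   /* pretend RDDREQ is always set */

static int drain_fifo(unsigned int *data, int len)
{
	const unsigned int *src = fifo;

	while (len > CHUNK_WORDS) {
		memcpy(data, src, CHUNK_WORDS * sizeof(*data));
		data += CHUNK_WORDS;
		src += CHUNK_WORDS;
		len -= CHUNK_WORDS;

		/* Between bursts, wait (bounded) for the controller to refill. */
		int timeout;
		for (timeout = 0; !fifo_ready(); timeout++)
			if (timeout >= 5)
				return -1;   /* the driver logs a timeout and bails */
	}
	memcpy(data, src, len * sizeof(*data));  /* last burst, no poll */
	return 0;
}

int main(void)
{
	unsigned int buf[20];
	int i;

	for (i = 0; i < 64; i++)
		fifo[i] = i;
	printf("drain_fifo: %d, last word %u\n", drain_fifo(buf, 20), buf[19]);
	return 0;
}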
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index da4c79259f67..16e34b37d134 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,9 +425,10 @@ retry:
425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", 425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else {
429 err = -EINVAL; 429 err = -EINVAL;
430 ubi_ro_mode(ubi); 430 ubi_ro_mode(ubi);
431 }
431 } 432 }
432 goto out_free; 433 goto out_free;
433 } else if (err == UBI_IO_BITFLIPS) 434 } else if (err == UBI_IO_BITFLIPS)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 98d73aab52fe..58808f651452 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -131,7 +131,7 @@ config CAN_RCAR
131 131
132config CAN_XILINXCAN 132config CAN_XILINXCAN
133 tristate "Xilinx CAN" 133 tristate "Xilinx CAN"
134 depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST 134 depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
135 depends on COMMON_CLK && HAS_IOMEM 135 depends on COMMON_CLK && HAS_IOMEM
136 ---help--- 136 ---help---
137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and 137 Xilinx CAN driver. This driver supports both soft AXI CAN IP and
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index a316fa4b91ab..e97a08ce0b90 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/spinlock.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/completion.h> 19#include <linux/completion.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -467,10 +468,11 @@ struct kvaser_usb {
467struct kvaser_usb_net_priv { 468struct kvaser_usb_net_priv {
468 struct can_priv can; 469 struct can_priv can;
469 470
470 atomic_t active_tx_urbs; 471 spinlock_t tx_contexts_lock;
471 struct usb_anchor tx_submitted; 472 int active_tx_contexts;
472 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; 473 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
473 474
475 struct usb_anchor tx_submitted;
474 struct completion start_comp, stop_comp; 476 struct completion start_comp, stop_comp;
475 477
476 struct kvaser_usb *dev; 478 struct kvaser_usb *dev;
@@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
694 struct kvaser_usb_net_priv *priv; 696 struct kvaser_usb_net_priv *priv;
695 struct sk_buff *skb; 697 struct sk_buff *skb;
696 struct can_frame *cf; 698 struct can_frame *cf;
699 unsigned long flags;
697 u8 channel, tid; 700 u8 channel, tid;
698 701
699 channel = msg->u.tx_acknowledge_header.channel; 702 channel = msg->u.tx_acknowledge_header.channel;
@@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
737 740
738 stats->tx_packets++; 741 stats->tx_packets++;
739 stats->tx_bytes += context->dlc; 742 stats->tx_bytes += context->dlc;
740 can_get_echo_skb(priv->netdev, context->echo_index);
741 743
742 context->echo_index = MAX_TX_URBS; 744 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
743 atomic_dec(&priv->active_tx_urbs);
744 745
746 can_get_echo_skb(priv->netdev, context->echo_index);
747 context->echo_index = MAX_TX_URBS;
748 --priv->active_tx_contexts;
745 netif_wake_queue(priv->netdev); 749 netif_wake_queue(priv->netdev);
750
751 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
746} 752}
747 753
748static void kvaser_usb_simple_msg_callback(struct urb *urb) 754static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
803 return 0; 809 return 0;
804} 810}
805 811
806static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
807{
808 int i;
809
810 usb_kill_anchored_urbs(&priv->tx_submitted);
811 atomic_set(&priv->active_tx_urbs, 0);
812
813 for (i = 0; i < MAX_TX_URBS; i++)
814 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
815}
816
817static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, 812static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
818 const struct kvaser_usb_error_summary *es, 813 const struct kvaser_usb_error_summary *es,
819 struct can_frame *cf) 814 struct can_frame *cf)
@@ -1515,6 +1510,24 @@ error:
1515 return err; 1510 return err;
1516} 1511}
1517 1512
1513static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1514{
1515 int i;
1516
1517 priv->active_tx_contexts = 0;
1518 for (i = 0; i < MAX_TX_URBS; i++)
1519 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1520}
1521
1522/* This method might sleep. Do not call it in the atomic context
1523 * of URB completions.
1524 */
1525static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
1526{
1527 usb_kill_anchored_urbs(&priv->tx_submitted);
1528 kvaser_usb_reset_tx_urb_contexts(priv);
1529}
1530
1518static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) 1531static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1519{ 1532{
1520 int i; 1533 int i;
@@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 struct kvaser_msg *msg; 1647 struct kvaser_msg *msg;
1635 int i, err, ret = NETDEV_TX_OK; 1648 int i, err, ret = NETDEV_TX_OK;
1636 u8 *msg_tx_can_flags = NULL; /* GCC */ 1649 u8 *msg_tx_can_flags = NULL; /* GCC */
1650 unsigned long flags;
1637 1651
1638 if (can_dropped_invalid_skb(netdev, skb)) 1652 if (can_dropped_invalid_skb(netdev, skb))
1639 return NETDEV_TX_OK; 1653 return NETDEV_TX_OK;
@@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1687 if (cf->can_id & CAN_RTR_FLAG) 1701 if (cf->can_id & CAN_RTR_FLAG)
1688 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1702 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1689 1703
1704 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1690 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1705 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
1691 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1706 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
1692 context = &priv->tx_contexts[i]; 1707 context = &priv->tx_contexts[i];
1708
1709 context->echo_index = i;
1710 can_put_echo_skb(skb, netdev, context->echo_index);
1711 ++priv->active_tx_contexts;
1712 if (priv->active_tx_contexts >= MAX_TX_URBS)
1713 netif_stop_queue(netdev);
1714
1693 break; 1715 break;
1694 } 1716 }
1695 } 1717 }
1718 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1696 1719
1697 /* This should never happen; it implies a flow control bug */ 1720 /* This should never happen; it implies a flow control bug */
1698 if (!context) { 1721 if (!context) {
@@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1704 } 1727 }
1705 1728
1706 context->priv = priv; 1729 context->priv = priv;
1707 context->echo_index = i;
1708 context->dlc = cf->can_dlc; 1730 context->dlc = cf->can_dlc;
1709 1731
1710 msg->u.tx_can.tid = context->echo_index; 1732 msg->u.tx_can.tid = context->echo_index;
@@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1716 kvaser_usb_write_bulk_callback, context); 1738 kvaser_usb_write_bulk_callback, context);
1717 usb_anchor_urb(urb, &priv->tx_submitted); 1739 usb_anchor_urb(urb, &priv->tx_submitted);
1718 1740
1719 can_put_echo_skb(skb, netdev, context->echo_index);
1720
1721 atomic_inc(&priv->active_tx_urbs);
1722
1723 if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
1724 netif_stop_queue(netdev);
1725
1726 err = usb_submit_urb(urb, GFP_ATOMIC); 1741 err = usb_submit_urb(urb, GFP_ATOMIC);
1727 if (unlikely(err)) { 1742 if (unlikely(err)) {
1743 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1744
1728 can_free_echo_skb(netdev, context->echo_index); 1745 can_free_echo_skb(netdev, context->echo_index);
1746 context->echo_index = MAX_TX_URBS;
1747 --priv->active_tx_contexts;
1748 netif_wake_queue(netdev);
1749
1750 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1729 1751
1730 atomic_dec(&priv->active_tx_urbs);
1731 usb_unanchor_urb(urb); 1752 usb_unanchor_urb(urb);
1732 1753
1733 stats->tx_dropped++; 1754 stats->tx_dropped++;
@@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1854 struct kvaser_usb *dev = usb_get_intfdata(intf); 1875 struct kvaser_usb *dev = usb_get_intfdata(intf);
1855 struct net_device *netdev; 1876 struct net_device *netdev;
1856 struct kvaser_usb_net_priv *priv; 1877 struct kvaser_usb_net_priv *priv;
1857 int i, err; 1878 int err;
1858 1879
1859 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); 1880 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1860 if (err) 1881 if (err)
@@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1868 1889
1869 priv = netdev_priv(netdev); 1890 priv = netdev_priv(netdev);
1870 1891
1892 init_usb_anchor(&priv->tx_submitted);
1871 init_completion(&priv->start_comp); 1893 init_completion(&priv->start_comp);
1872 init_completion(&priv->stop_comp); 1894 init_completion(&priv->stop_comp);
1873 1895
1874 init_usb_anchor(&priv->tx_submitted);
1875 atomic_set(&priv->active_tx_urbs, 0);
1876
1877 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
1878 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1879
1880 priv->dev = dev; 1896 priv->dev = dev;
1881 priv->netdev = netdev; 1897 priv->netdev = netdev;
1882 priv->channel = channel; 1898 priv->channel = channel;
1883 1899
1900 spin_lock_init(&priv->tx_contexts_lock);
1901 kvaser_usb_reset_tx_urb_contexts(priv);
1902
1884 priv->can.state = CAN_STATE_STOPPED; 1903 priv->can.state = CAN_STATE_STOPPED;
1885 priv->can.clock.freq = CAN_USB_CLOCK; 1904 priv->can.clock.freq = CAN_USB_CLOCK;
1886 priv->can.bittiming_const = &kvaser_usb_bittiming_const; 1905 priv->can.bittiming_const = &kvaser_usb_bittiming_const;
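The kvaser_usb hunks above replace the atomic active_tx_urbs counter with a
spinlock-protected pool of tx contexts, so finding a free slot, marking it busy,
bumping the counter and stopping the queue happen as one indivisible step, and
the completion path reverses them under the same lock. A minimal userspace
sketch of that claim-and-release-under-lock pattern, in plain C with pthreads;
every name below is an illustrative stand-in, not the driver's API:

#include <pthread.h>
#include <stdio.h>

#define MAX_TX_URBS 16                   /* pool size, mirroring the driver's constant */

struct tx_context {
    int echo_index;                      /* MAX_TX_URBS means "free" */
};

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tx_context tx_contexts[MAX_TX_URBS];
static int active_tx_contexts;

/* Reserve a free context and mark it busy while still holding the lock. */
static struct tx_context *claim_tx_context(void)
{
    struct tx_context *ctx = NULL;
    int i;

    pthread_mutex_lock(&tx_lock);
    for (i = 0; i < MAX_TX_URBS; i++) {
        if (tx_contexts[i].echo_index == MAX_TX_URBS) {
            ctx = &tx_contexts[i];
            ctx->echo_index = i;
            ++active_tx_contexts;
            break;
        }
    }
    pthread_mutex_unlock(&tx_lock);
    return ctx;
}

/* Completion path: release the context under the same lock. */
static void release_tx_context(struct tx_context *ctx)
{
    pthread_mutex_lock(&tx_lock);
    ctx->echo_index = MAX_TX_URBS;
    --active_tx_contexts;
    pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
    struct tx_context *ctx;
    int i;

    for (i = 0; i < MAX_TX_URBS; i++)
        tx_contexts[i].echo_index = MAX_TX_URBS;

    ctx = claim_tx_context();
    printf("claimed slot %d, active %d\n", ctx->echo_index, active_tx_contexts);
    release_tx_context(ctx);
    printf("released, active %d\n", active_tx_contexts);
    return 0;
}

The point of keeping the per-slot state and the counter under one lock is that a
concurrent completion path always sees a consistent pool; with a separate atomic
counter, the count and the slot markings could briefly disagree.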
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1543{ 1543{
1544 struct pcnet32_private *lp; 1544 struct pcnet32_private *lp;
1545 int i, media; 1545 int i, media;
1546 int fdx, mii, fset, dxsuflo; 1546 int fdx, mii, fset, dxsuflo, sram;
1547 int chip_version; 1547 int chip_version;
1548 char *chipname; 1548 char *chipname;
1549 struct net_device *dev; 1549 struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1580 } 1580 }
1581 1581
1582 /* initialize variables */ 1582 /* initialize variables */
1583 fdx = mii = fset = dxsuflo = 0; 1583 fdx = mii = fset = dxsuflo = sram = 0;
1584 chip_version = (chip_version >> 12) & 0xffff; 1584 chip_version = (chip_version >> 12) & 0xffff;
1585 1585
1586 switch (chip_version) { 1586 switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1613 chipname = "PCnet/FAST III 79C973"; /* PCI */ 1613 chipname = "PCnet/FAST III 79C973"; /* PCI */
1614 fdx = 1; 1614 fdx = 1;
1615 mii = 1; 1615 mii = 1;
1616 sram = 1;
1616 break; 1617 break;
1617 case 0x2626: 1618 case 0x2626:
1618 chipname = "PCnet/Home 79C978"; /* PCI */ 1619 chipname = "PCnet/Home 79C978"; /* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1636 chipname = "PCnet/FAST III 79C975"; /* PCI */ 1637 chipname = "PCnet/FAST III 79C975"; /* PCI */
1637 fdx = 1; 1638 fdx = 1;
1638 mii = 1; 1639 mii = 1;
1640 sram = 1;
1639 break; 1641 break;
1640 case 0x2628: 1642 case 0x2628:
1641 chipname = "PCnet/PRO 79C976"; 1643 chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1664 dxsuflo = 1; 1666 dxsuflo = 1;
1665 } 1667 }
1666 1668
1669 /*
1670 * The Am79C973/Am79C975 controllers come with 12K of SRAM
1671 * which we can use for the Tx/Rx buffers but most importantly,
 1672 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
1673 * Tx fifo underflows.
1674 */
1675 if (sram) {
1676 /*
1677 * The SRAM is being configured in two steps. First we
1678 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
1679 * to the datasheet, each bit corresponds to a 512-byte
1680 * page so we can have at most 24 pages. The SRAM_SIZE
1681 * holds the value of the upper 8 bits of the 16-bit SRAM size.
1682 * The low 8-bits start at 0x00 and end at 0xff. So the
1683 * address range is from 0x0000 up to 0x17ff. Therefore,
1684 * the SRAM_SIZE is set to 0x17. The next step is to set
1685 * the BCR26:SRAM_BND midway through so the Tx and Rx
1686 * buffers can share the SRAM equally.
1687 */
1688 a->write_bcr(ioaddr, 25, 0x17);
1689 a->write_bcr(ioaddr, 26, 0xc);
1690 /* And finally enable the NOUFLO bit */
1691 a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
1692 }
1693
1667 dev = alloc_etherdev(sizeof(*lp)); 1694 dev = alloc_etherdev(sizeof(*lp));
1668 if (!dev) { 1695 if (!dev) {
1669 ret = -ENOMEM; 1696 ret = -ENOMEM;
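The comment block in the hunk above walks through the SRAM setup arithmetic for
the 79C973/79C975 parts: BCR25 takes the upper byte of the SRAM size (0x17,
giving the address range 0x0000..0x17ff), BCR26 puts the Tx/Rx boundary roughly
midway, and BCR18 bit 11 turns on NOUFLO. A tiny self-contained sketch that just
reproduces those values; the register-write helpers are deliberately left out
and nothing below is the driver's code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t  sram_size = 0x17;                  /* BCR25: upper 8 bits of the SRAM size */
    uint8_t  sram_bnd  = (sram_size + 1) / 2;   /* BCR26: split the SRAM midway -> 0x0c */
    uint16_t bcr18     = 0;                     /* stands in for the value read from BCR18 */

    bcr18 |= 1 << 11;                           /* NOUFLO bit, used to avoid Tx FIFO underflows */

    printf("BCR25 = 0x%02x (addresses 0x0000..0x%04x)\n",
           (unsigned)sram_size, ((unsigned)sram_size << 8) | 0xffu);
    printf("BCR26 = 0x%02x\n", (unsigned)sram_bnd);
    printf("BCR18 |= 0x%04x\n", (unsigned)bcr18);
    return 0;
}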
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bef750a09027..996e215fc324 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12769,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12769 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12770 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; 12771 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12772 if (!CHIP_IS_E1x(bp)) { 12772 if (!chip_is_e1x) {
12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 12773 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; 12774 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
12775 dev->hw_enc_features = 12775 dev->hw_enc_features =
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 853c38997c82..1abdfa123c6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1120 } 1120 }
1121 1121
1122 /* Installed successfully, update the cached header too. */ 1122 /* Installed successfully, update the cached header too. */
1123 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1123 *card_fw = *fs_fw;
1124 card_fw_usable = 1; 1124 card_fw_usable = 1;
1125 *reset = 0; /* already reset as part of load_fw */ 1125 *reset = 0; /* already reset as part of load_fw */
1126 } 1126 }
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556f7f8d..ed41559bae77 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
589 (unsigned int)tp->rx_ring[i].buffer1, 589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2, 590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]); 591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++) 592 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
593 if (j < 100) 593 if (j < 100)
594 pr_cont(" %02x", buf[j]); 594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j); 595 pr_cont(" j=%d\n", j);
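The tulip change above only swaps the order of the two tests in the loop
condition. Because && short-circuits, evaluating j < 1600 first guarantees
buf[j] is never read past the end of the buffer when no 0xee sentinel is
present. A standalone illustration of the same ordering; the buffer contents
here are invented for the example:

#include <stdio.h>

int main(void)
{
    unsigned char buf[1600];
    int j;

    for (j = 0; j < (int)sizeof(buf); j++)
        buf[j] = 0xaa;                          /* no 0xee terminator anywhere */

    /* safe: the bounds check runs before buf[j] is dereferenced */
    for (j = 0; (j < 1600) && buf[j] != 0xee; j++)
        ;

    printf("stopped at j=%d without reading out of bounds\n", j);
    return 0;
}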
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
354 u16 vlan_tag; 354 u16 vlan_tag;
355 u32 tx_rate; 355 u32 tx_rate;
356 u32 plink_tracking; 356 u32 plink_tracking;
357 u32 privileges;
357}; 358};
358 359
359enum vf_state { 360enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
423 424
424 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ 425 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
425 u8 __iomem *db; /* Door Bell */ 426 u8 __iomem *db; /* Door Bell */
427 u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
426 428
427 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 429 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
428 struct be_dma_mem mbox_mem; 430 struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1902{ 1902{
1903 int num_eqs, i = 0; 1903 int num_eqs, i = 0;
1904 1904
1905 if (lancer_chip(adapter) && num > 8) { 1905 while (num) {
1906 while (num) { 1906 num_eqs = min(num, 8);
1907 num_eqs = min(num, 8); 1907 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1908 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1908 i += num_eqs;
1909 i += num_eqs; 1909 num -= num_eqs;
1910 num -= num_eqs;
1911 }
1912 } else {
1913 __be_cmd_modify_eqd(adapter, set_eqd, num);
1914 } 1910 }
1915 1911
1916 return 0; 1912 return 0;
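The be_cmds.c hunk above drops the Lancer-only special case and always walks
the EQ-delay array in slices of at most 8 entries; a request with num <= 8
simply becomes a single slice, so the old else-branch is redundant. A generic
sketch of that chunking loop with made-up names:

#include <stdio.h>

#define CHUNK 8        /* the firmware command accepts at most 8 entries at a time */

static void issue_cmd(const int *vals, int n)
{
    printf("sending %d entries starting with value %d\n", n, vals[0]);
}

int main(void)
{
    int eqd[19];
    int i = 0, num = 19;
    int k;

    for (k = 0; k < num; k++)
        eqd[k] = k;

    while (num) {
        int n = num < CHUNK ? num : CHUNK;
        issue_cmd(&eqd[i], n);    /* slices of 8, 8 and 3 for this example */
        i += n;
        num -= n;
    }
    return 0;
}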
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1918 1914
 1919/* Uses synchronous mcc */ 1915/* Uses synchronous mcc */
1920int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1916int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 u32 num) 1917 u32 num, u32 domain)
1922{ 1918{
1923 struct be_mcc_wrb *wrb; 1919 struct be_mcc_wrb *wrb;
1924 struct be_cmd_req_vlan_config *req; 1920 struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1936 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1933 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 wrb, NULL); 1934 wrb, NULL);
1935 req->hdr.domain = domain;
1939 1936
1940 req->interface_id = if_id; 1937 req->interface_id = if_id;
1941 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1938 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
2256int be_cmd_get_fw_ver(struct be_adapter *adapter); 2256int be_cmd_get_fw_ver(struct be_adapter *adapter);
2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2259 u32 num); 2259 u32 num, u32 domain);
2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1172 vids[num++] = cpu_to_le16(i); 1172 vids[num++] = cpu_to_le16(i);
1173 1173
1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1175 if (status) { 1175 if (status) {
1176 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1176 dev_err(dev, "Setting HW VLAN filtering failed\n");
1177 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1177 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1384{
1385 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1386 u16 vids[BE_NUM_VLANS_SUPPORTED];
1387 int vf_if_id = vf_cfg->if_handle;
1388 int status;
1389
1390 /* Enable Transparent VLAN Tagging */
1391 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1392 if (status)
1393 return status;
1394
1395 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1396 vids[0] = 0;
1397 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1398 if (!status)
1399 dev_info(&adapter->pdev->dev,
1400 "Cleared guest VLANs on VF%d", vf);
1401
1402 /* After TVT is enabled, disallow VFs to program VLAN filters */
1403 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1404 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1405 ~BE_PRIV_FILTMGMT, vf + 1);
1406 if (!status)
1407 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1408 }
1409 return 0;
1410}
1411
1412static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1413{
1414 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1415 struct device *dev = &adapter->pdev->dev;
1416 int status;
1417
1418 /* Reset Transparent VLAN Tagging. */
1419 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1420 vf_cfg->if_handle, 0);
1421 if (status)
1422 return status;
1423
1424 /* Allow VFs to program VLAN filtering */
1425 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1426 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1427 BE_PRIV_FILTMGMT, vf + 1);
1428 if (!status) {
1429 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1430 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1431 }
1432 }
1433
1434 dev_info(dev,
1435 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1436 return 0;
1437}
1438
1383static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1439static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1384{ 1440{
1385 struct be_adapter *adapter = netdev_priv(netdev); 1441 struct be_adapter *adapter = netdev_priv(netdev);
1386 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1442 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1387 int status = 0; 1443 int status;
1388 1444
1389 if (!sriov_enabled(adapter)) 1445 if (!sriov_enabled(adapter))
1390 return -EPERM; 1446 return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1394 1450
1395 if (vlan || qos) { 1451 if (vlan || qos) {
1396 vlan |= qos << VLAN_PRIO_SHIFT; 1452 vlan |= qos << VLAN_PRIO_SHIFT;
1397 if (vf_cfg->vlan_tag != vlan) 1453 status = be_set_vf_tvt(adapter, vf, vlan);
1398 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1399 vf_cfg->if_handle, 0);
1400 } else { 1454 } else {
1401 /* Reset Transparent Vlan Tagging. */ 1455 status = be_clear_vf_tvt(adapter, vf);
1402 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1403 vf + 1, vf_cfg->if_handle, 0);
1404 } 1456 }
1405 1457
1406 if (status) { 1458 if (status) {
1407 dev_err(&adapter->pdev->dev, 1459 dev_err(&adapter->pdev->dev,
1408 "VLAN %d config on VF %d failed : %#x\n", vlan, 1460 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1409 vf, status); 1461 status);
1410 return be_cmd_status(status); 1462 return be_cmd_status(status);
1411 } 1463 }
1412 1464
1413 vf_cfg->vlan_tag = vlan; 1465 vf_cfg->vlan_tag = vlan;
1414
1415 return 0; 1466 return 0;
1416} 1467}
1417 1468
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
2772 } 2823 }
2773 } 2824 }
2774 } else { 2825 } else {
2775 pci_read_config_dword(adapter->pdev, 2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2776 PCICFG_UE_STATUS_LOW, &ue_lo); 2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2777 pci_read_config_dword(adapter->pdev, 2828 ue_lo_mask = ioread32(adapter->pcicfg +
2778 PCICFG_UE_STATUS_HIGH, &ue_hi); 2829 PCICFG_UE_STATUS_LOW_MASK);
2779 pci_read_config_dword(adapter->pdev, 2830 ue_hi_mask = ioread32(adapter->pcicfg +
2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2831 PCICFG_UE_STATUS_HI_MASK);
2781 pci_read_config_dword(adapter->pdev,
2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2783 2832
2784 ue_lo = (ue_lo & ~ue_lo_mask); 2833 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask); 2834 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3339 u32 cap_flags, u32 vf) 3388 u32 cap_flags, u32 vf)
3340{ 3389{
3341 u32 en_flags; 3390 u32 en_flags;
3342 int status;
3343 3391
3344 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3345 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | 3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3347 3395
3348 en_flags &= cap_flags; 3396 en_flags &= cap_flags;
3349 3397
3350 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3351 if_handle, vf);
3352
3353 return status;
3354} 3399}
3355 3400
3356static int be_vfs_if_create(struct be_adapter *adapter) 3401static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3368 if (!BE3_chip(adapter)) { 3413 if (!BE3_chip(adapter)) {
3369 status = be_cmd_get_profile_config(adapter, &res, 3414 status = be_cmd_get_profile_config(adapter, &res,
3370 vf + 1); 3415 vf + 1);
3371 if (!status) 3416 if (!status) {
3372 cap_flags = res.if_cap_flags; 3417 cap_flags = res.if_cap_flags;
3418 /* Prevent VFs from enabling VLAN promiscuous
3419 * mode
3420 */
3421 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3422 }
3373 } 3423 }
3374 3424
3375 status = be_if_create(adapter, &vf_cfg->if_handle, 3425 status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3403 struct device *dev = &adapter->pdev->dev; 3453 struct device *dev = &adapter->pdev->dev;
3404 struct be_vf_cfg *vf_cfg; 3454 struct be_vf_cfg *vf_cfg;
3405 int status, old_vfs, vf; 3455 int status, old_vfs, vf;
3406 u32 privileges;
3407 3456
3408 old_vfs = pci_num_vf(adapter->pdev); 3457 old_vfs = pci_num_vf(adapter->pdev);
3409 3458
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
3433 3482
3434 for_all_vfs(adapter, vf_cfg, vf) { 3483 for_all_vfs(adapter, vf_cfg, vf) {
 3435 /* Allow VFs to program MAC/VLAN filters */ 3484 /* Allow VFs to program MAC/VLAN filters */
3436 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); 3485 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3437 if (!status && !(privileges & BE_PRIV_FILTMGMT)) { 3486 vf + 1);
3487 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3438 status = be_cmd_set_fn_privileges(adapter, 3488 status = be_cmd_set_fn_privileges(adapter,
3439 privileges | 3489 vf_cfg->privileges |
3440 BE_PRIV_FILTMGMT, 3490 BE_PRIV_FILTMGMT,
3441 vf + 1); 3491 vf + 1);
3442 if (!status) 3492 if (!status) {
3493 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3443 dev_info(dev, "VF%d has FILTMGMT privilege\n", 3494 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3444 vf); 3495 vf);
3496 }
3445 } 3497 }
3446 3498
3447 /* Allow full available bandwidth */ 3499 /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
4820 4872
4821static int be_map_pci_bars(struct be_adapter *adapter) 4873static int be_map_pci_bars(struct be_adapter *adapter)
4822{ 4874{
4875 struct pci_dev *pdev = adapter->pdev;
4823 u8 __iomem *addr; 4876 u8 __iomem *addr;
4824 4877
4825 if (BEx_chip(adapter) && be_physfn(adapter)) { 4878 if (BEx_chip(adapter) && be_physfn(adapter)) {
4826 adapter->csr = pci_iomap(adapter->pdev, 2, 0); 4879 adapter->csr = pci_iomap(pdev, 2, 0);
4827 if (!adapter->csr) 4880 if (!adapter->csr)
4828 return -ENOMEM; 4881 return -ENOMEM;
4829 } 4882 }
4830 4883
4831 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 4884 addr = pci_iomap(pdev, db_bar(adapter), 0);
4832 if (!addr) 4885 if (!addr)
4833 goto pci_map_err; 4886 goto pci_map_err;
4834 adapter->db = addr; 4887 adapter->db = addr;
4835 4888
4889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
4836 be_roce_map_pci_bars(adapter); 4901 be_roce_map_pci_bars(adapter);
4837 return 0; 4902 return 0;
4838 4903
4839pci_map_err: 4904pci_map_err:
4840 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); 4905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
4841 be_unmap_pci_bars(adapter); 4906 be_unmap_pci_bars(adapter);
4842 return -ENOMEM; 4907 return -ENOMEM;
4843} 4908}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 99492b7e3713..78e1ce09b1ab 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1190{ 1190{
1191 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1192 struct bufdesc *bdp, *bdp_t; 1192 struct bufdesc *bdp;
1193 unsigned short status; 1193 unsigned short status;
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1196 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1197 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1199 int entries_free; 1198 int entries_free;
1200 1199
1201 fep = netdev_priv(ndev); 1200 fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1216 if (bdp == txq->cur_tx) 1215 if (bdp == txq->cur_tx)
1217 break; 1216 break;
1218 1217
1219 bdp_t = bdp; 1218 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1222 skb = txq->tx_skbuff[index];
1223 while (!skb) {
1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1226 skb = txq->tx_skbuff[index];
1227 bdnum++;
1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232 1219
1233 for (i = 0; i < bdnum; i++) { 1220 skb = txq->tx_skbuff[index];
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL; 1221 txq->tx_skbuff[index] = NULL;
1222 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1223 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1224 bdp->cbd_datlen, DMA_TO_DEVICE);
1225 bdp->cbd_bufaddr = 0;
1226 if (!skb) {
1227 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1228 continue;
1229 }
1242 1230
1243 /* Check for errors. */ 1231 /* Check for errors. */
1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1232 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1479 1467
1480 vlan_packet_rcvd = true; 1468 vlan_packet_rcvd = true;
1481 1469
1482 skb_copy_to_linear_data_offset(skb, VLAN_HLEN, 1470 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1483 data, (2 * ETH_ALEN));
1484 skb_pull(skb, VLAN_HLEN); 1471 skb_pull(skb, VLAN_HLEN);
1485 } 1472 }
1486 1473
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 072426a72745..cd7675ac5bf9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
1136 ibmveth_replenish_task(adapter); 1136 ibmveth_replenish_task(adapter);
1137 1137
1138 if (frames_processed < budget) { 1138 if (frames_processed < budget) {
1139 napi_complete(napi);
1140
1139 /* We think we are done - reenable interrupts, 1141 /* We think we are done - reenable interrupts,
1140 * then check once more to make sure we are done. 1142 * then check once more to make sure we are done.
1141 */ 1143 */
@@ -1144,8 +1146,6 @@ restart_poll:
1144 1146
1145 BUG_ON(lpar_rc != H_SUCCESS); 1147 BUG_ON(lpar_rc != H_SUCCESS);
1146 1148
1147 napi_complete(napi);
1148
1149 if (ibmveth_rxq_pending_buffer(adapter) && 1149 if (ibmveth_rxq_pending_buffer(adapter) &&
1150 napi_reschedule(napi)) { 1150 napi_reschedule(napi)) {
1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
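The ibmveth hunk above moves napi_complete() in front of the interrupt
re-enable, matching the adjacent comment: declare the poll finished, re-enable
interrupts, then look once more so a frame that slipped in between is handled
by a rescheduled poll rather than lost. A flag-based sketch of that ordering;
the helpers below are illustrative stand-ins, not the NAPI API:

#include <stdio.h>
#include <stdbool.h>

static bool polling = true;       /* we are currently inside the poll routine */
static bool irq_enabled;
static bool pending_work = true;  /* a frame arrived while we were polling */

static void complete_poll(void) { polling = false; }
static void enable_irq(void)    { irq_enabled = true; }

static bool reschedule_poll(void)
{
    if (polling)
        return false;             /* already scheduled, nothing to do */
    polling = true;
    return true;
}

int main(void)
{
    complete_poll();              /* 1. leave polling mode                  */
    enable_irq();                 /* 2. re-enable the device interrupt      */

    if (pending_work && reschedule_poll()) {
        irq_enabled = false;      /* 3. work raced in: back to polling mode */
        printf("rescheduled poll, interrupts %s\n", irq_enabled ? "on" : "off");
    } else {
        printf("poll complete, interrupts %s\n", irq_enabled ? "on" : "off");
    }
    return 0;
}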
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4efb89..ebce5bb24df9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
1698 /* Schedule multicast task to populate multicast list */ 1698 /* Schedule multicast task to populate multicast list */
1699 queue_work(mdev->workqueue, &priv->rx_mode_task); 1699 queue_work(mdev->workqueue, &priv->rx_mode_task);
1700 1700
1701 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1702
1703#ifdef CONFIG_MLX4_EN_VXLAN 1701#ifdef CONFIG_MLX4_EN_VXLAN
1704 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1702 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1705 vxlan_get_rx_port(dev); 1703 vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2853 queue_delayed_work(mdev->workqueue, &priv->service_task, 2851 queue_delayed_work(mdev->workqueue, &priv->service_task,
2854 SERVICE_TASK_DELAY); 2852 SERVICE_TASK_DELAY);
2855 2853
2854 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
2855
2856 return 0; 2856 return 0;
2857 2857
2858out: 2858out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..ebbe244e80dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
453 unsigned long rx_chksum_none; 453 unsigned long rx_chksum_none;
454 unsigned long rx_chksum_complete; 454 unsigned long rx_chksum_complete;
455 unsigned long tx_chksum_offload; 455 unsigned long tx_chksum_offload;
456#define NUM_PORT_STATS 9 456#define NUM_PORT_STATS 10
457}; 457};
458 458
459struct mlx4_en_perf_stats { 459struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5d093dc0f5f5..8678e39aba08 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2248,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2248 const struct of_device_id *match = NULL; 2248 const struct of_device_id *match = NULL;
2249 struct smc_local *lp; 2249 struct smc_local *lp;
2250 struct net_device *ndev; 2250 struct net_device *ndev;
2251 struct resource *res; 2251 struct resource *res, *ires;
2252 unsigned int __iomem *addr; 2252 unsigned int __iomem *addr;
2253 unsigned long irq_flags = SMC_IRQ_FLAGS; 2253 unsigned long irq_flags = SMC_IRQ_FLAGS;
2254 unsigned long irq_resflags;
2255 int ret; 2254 int ret;
2256 2255
2257 ndev = alloc_etherdev(sizeof(struct smc_local)); 2256 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2343,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev)
2343 goto out_free_netdev; 2342 goto out_free_netdev;
2344 } 2343 }
2345 2344
2346 ndev->irq = platform_get_irq(pdev, 0); 2345 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2347 if (ndev->irq <= 0) { 2346 if (!ires) {
2348 ret = -ENODEV; 2347 ret = -ENODEV;
2349 goto out_release_io; 2348 goto out_release_io;
2350 } 2349 }
2351 /* 2350
2352 * If this platform does not specify any special irqflags, or if 2351 ndev->irq = ires->start;
2353 * the resource supplies a trigger, override the irqflags with 2352
2354 * the trigger flags from the resource. 2353 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2355 */ 2354 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2356 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2357 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2358 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2359 2355
2360 ret = smc_request_attrib(pdev, ndev); 2356 ret = smc_request_attrib(pdev, ndev);
2361 if (ret) 2357 if (ret)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..0e0fbb5842b3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
498 } 498 }
499 499
500 if (rx_count < budget) { 500 if (rx_count < budget) {
501 napi_complete(napi);
501 w5100_write(priv, W5100_IMR, IR_S0); 502 w5100_write(priv, W5100_IMR, IR_S0);
502 mmiowb(); 503 mmiowb();
503 napi_complete(napi);
504 } 504 }
505 505
506 return rx_count; 506 return rx_count;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..4b310002258d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
418 } 418 }
419 419
420 if (rx_count < budget) { 420 if (rx_count < budget) {
421 napi_complete(napi);
421 w5300_write(priv, W5300_IMR, IR_S0); 422 w5300_write(priv, W5300_IMR, IR_S0);
422 mmiowb(); 423 mmiowb();
423 napi_complete(napi);
424 } 424 }
425 425
426 return rx_count; 426 return rx_count;
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 3eed708a6182..1762ad3910b2 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -46,8 +46,7 @@ enum cx82310_status {
46}; 46};
47 47
48#define CMD_PACKET_SIZE 64 48#define CMD_PACKET_SIZE 64
49/* first command after power on can take around 8 seconds */ 49#define CMD_TIMEOUT 100
50#define CMD_TIMEOUT 15000
51#define CMD_REPLY_RETRY 5 50#define CMD_REPLY_RETRY 5
52 51
53#define CX82310_MTU 1514 52#define CX82310_MTU 1514
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
78 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, 77 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
79 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); 78 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
80 if (ret < 0) { 79 if (ret < 0) {
81 dev_err(&dev->udev->dev, "send command %#x: error %d\n", 80 if (cmd != CMD_GET_LINK_STATUS)
82 cmd, ret); 81 dev_err(&dev->udev->dev, "send command %#x: error %d\n",
82 cmd, ret);
83 goto end; 83 goto end;
84 } 84 }
85 85
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
90 buf, CMD_PACKET_SIZE, &actual_len, 90 buf, CMD_PACKET_SIZE, &actual_len,
91 CMD_TIMEOUT); 91 CMD_TIMEOUT);
92 if (ret < 0) { 92 if (ret < 0) {
93 dev_err(&dev->udev->dev, 93 if (cmd != CMD_GET_LINK_STATUS)
94 "reply receive error %d\n", ret); 94 dev_err(&dev->udev->dev,
95 "reply receive error %d\n",
96 ret);
95 goto end; 97 goto end;
96 } 98 }
97 if (actual_len > 0) 99 if (actual_len > 0)
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
134 int ret; 136 int ret;
135 char buf[15]; 137 char buf[15];
136 struct usb_device *udev = dev->udev; 138 struct usb_device *udev = dev->udev;
139 u8 link[3];
140 int timeout = 50;
137 141
138 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ 142 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
139 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 143 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
160 if (!dev->partial_data) 164 if (!dev->partial_data)
161 return -ENOMEM; 165 return -ENOMEM;
162 166
167 /* wait for firmware to become ready (indicated by the link being up) */
168 while (--timeout) {
169 ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
170 link, sizeof(link));
171 /* the command can time out during boot - it's not an error */
172 if (!ret && link[0] == 1 && link[2] == 1)
173 break;
174 msleep(500);
175 };
176 if (!timeout) {
177 dev_err(&udev->dev, "firmware not ready in time\n");
178 return -ETIMEDOUT;
179 }
180
163 /* enable ethernet mode (?) */ 181 /* enable ethernet mode (?) */
164 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); 182 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
165 if (ret) { 183 if (ret) {
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = {
300 .tx_fixup = cx82310_tx_fixup, 318 .tx_fixup = cx82310_tx_fixup,
301}; 319};
302 320
321#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
322 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
323 USB_DEVICE_ID_MATCH_DEV_INFO, \
324 .idVendor = (vend), \
325 .idProduct = (prod), \
326 .bDeviceClass = (cl), \
327 .bDeviceSubClass = (sc), \
328 .bDeviceProtocol = (pr)
329
303static const struct usb_device_id products[] = { 330static const struct usb_device_id products[] = {
304 { 331 {
305 USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), 332 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
306 .driver_info = (unsigned long) &cx82310_info 333 .driver_info = (unsigned long) &cx82310_info
307 }, 334 },
308 { }, 335 { },
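Besides shortening CMD_TIMEOUT, the cx82310 changes above add a boot-time wait:
bind polls CMD_GET_LINK_STATUS every 500 ms, up to 50 tries, and only continues
once the returned link bytes show the firmware is ready, treating command
timeouts during boot as expected. A standalone sketch of that bounded polling
loop; query_link_status() is a made-up stand-in for the real command:

#include <stdio.h>
#include <unistd.h>

static int attempts_until_ready = 4;      /* pretend the firmware needs a few tries */

static int query_link_status(unsigned char link[3])
{
    if (--attempts_until_ready > 0)
        return -1;                        /* command timed out: still booting */
    link[0] = 1;
    link[2] = 1;
    return 0;
}

int main(void)
{
    unsigned char link[3] = { 0 };
    int timeout = 50;

    while (--timeout) {
        if (!query_link_status(link) && link[0] == 1 && link[2] == 1)
            break;
        usleep(500 * 1000);               /* 500 ms between attempts, as in the hunk */
    }

    if (!timeout) {
        fprintf(stderr, "firmware not ready in time\n");
        return 1;
    }
    printf("firmware ready after %d attempts\n", 50 - timeout);
    return 0;
}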
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f1ff3666f090..59b0e9754ae3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1448{ 1448{
1449 int i; 1449 int i;
1450 1450
1451 for (i = 0; i < vi->max_queue_pairs; i++) 1451 for (i = 0; i < vi->max_queue_pairs; i++) {
1452 napi_hash_del(&vi->rq[i].napi);
1452 netif_napi_del(&vi->rq[i].napi); 1453 netif_napi_del(&vi->rq[i].napi);
1454 }
1453 1455
1454 kfree(vi->rq); 1456 kfree(vi->rq);
1455 kfree(vi->sq); 1457 kfree(vi->sq);
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1948 cancel_delayed_work_sync(&vi->refill); 1950 cancel_delayed_work_sync(&vi->refill);
1949 1951
1950 if (netif_running(vi->dev)) { 1952 if (netif_running(vi->dev)) {
1951 for (i = 0; i < vi->max_queue_pairs; i++) { 1953 for (i = 0; i < vi->max_queue_pairs; i++)
1952 napi_disable(&vi->rq[i].napi); 1954 napi_disable(&vi->rq[i].napi);
1953 napi_hash_del(&vi->rq[i].napi);
1954 netif_napi_del(&vi->rq[i].napi);
1955 }
1956 } 1955 }
1957 1956
1958 remove_vq_common(vi); 1957 remove_vq_common(vi);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1e0a775ea882..f8528a4cf54f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1218 goto drop; 1218 goto drop;
1219 1219
1220 flags &= ~VXLAN_HF_RCO; 1220 flags &= ~VXLAN_HF_RCO;
1221 vni &= VXLAN_VID_MASK; 1221 vni &= VXLAN_VNI_MASK;
1222 } 1222 }
1223 1223
1224 /* For backwards compatibility, only allow reserved fields to be 1224 /* For backwards compatibility, only allow reserved fields to be
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1239 flags &= ~VXLAN_GBP_USED_BITS; 1239 flags &= ~VXLAN_GBP_USED_BITS;
1240 } 1240 }
1241 1241
1242 if (flags || (vni & ~VXLAN_VID_MASK)) { 1242 if (flags || vni & ~VXLAN_VNI_MASK) {
1243 /* If there are any unprocessed flags remaining treat 1243 /* If there are any unprocessed flags remaining treat
1244 * this as a malformed packet. This behavior diverges from 1244 * this as a malformed packet. This behavior diverges from
1245 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1245 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ccbdb05b28cd..75345c1e8c34 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
5370 case 0x432a: /* BCM4321 */ 5370 case 0x432a: /* BCM4321 */
5371 case 0x432d: /* BCM4322 */ 5371 case 0x432d: /* BCM4322 */
5372 case 0x4352: /* BCM43222 */ 5372 case 0x4352: /* BCM43222 */
5373 case 0x435a: /* BCM43228 */
5373 case 0x4333: /* BCM4331 */ 5374 case 0x4333: /* BCM4331 */
5374 case 0x43a2: /* BCM4360 */ 5375 case 0x43a2: /* BCM4360 */
5375 case 0x43b3: /* BCM4352 */ 5376 case 0x43b3: /* BCM4352 */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
index 50cdf7090198..8eff2753abad 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
39 void *dcmd_buf = NULL, *wr_pointer; 39 void *dcmd_buf = NULL, *wr_pointer;
40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100; 40 u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
41 41
42 brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, 42 if (len < sizeof(*cmdhdr)) {
43 cmdhdr->len); 43 brcmf_err("vendor command too short: %d\n", len);
44 return -EINVAL;
45 }
44 46
45 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); 47 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
46 ifp = vif->ifp; 48 ifp = vif->ifp;
47 49
48 len -= sizeof(struct brcmf_vndr_dcmd_hdr); 50 brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
51
52 if (cmdhdr->offset > len) {
53 brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
54 return -EINVAL;
55 }
56
57 len -= cmdhdr->offset;
49 ret_len = cmdhdr->len; 58 ret_len = cmdhdr->len;
50 if (ret_len > 0 || len > 0) { 59 if (ret_len > 0 || len > 0) {
51 if (len > BRCMF_DCMD_MAXLEN) { 60 if (len > BRCMF_DCMD_MAXLEN) {
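The brcmfmac hunk above stops trusting the vendor-command header before
validating it: the buffer must be at least sizeof(*cmdhdr) bytes, and the
embedded offset must not point past the end of the buffer, otherwise the
handler returns -EINVAL before touching any field. A small userspace sketch of
the same validate-before-use pattern; the struct layout and names are invented
for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct vndr_hdr {
    uint32_t cmd;
    uint16_t len;
    uint16_t offset;   /* where the payload starts inside the buffer */
};

static int handle_vendor_cmd(const void *data, size_t len)
{
    struct vndr_hdr hdr;

    if (len < sizeof(hdr)) {
        fprintf(stderr, "vendor command too short: %zu\n", len);
        return -1;
    }
    memcpy(&hdr, data, sizeof(hdr));

    if (hdr.offset > len) {
        fprintf(stderr, "bad buffer offset %u > %zu\n", (unsigned)hdr.offset, len);
        return -1;
    }

    printf("cmd %u, %zu payload bytes\n", (unsigned)hdr.cmd, len - hdr.offset);
    return 0;
}

int main(void)
{
    unsigned char buf[16] = { 0 };
    struct vndr_hdr hdr = { .cmd = 7, .len = 8, .offset = 8 };

    memcpy(buf, &hdr, sizeof(hdr));
    handle_vendor_cmd(buf, sizeof(buf));   /* accepted: 8 payload bytes       */
    handle_vendor_cmd(buf, 4);             /* rejected: shorter than a header */
    return 0;
}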
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index c3817fae16c0..06f6cc08f451 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 95 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
96 .base_params = &iwl1000_base_params, \ 96 .base_params = &iwl1000_base_params, \
97 .eeprom_params = &iwl1000_eeprom_params, \ 97 .eeprom_params = &iwl1000_eeprom_params, \
98 .led_mode = IWL_LED_BLINK 98 .led_mode = IWL_LED_BLINK, \
99 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
99 100
100const struct iwl_cfg iwl1000_bgn_cfg = { 101const struct iwl_cfg iwl1000_bgn_cfg = {
101 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", 102 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
121 .base_params = &iwl1000_base_params, \ 122 .base_params = &iwl1000_base_params, \
122 .eeprom_params = &iwl1000_eeprom_params, \ 123 .eeprom_params = &iwl1000_eeprom_params, \
123 .led_mode = IWL_LED_RF_STATE, \ 124 .led_mode = IWL_LED_RF_STATE, \
124 .rx_with_siso_diversity = true 125 .rx_with_siso_diversity = true, \
126 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
125 127
126const struct iwl_cfg iwl100_bgn_cfg = { 128const struct iwl_cfg iwl100_bgn_cfg = {
127 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", 129 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 21e5d0843a62..890b95f497d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 123 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
124 .base_params = &iwl2000_base_params, \ 124 .base_params = &iwl2000_base_params, \
125 .eeprom_params = &iwl20x0_eeprom_params, \ 125 .eeprom_params = &iwl20x0_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE 126 .led_mode = IWL_LED_RF_STATE, \
127 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
128
127 129
128const struct iwl_cfg iwl2000_2bgn_cfg = { 130const struct iwl_cfg iwl2000_2bgn_cfg = {
129 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", 131 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
149 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 151 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
150 .base_params = &iwl2030_base_params, \ 152 .base_params = &iwl2030_base_params, \
151 .eeprom_params = &iwl20x0_eeprom_params, \ 153 .eeprom_params = &iwl20x0_eeprom_params, \
152 .led_mode = IWL_LED_RF_STATE 154 .led_mode = IWL_LED_RF_STATE, \
155 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
153 156
154const struct iwl_cfg iwl2030_2bgn_cfg = { 157const struct iwl_cfg iwl2030_2bgn_cfg = {
155 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 158 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
170 .base_params = &iwl2000_base_params, \ 173 .base_params = &iwl2000_base_params, \
171 .eeprom_params = &iwl20x0_eeprom_params, \ 174 .eeprom_params = &iwl20x0_eeprom_params, \
172 .led_mode = IWL_LED_RF_STATE, \ 175 .led_mode = IWL_LED_RF_STATE, \
173 .rx_with_siso_diversity = true 176 .rx_with_siso_diversity = true, \
177 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
174 178
175const struct iwl_cfg iwl105_bgn_cfg = { 179const struct iwl_cfg iwl105_bgn_cfg = {
176 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", 180 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
197 .base_params = &iwl2030_base_params, \ 201 .base_params = &iwl2030_base_params, \
198 .eeprom_params = &iwl20x0_eeprom_params, \ 202 .eeprom_params = &iwl20x0_eeprom_params, \
199 .led_mode = IWL_LED_RF_STATE, \ 203 .led_mode = IWL_LED_RF_STATE, \
200 .rx_with_siso_diversity = true 204 .rx_with_siso_diversity = true, \
205 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
201 206
202const struct iwl_cfg iwl135_bgn_cfg = { 207const struct iwl_cfg iwl135_bgn_cfg = {
203 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", 208 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 332bbede39e5..724194e23414 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 93 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
94 .base_params = &iwl5000_base_params, \ 94 .base_params = &iwl5000_base_params, \
95 .eeprom_params = &iwl5000_eeprom_params, \ 95 .eeprom_params = &iwl5000_eeprom_params, \
96 .led_mode = IWL_LED_BLINK 96 .led_mode = IWL_LED_BLINK, \
97 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
97 98
98const struct iwl_cfg iwl5300_agn_cfg = { 99const struct iwl_cfg iwl5300_agn_cfg = {
99 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 100 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
158 .base_params = &iwl5000_base_params, \ 159 .base_params = &iwl5000_base_params, \
159 .eeprom_params = &iwl5000_eeprom_params, \ 160 .eeprom_params = &iwl5000_eeprom_params, \
160 .led_mode = IWL_LED_BLINK, \ 161 .led_mode = IWL_LED_BLINK, \
161 .internal_wimax_coex = true 162 .internal_wimax_coex = true, \
163 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
162 164
163const struct iwl_cfg iwl5150_agn_cfg = { 165const struct iwl_cfg iwl5150_agn_cfg = {
164 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", 166 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8f2c3c8c6b84..21b2630763dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 145 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
146 .base_params = &iwl6000_g2_base_params, \ 146 .base_params = &iwl6000_g2_base_params, \
147 .eeprom_params = &iwl6000_eeprom_params, \ 147 .eeprom_params = &iwl6000_eeprom_params, \
148 .led_mode = IWL_LED_RF_STATE 148 .led_mode = IWL_LED_RF_STATE, \
149 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
149 150
150const struct iwl_cfg iwl6005_2agn_cfg = { 151const struct iwl_cfg iwl6005_2agn_cfg = {
151 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", 152 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
199 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 200 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
200 .base_params = &iwl6000_g2_base_params, \ 201 .base_params = &iwl6000_g2_base_params, \
201 .eeprom_params = &iwl6000_eeprom_params, \ 202 .eeprom_params = &iwl6000_eeprom_params, \
202 .led_mode = IWL_LED_RF_STATE 203 .led_mode = IWL_LED_RF_STATE, \
204 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
203 205
204const struct iwl_cfg iwl6030_2agn_cfg = { 206const struct iwl_cfg iwl6030_2agn_cfg = {
205 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", 207 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
235 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 237 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
236 .base_params = &iwl6000_g2_base_params, \ 238 .base_params = &iwl6000_g2_base_params, \
237 .eeprom_params = &iwl6000_eeprom_params, \ 239 .eeprom_params = &iwl6000_eeprom_params, \
238 .led_mode = IWL_LED_RF_STATE 240 .led_mode = IWL_LED_RF_STATE, \
241 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
239 242
240const struct iwl_cfg iwl6035_2agn_cfg = { 243const struct iwl_cfg iwl6035_2agn_cfg = {
241 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 244 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
290 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 293 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
291 .base_params = &iwl6000_base_params, \ 294 .base_params = &iwl6000_base_params, \
292 .eeprom_params = &iwl6000_eeprom_params, \ 295 .eeprom_params = &iwl6000_eeprom_params, \
293 .led_mode = IWL_LED_BLINK 296 .led_mode = IWL_LED_BLINK, \
297 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
294 298
295const struct iwl_cfg iwl6000i_2agn_cfg = { 299const struct iwl_cfg iwl6000i_2agn_cfg = {
296 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", 300 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
322 .base_params = &iwl6050_base_params, \ 326 .base_params = &iwl6050_base_params, \
323 .eeprom_params = &iwl6000_eeprom_params, \ 327 .eeprom_params = &iwl6000_eeprom_params, \
324 .led_mode = IWL_LED_BLINK, \ 328 .led_mode = IWL_LED_BLINK, \
325 .internal_wimax_coex = true 329 .internal_wimax_coex = true, \
330 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
326 331
327const struct iwl_cfg iwl6050_2agn_cfg = { 332const struct iwl_cfg iwl6050_2agn_cfg = {
328 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", 333 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
347 .base_params = &iwl6050_base_params, \ 352 .base_params = &iwl6050_base_params, \
348 .eeprom_params = &iwl6000_eeprom_params, \ 353 .eeprom_params = &iwl6000_eeprom_params, \
349 .led_mode = IWL_LED_BLINK, \ 354 .led_mode = IWL_LED_BLINK, \
350 .internal_wimax_coex = true 355 .internal_wimax_coex = true, \
356 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
351 357
352const struct iwl_cfg iwl6150_bgn_cfg = { 358const struct iwl_cfg iwl6150_bgn_cfg = {
353 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", 359 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 1ec4d55155f7..7810c41cf9a7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
793 if (!vif->bss_conf.assoc) 793 if (!vif->bss_conf.assoc)
794 smps_mode = IEEE80211_SMPS_AUTOMATIC; 794 smps_mode = IEEE80211_SMPS_AUTOMATIC;
795 795
796 if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, 796 if (mvmvif->phy_ctxt &&
797 IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
797 mvmvif->phy_ctxt->id)) 798 mvmvif->phy_ctxt->id))
798 smps_mode = IEEE80211_SMPS_AUTOMATIC; 799 smps_mode = IEEE80211_SMPS_AUTOMATIC;
799 800
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index d530ef3da107..542ee74f290a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
832 if (!vif->bss_conf.assoc) 832 if (!vif->bss_conf.assoc)
833 smps_mode = IEEE80211_SMPS_AUTOMATIC; 833 smps_mode = IEEE80211_SMPS_AUTOMATIC;
834 834
835 if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) 835 if (mvmvif->phy_ctxt &&
836 data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
836 smps_mode = IEEE80211_SMPS_AUTOMATIC; 837 smps_mode = IEEE80211_SMPS_AUTOMATIC;
837 838
838 IWL_DEBUG_COEX(data->mvm, 839 IWL_DEBUG_COEX(data->mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1ff7ec08532d..09654e73a533 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
407 407
408 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) 408 if ((mvm->fw->ucode_capa.capa[0] &
409 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
410 (mvm->fw->ucode_capa.api[0] &
411 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
409 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= 412 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
410 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 413 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
411 } 414 }
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2215 2218
2216 mutex_lock(&mvm->mutex); 2219 mutex_lock(&mvm->mutex);
2217 2220
2218 iwl_mvm_cancel_scan(mvm); 2221 /* Due to a race condition, it's possible that mac80211 asks
2222 * us to stop a hw_scan when it's already stopped. This can
2223 * happen, for instance, if we stopped the scan ourselves,
2224 * called ieee80211_scan_completed() and the userspace called
 2225 * cancel scan before ieee80211_scan_work() could run.
2226 * To handle that, simply return if the scan is not running.
2227 */
2228 /* FIXME: for now, we ignore this race for UMAC scans, since
2229 * they don't set the scan_status.
2230 */
2231 if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
2232 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2233 iwl_mvm_cancel_scan(mvm);
2219 2234
2220 mutex_unlock(&mvm->mutex); 2235 mutex_unlock(&mvm->mutex);
2221} 2236}
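The comment added above spells out the race: userspace can ask to cancel a scan
the driver has already completed on its own, so the abort is only sent when the
driver's own scan_status says a regular scan is running (UMAC scans are skipped
for now because they do not set that field). A minimal sketch of turning such a
late cancel into a no-op with a state check; the locking the driver does under
mvm->mutex is omitted and all names are illustrative:

#include <stdio.h>

enum scan_state { SCAN_NONE, SCAN_OS, SCAN_SCHED };

static enum scan_state scan_status = SCAN_NONE;   /* the scan already finished */

static void cancel_hw_scan(void)
{
    if (scan_status != SCAN_OS) {
        printf("no regular scan running - ignoring late cancel\n");
        return;
    }
    printf("sending scan-abort command\n");
    scan_status = SCAN_NONE;
}

int main(void)
{
    cancel_hw_scan();            /* late request from userspace: harmless no-op */

    scan_status = SCAN_OS;       /* now pretend a scan really is in flight */
    cancel_hw_scan();            /* this time the abort is actually issued */
    return 0;
}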
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2559 int ret; 2574 int ret;
2560 2575
2561 mutex_lock(&mvm->mutex); 2576 mutex_lock(&mvm->mutex);
2577
2578 /* Due to a race condition, it's possible that mac80211 asks
2579 * us to stop a sched_scan when it's already stopped. This
2580 * can happen, for instance, if we stopped the scan ourselves,
2581 * called ieee80211_sched_scan_stopped() and the userspace called
 2582 * stop sched scan before ieee80211_sched_scan_stopped_work()
2583 * could run. To handle this, simply return if the scan is
2584 * not running.
2585 */
2586 /* FIXME: for now, we ignore this race for UMAC scans, since
2587 * they don't set the scan_status.
2588 */
2589 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
2590 !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2591 mutex_unlock(&mvm->mutex);
2592 return 0;
2593 }
2594
2562 ret = iwl_mvm_scan_offload_stop(mvm, false); 2595 ret = iwl_mvm_scan_offload_stop(mvm, false);
2563 mutex_unlock(&mvm->mutex); 2596 mutex_unlock(&mvm->mutex);
2564 iwl_mvm_wait_for_async_handlers(mvm); 2597 iwl_mvm_wait_for_async_handlers(mvm);
2565 2598
2566 return ret; 2599 return ret;
2567
2568} 2600}
2569 2601
2570static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 2602static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
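The two hunks above guard the stop paths against a request that arrives after the scan has already completed: the driver only calls into the firmware when a scan of the expected kind is actually running. A minimal userspace sketch of that guard pattern, with illustrative names rather than the driver's own:

#include <pthread.h>
#include <stdio.h>

enum scan_status { SCAN_NONE, SCAN_OS, SCAN_SCHED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum scan_status scan_status = SCAN_NONE;

/* Stop handler: return 0 without touching firmware state when no
 * scheduled scan is running, mirroring the early return added to
 * iwl_mvm_mac_sched_scan_stop(). */
static int sched_scan_stop(void)
{
        pthread_mutex_lock(&lock);
        if (scan_status != SCAN_SCHED) {
                pthread_mutex_unlock(&lock);
                return 0;               /* already stopped: nothing to do */
        }
        scan_status = SCAN_NONE;        /* stand-in for the real stop path */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        printf("stop on idle driver -> %d\n", sched_scan_stop());
        return 0;
}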
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 194bd1f939ca..efa9688a4cf1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -134,9 +134,12 @@ enum rs_column_mode {
134#define MAX_NEXT_COLUMNS 7 134#define MAX_NEXT_COLUMNS 7
135#define MAX_COLUMN_CHECKS 3 135#define MAX_COLUMN_CHECKS 3
136 136
137struct rs_tx_column;
138
137typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, 139typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
138 struct ieee80211_sta *sta, 140 struct ieee80211_sta *sta,
139 struct iwl_scale_tbl_info *tbl); 141 struct iwl_scale_tbl_info *tbl,
142 const struct rs_tx_column *next_col);
140 143
141struct rs_tx_column { 144struct rs_tx_column {
142 enum rs_column_mode mode; 145 enum rs_column_mode mode;
@@ -147,13 +150,15 @@ struct rs_tx_column {
147}; 150};
148 151
149static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 152static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
150 struct iwl_scale_tbl_info *tbl) 153 struct iwl_scale_tbl_info *tbl,
154 const struct rs_tx_column *next_col)
151{ 155{
152 return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); 156 return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
153} 157}
154 158
155static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 159static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
156 struct iwl_scale_tbl_info *tbl) 160 struct iwl_scale_tbl_info *tbl,
161 const struct rs_tx_column *next_col)
157{ 162{
158 if (!sta->ht_cap.ht_supported) 163 if (!sta->ht_cap.ht_supported)
159 return false; 164 return false;
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
171} 176}
172 177
173static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 178static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
174 struct iwl_scale_tbl_info *tbl) 179 struct iwl_scale_tbl_info *tbl,
180 const struct rs_tx_column *next_col)
175{ 181{
176 if (!sta->ht_cap.ht_supported) 182 if (!sta->ht_cap.ht_supported)
177 return false; 183 return false;
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
180} 186}
181 187
182static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 188static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
183 struct iwl_scale_tbl_info *tbl) 189 struct iwl_scale_tbl_info *tbl,
190 const struct rs_tx_column *next_col)
184{ 191{
185 struct rs_rate *rate = &tbl->rate; 192 struct rs_rate *rate = &tbl->rate;
186 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 193 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1590 1597
1591 for (j = 0; j < MAX_COLUMN_CHECKS; j++) { 1598 for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
1592 allow_func = next_col->checks[j]; 1599 allow_func = next_col->checks[j];
1593 if (allow_func && !allow_func(mvm, sta, tbl)) 1600 if (allow_func && !allow_func(mvm, sta, tbl, next_col))
1594 break; 1601 break;
1595 } 1602 }
1596 1603
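The rs.c change threads the candidate column into every allow-callback so a check such as rs_ant_allow() can inspect the column being considered (next_col->ant) rather than the currently active table. A reduced sketch of that callback-table shape, with made-up field and function names:

#include <stdbool.h>
#include <stdio.h>

struct tx_column;                               /* forward declaration, as in rs.c */

typedef bool (*allow_fn)(int active_ant, const struct tx_column *next);

struct tx_column {
        int ant;                                /* antenna mask of this column */
        allow_fn checks[2];
};

/* The check looks at the *candidate* column, not the active one. */
static bool ant_allowed(int active_ant, const struct tx_column *next)
{
        return next->ant != 0;
}

static bool column_usable(int active_ant, const struct tx_column *next)
{
        for (int i = 0; i < 2; i++)
                if (next->checks[i] && !next->checks[i](active_ant, next))
                        return false;
        return true;
}

int main(void)
{
        struct tx_column col = { .ant = 1, .checks = { ant_allowed, NULL } };

        printf("usable: %d\n", column_usable(2, &col));
        return 0;
}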
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 7e9aa3cb3254..c47c8051da77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE) 1128 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1129 return 0; 1129 return 0;
1130 1130
1131 if (iwl_mvm_is_radio_killed(mvm)) 1131 if (iwl_mvm_is_radio_killed(mvm)) {
1132 ret = 0;
1132 goto out; 1133 goto out;
1134 }
1133 1135
1134 if (mvm->scan_status != IWL_MVM_SCAN_SCHED && 1136 if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
1135 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || 1137 (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1148 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", 1150 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
1149 sched ? "offloaded " : "", ret); 1151 sched ? "offloaded " : "", ret);
1150 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); 1152 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1151 return ret; 1153 goto out;
1152 } 1154 }
1153 1155
1154 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", 1156 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
1155 sched ? "offloaded " : ""); 1157 sched ? "offloaded " : "");
1156 1158
1157 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); 1159 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1158 if (ret) 1160out:
1159 return ret;
1160
1161 /* 1161 /*
1162 * Clear the scan status so the next scan requests will succeed. This 1162 * Clear the scan status so the next scan requests will succeed. This
1163 * also ensures the Rx handler doesn't do anything, as the scan was 1163 * also ensures the Rx handler doesn't do anything, as the scan was
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
1167 if (mvm->scan_status == IWL_MVM_SCAN_OS) 1167 if (mvm->scan_status == IWL_MVM_SCAN_OS)
1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1168 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1169 1169
1170out:
1171 mvm->scan_status = IWL_MVM_SCAN_NONE; 1170 mvm->scan_status = IWL_MVM_SCAN_NONE;
1172 1171
1173 if (notify) { 1172 if (notify) {
@@ -1177,7 +1176,7 @@ out:
1177 ieee80211_scan_completed(mvm->hw, true); 1176 ieee80211_scan_completed(mvm->hw, true);
1178 } 1177 }
1179 1178
1180 return 0; 1179 return ret;
1181} 1180}
1182 1181
1183static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, 1182static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
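iwl_mvm_scan_offload_stop() now funnels every failure through the same "out" label, so the scan status is always cleared and the final return value reflects what actually happened instead of being forced to 0. A small sketch of that single-exit shape; the helper names are placeholders:

#include <stdio.h>

static int send_stop_cmd(void)  { return 0; }   /* placeholder helpers */
static int wait_for_done(void)  { return -1; }  /* pretend the wait timed out */

static int scan_offload_stop(int *scan_status)
{
        int ret;

        ret = send_stop_cmd();
        if (ret)
                goto out;

        ret = wait_for_done();
        /* fall through: cleanup runs whether or not the wait succeeded */
out:
        *scan_status = 0;       /* always reset, like mvm->scan_status = IWL_MVM_SCAN_NONE */
        return ret;             /* propagate the real error instead of returning 0 */
}

int main(void)
{
        int status = 1;

        printf("ret=%d status=%d\n", scan_offload_stop(&status), status);
        return 0;
}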
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 54fafbf9a711..f8d6f306dd76 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
750 * request 750 * request
751 */ 751 */
752 list_for_each_entry(te_data, &mvm->time_event_list, list) { 752 list_for_each_entry(te_data, &mvm->time_event_list, list) {
753 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && 753 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
754 te_data->running) {
755 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 754 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
756 is_p2p = true; 755 is_p2p = true;
757 goto remove_te; 756 goto remove_te;
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
766 * request 765 * request
767 */ 766 */
768 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { 767 list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
769 if (te_data->running) { 768 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
770 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 769 goto remove_te;
771 goto remove_te;
772 }
773 } 770 }
774 771
775remove_te: 772remove_te:
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 1d4677460711..074f716020aa 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
1386 } 1386 }
1387 1387
1388 return true; 1388 return true;
1389 } else if (0x86DD == ether_type) { 1389 } else if (ETH_P_IPV6 == ether_type) {
1390 return true; 1390 /* TODO: Handle any IPv6 cases that need special handling.
1391 * For now, always return false
1392 */
1393 goto end;
1391 } 1394 }
1392 1395
1393end: 1396end:
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cab9f5257f57..997cf0901ac2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96static void make_tx_response(struct xenvif_queue *queue, 96static void make_tx_response(struct xenvif_queue *queue,
97 struct xen_netif_tx_request *txp, 97 struct xen_netif_tx_request *txp,
98 s8 st); 98 s8 st);
99static void push_tx_responses(struct xenvif_queue *queue);
99 100
100static inline int tx_work_todo(struct xenvif_queue *queue); 101static inline int tx_work_todo(struct xenvif_queue *queue);
101 102
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
655 unsigned long flags; 656 unsigned long flags;
656 657
657 do { 658 do {
658 int notify;
659
660 spin_lock_irqsave(&queue->response_lock, flags); 659 spin_lock_irqsave(&queue->response_lock, flags);
661 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); 660 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
662 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); 661 push_tx_responses(queue);
663 spin_unlock_irqrestore(&queue->response_lock, flags); 662 spin_unlock_irqrestore(&queue->response_lock, flags);
664 if (notify)
665 notify_remote_via_irq(queue->tx_irq);
666
667 if (cons == end) 663 if (cons == end)
668 break; 664 break;
669 txp = RING_GET_REQUEST(&queue->tx, cons++); 665 txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1657,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1657{ 1653{
1658 struct pending_tx_info *pending_tx_info; 1654 struct pending_tx_info *pending_tx_info;
1659 pending_ring_idx_t index; 1655 pending_ring_idx_t index;
1660 int notify;
1661 unsigned long flags; 1656 unsigned long flags;
1662 1657
1663 pending_tx_info = &queue->pending_tx_info[pending_idx]; 1658 pending_tx_info = &queue->pending_tx_info[pending_idx];
@@ -1673,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1673 index = pending_index(queue->pending_prod++); 1668 index = pending_index(queue->pending_prod++);
1674 queue->pending_ring[index] = pending_idx; 1669 queue->pending_ring[index] = pending_idx;
1675 1670
1676 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); 1671 push_tx_responses(queue);
1677 1672
1678 spin_unlock_irqrestore(&queue->response_lock, flags); 1673 spin_unlock_irqrestore(&queue->response_lock, flags);
1679
1680 if (notify)
1681 notify_remote_via_irq(queue->tx_irq);
1682} 1674}
1683 1675
1684 1676
@@ -1699,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
1699 queue->tx.rsp_prod_pvt = ++i; 1691 queue->tx.rsp_prod_pvt = ++i;
1700} 1692}
1701 1693
1694static void push_tx_responses(struct xenvif_queue *queue)
1695{
1696 int notify;
1697
1698 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1699 if (notify)
1700 notify_remote_via_irq(queue->tx_irq);
1701}
1702
1702static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 1703static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1703 u16 id, 1704 u16 id,
1704 s8 st, 1705 s8 st,
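The netback change factors the RING_PUSH_RESPONSES_AND_CHECK_NOTIFY plus notify_remote_via_irq pair into a single push_tx_responses() helper, called while the response lock is held. A generic sketch of that refactor, with the Xen ring macro replaced by a stub:

#include <stdbool.h>
#include <stdio.h>

struct queue {
        int rsp_prod_pvt;
        int rsp_prod;
        int tx_irq;
};

/* Stand-ins for RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() and
 * notify_remote_via_irq(); the real macros live in the Xen ring headers. */
static bool ring_push_and_check_notify(struct queue *q)
{
        bool notify = q->rsp_prod_pvt != q->rsp_prod;

        q->rsp_prod = q->rsp_prod_pvt;
        return notify;
}

static void notify_irq(int irq) { printf("notify irq %d\n", irq); }

/* One helper instead of repeating push+notify at every response site. */
static void push_tx_responses(struct queue *q)
{
        if (ring_push_and_check_notify(q))
                notify_irq(q->tx_irq);
}

int main(void)
{
        struct queue q = { .rsp_prod_pvt = 1, .rsp_prod = 0, .tx_irq = 17 };

        push_tx_responses(&q);
        return 0;
}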
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 38d1c51f58b1..7bcaeec876c0 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -84,8 +84,7 @@ config OF_RESOLVE
84 bool 84 bool
85 85
86config OF_OVERLAY 86config OF_OVERLAY
87 bool 87 bool "Device Tree overlays"
88 depends on OF
89 select OF_DYNAMIC 88 select OF_DYNAMIC
90 select OF_RESOLVE 89 select OF_RESOLVE
91 90
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0a8aeb8523fe..8f165b112e03 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
714 const char *path) 714 const char *path)
715{ 715{
716 struct device_node *child; 716 struct device_node *child;
717 int len = strchrnul(path, '/') - path; 717 int len;
718 int term;
719 718
719 len = strcspn(path, "/:");
720 if (!len) 720 if (!len)
721 return NULL; 721 return NULL;
722 722
723 term = strchrnul(path, ':') - path;
724 if (term < len)
725 len = term;
726
727 __for_each_child_of_node(parent, child) { 723 __for_each_child_of_node(parent, child) {
728 const char *name = strrchr(child->full_name, '/'); 724 const char *name = strrchr(child->full_name, '/');
729 if (WARN(!name, "malformed device_node %s\n", child->full_name)) 725 if (WARN(!name, "malformed device_node %s\n", child->full_name))
@@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
768 764
769 /* The path could begin with an alias */ 765 /* The path could begin with an alias */
770 if (*path != '/') { 766 if (*path != '/') {
771 char *p = strchrnul(path, '/'); 767 int len;
772 int len = separator ? separator - path : p - path; 768 const char *p = separator;
769
770 if (!p)
771 p = strchrnul(path, '/');
772 len = p - path;
773 773
774 /* of_aliases must not be NULL */ 774 /* of_aliases must not be NULL */
775 if (!of_aliases) 775 if (!of_aliases)
@@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
794 path++; /* Increment past '/' delimiter */ 794 path++; /* Increment past '/' delimiter */
795 np = __of_find_node_by_path(np, path); 795 np = __of_find_node_by_path(np, path);
796 path = strchrnul(path, '/'); 796 path = strchrnul(path, '/');
797 if (separator && separator < path)
798 break;
797 } 799 }
798 raw_spin_unlock_irqrestore(&devtree_lock, flags); 800 raw_spin_unlock_irqrestore(&devtree_lock, flags);
799 return np; 801 return np;
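strcspn(path, "/:") returns the length of the initial span containing neither '/' nor ':', which replaces the old two-step strchrnul() comparison for cutting a path component short at an option separator. A standalone example of the same call:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "testcase-device1:test/option";

        /* Length of the node-name component, stopping at '/' or ':'. */
        size_t len = strcspn(path, "/:");

        printf("component: %.*s (len %zu)\n", (int)len, path, len);
        return 0;
}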
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 0d7765807f49..1a7980692f25 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
290 struct device_node *p; 290 struct device_node *p;
291 const __be32 *intspec, *tmp, *addr; 291 const __be32 *intspec, *tmp, *addr;
292 u32 intsize, intlen; 292 u32 intsize, intlen;
293 int i, res = -EINVAL; 293 int i, res;
294 294
295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); 295 pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
296 296
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
323 323
324 /* Get size of interrupt specifier */ 324 /* Get size of interrupt specifier */
325 tmp = of_get_property(p, "#interrupt-cells", NULL); 325 tmp = of_get_property(p, "#interrupt-cells", NULL);
326 if (tmp == NULL) 326 if (tmp == NULL) {
327 res = -EINVAL;
327 goto out; 328 goto out;
329 }
328 intsize = be32_to_cpu(*tmp); 330 intsize = be32_to_cpu(*tmp);
329 331
330 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); 332 pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
331 333
332 /* Check index */ 334 /* Check index */
333 if ((index + 1) * intsize > intlen) 335 if ((index + 1) * intsize > intlen) {
336 res = -EINVAL;
334 goto out; 337 goto out;
338 }
335 339
336 /* Copy intspec into irq structure */ 340 /* Copy intspec into irq structure */
337 intspec += index * intsize; 341 intspec += index * intsize;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 352b4f28f82c..dee9270ba547 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -19,6 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/idr.h>
22 23
23#include "of_private.h" 24#include "of_private.h"
24 25
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
85 struct device_node *target, struct device_node *child) 86 struct device_node *target, struct device_node *child)
86{ 87{
87 const char *cname; 88 const char *cname;
88 struct device_node *tchild, *grandchild; 89 struct device_node *tchild;
89 int ret = 0; 90 int ret = 0;
90 91
91 cname = kbasename(child->full_name); 92 cname = kbasename(child->full_name);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0cf9a236d438..52c45c7df07f 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -92,6 +92,16 @@ static void __init of_selftest_find_node_by_name(void)
92 "option path test failed\n"); 92 "option path test failed\n");
93 of_node_put(np); 93 of_node_put(np);
94 94
95 np = of_find_node_opts_by_path("/testcase-data:test/option", &options);
96 selftest(np && !strcmp("test/option", options),
97 "option path test, subcase #1 failed\n");
98 of_node_put(np);
99
100 np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options);
101 selftest(np && !strcmp("test/option", options),
102 "option path test, subcase #2 failed\n");
103 of_node_put(np);
104
95 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); 105 np = of_find_node_opts_by_path("/testcase-data:testoption", NULL);
96 selftest(np, "NULL option path test failed\n"); 106 selftest(np, "NULL option path test failed\n");
97 of_node_put(np); 107 of_node_put(np);
@@ -102,6 +112,12 @@ static void __init of_selftest_find_node_by_name(void)
102 "option alias path test failed\n"); 112 "option alias path test failed\n");
103 of_node_put(np); 113 of_node_put(np);
104 114
115 np = of_find_node_opts_by_path("testcase-alias:test/alias/option",
116 &options);
117 selftest(np && !strcmp("test/alias/option", options),
118 "option alias path test, subcase #1 failed\n");
119 of_node_put(np);
120
105 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); 121 np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL);
106 selftest(np, "NULL option alias path test failed\n"); 122 selftest(np, "NULL option alias path test failed\n");
107 of_node_put(np); 123 of_node_put(np);
@@ -378,9 +394,9 @@ static void __init of_selftest_property_string(void)
378 rc = of_property_match_string(np, "phandle-list-names", "first"); 394 rc = of_property_match_string(np, "phandle-list-names", "first");
379 selftest(rc == 0, "first expected:0 got:%i\n", rc); 395 selftest(rc == 0, "first expected:0 got:%i\n", rc);
380 rc = of_property_match_string(np, "phandle-list-names", "second"); 396 rc = of_property_match_string(np, "phandle-list-names", "second");
381 selftest(rc == 1, "second expected:0 got:%i\n", rc); 397 selftest(rc == 1, "second expected:1 got:%i\n", rc);
382 rc = of_property_match_string(np, "phandle-list-names", "third"); 398 rc = of_property_match_string(np, "phandle-list-names", "third");
383 selftest(rc == 2, "third expected:0 got:%i\n", rc); 399 selftest(rc == 2, "third expected:2 got:%i\n", rc);
384 rc = of_property_match_string(np, "phandle-list-names", "fourth"); 400 rc = of_property_match_string(np, "phandle-list-names", "fourth");
385 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); 401 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
386 rc = of_property_match_string(np, "missing-property", "blah"); 402 rc = of_property_match_string(np, "missing-property", "blah");
@@ -478,7 +494,6 @@ static void __init of_selftest_changeset(void)
478 struct device_node *n1, *n2, *n21, *nremove, *parent, *np; 494 struct device_node *n1, *n2, *n21, *nremove, *parent, *np;
479 struct of_changeset chgset; 495 struct of_changeset chgset;
480 496
481 of_changeset_init(&chgset);
482 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); 497 n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
483 selftest(n1, "testcase setup failure\n"); 498 selftest(n1, "testcase setup failure\n");
484 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); 499 n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
@@ -979,7 +994,7 @@ static int of_path_platform_device_exists(const char *path)
979 return pdev != NULL; 994 return pdev != NULL;
980} 995}
981 996
982#if IS_ENABLED(CONFIG_I2C) 997#if IS_BUILTIN(CONFIG_I2C)
983 998
984/* get the i2c client device instantiated at the path */ 999/* get the i2c client device instantiated at the path */
985static struct i2c_client *of_path_to_i2c_client(const char *path) 1000static struct i2c_client *of_path_to_i2c_client(const char *path)
@@ -1445,7 +1460,7 @@ static void of_selftest_overlay_11(void)
1445 return; 1460 return;
1446} 1461}
1447 1462
1448#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) 1463#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY)
1449 1464
1450struct selftest_i2c_bus_data { 1465struct selftest_i2c_bus_data {
1451 struct platform_device *pdev; 1466 struct platform_device *pdev;
@@ -1584,7 +1599,7 @@ static struct i2c_driver selftest_i2c_dev_driver = {
1584 .id_table = selftest_i2c_dev_id, 1599 .id_table = selftest_i2c_dev_id,
1585}; 1600};
1586 1601
1587#if IS_ENABLED(CONFIG_I2C_MUX) 1602#if IS_BUILTIN(CONFIG_I2C_MUX)
1588 1603
1589struct selftest_i2c_mux_data { 1604struct selftest_i2c_mux_data {
1590 int nchans; 1605 int nchans;
@@ -1695,7 +1710,7 @@ static int of_selftest_overlay_i2c_init(void)
1695 "could not register selftest i2c bus driver\n")) 1710 "could not register selftest i2c bus driver\n"))
1696 return ret; 1711 return ret;
1697 1712
1698#if IS_ENABLED(CONFIG_I2C_MUX) 1713#if IS_BUILTIN(CONFIG_I2C_MUX)
1699 ret = i2c_add_driver(&selftest_i2c_mux_driver); 1714 ret = i2c_add_driver(&selftest_i2c_mux_driver);
1700 if (selftest(ret == 0, 1715 if (selftest(ret == 0,
1701 "could not register selftest i2c mux driver\n")) 1716 "could not register selftest i2c mux driver\n"))
@@ -1707,7 +1722,7 @@ static int of_selftest_overlay_i2c_init(void)
1707 1722
1708static void of_selftest_overlay_i2c_cleanup(void) 1723static void of_selftest_overlay_i2c_cleanup(void)
1709{ 1724{
1710#if IS_ENABLED(CONFIG_I2C_MUX) 1725#if IS_BUILTIN(CONFIG_I2C_MUX)
1711 i2c_del_driver(&selftest_i2c_mux_driver); 1726 i2c_del_driver(&selftest_i2c_mux_driver);
1712#endif 1727#endif
1713 platform_driver_unregister(&selftest_i2c_bus_driver); 1728 platform_driver_unregister(&selftest_i2c_bus_driver);
@@ -1814,7 +1829,7 @@ static void __init of_selftest_overlay(void)
1814 of_selftest_overlay_10(); 1829 of_selftest_overlay_10();
1815 of_selftest_overlay_11(); 1830 of_selftest_overlay_11();
1816 1831
1817#if IS_ENABLED(CONFIG_I2C) 1832#if IS_BUILTIN(CONFIG_I2C)
1818 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) 1833 if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n"))
1819 goto out; 1834 goto out;
1820 1835
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index aab55474dd0d..ee082c0366ec 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
127 return false; 127 return false;
128} 128}
129 129
130static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, 130static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
131 int offset) 131 int offset)
132{ 132{
133 struct xgene_pcie_port *port = bus->sysdata; 133 struct xgene_pcie_port *port = bus->sysdata;
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
137 return NULL; 137 return NULL;
138 138
139 xgene_pcie_set_rtdid_reg(bus, devfn); 139 xgene_pcie_set_rtdid_reg(bus, devfn);
140 return xgene_pcie_get_cfg_base(bus); 140 return xgene_pcie_get_cfg_base(bus) + offset;
141} 141}
142 142
143static struct pci_ops xgene_pcie_ops = { 143static struct pci_ops xgene_pcie_ops = {
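With the generic pci_ops map_bus() interface the accessor returns an __iomem pointer to the exact config-space location, so the driver now adds the register offset itself and returns NULL to hide a device. A non-kernel sketch of the convention, with types simplified to plain pointers:

#include <stdint.h>
#include <stdio.h>

static uint8_t cfg_space[4096];                 /* fake config space */

/* map_bus-style hook: return a pointer to (base + offset), or NULL
 * when the device should be hidden, as xgene_pcie_map_bus() now does. */
static void *map_bus(unsigned int devfn, int offset)
{
        if (devfn != 0)
                return NULL;
        return cfg_space + offset;
}

int main(void)
{
        uint32_t *vendor = map_bus(0, 0x00);    /* offset 0: vendor/device ID */

        printf("mapped %p for offset 0x00\n", (void *)vendor);
        return 0;
}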
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index aa012fb3834b..312f23a8429c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
521 struct pci_dev *pdev = to_pci_dev(dev); 521 struct pci_dev *pdev = to_pci_dev(dev);
522 char *driver_override, *old = pdev->driver_override, *cp; 522 char *driver_override, *old = pdev->driver_override, *cp;
523 523
524 if (count > PATH_MAX) 524 /* We need to keep extra room for a newline */
525 if (count >= (PAGE_SIZE - 1))
525 return -EINVAL; 526 return -EINVAL;
526 527
527 driver_override = kstrndup(buf, count, GFP_KERNEL); 528 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
549{ 550{
550 struct pci_dev *pdev = to_pci_dev(dev); 551 struct pci_dev *pdev = to_pci_dev(dev);
551 552
552 return sprintf(buf, "%s\n", pdev->driver_override); 553 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
553} 554}
554static DEVICE_ATTR_RW(driver_override); 555static DEVICE_ATTR_RW(driver_override);
555 556
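sysfs hands a store callback at most one page of input and gives show exactly one page to fill, so the patch bounds the written string at PAGE_SIZE - 1 (leaving room for a trailing newline) and switches show to snprintf(). A userspace analogue of the same bounds checking:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char driver_override[64] = "vfio-pci";

static long override_store(const char *buf, size_t count)
{
        /* keep room for a newline, as the pci-sysfs check now does */
        if (count >= PAGE_SIZE - 1)
                return -22;             /* -EINVAL */
        /* the real code kstrndup()s the buffer here */
        return (long)count;
}

static long override_show(char *buf)
{
        /* never write past the one page sysfs provides */
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_override);
}

int main(void)
{
        char page[PAGE_SIZE];

        printf("store=%ld show=%ld\n", override_store("foo", 3), override_show(page));
        return 0;
}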
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 3bb49252a098..45f67c63d385 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,8 +69,7 @@ config YENTA
69 tristate "CardBus yenta-compatible bridge support" 69 tristate "CardBus yenta-compatible bridge support"
70 depends on PCI 70 depends on PCI
71 select CARDBUS if !EXPERT 71 select CARDBUS if !EXPERT
72 select PCCARD_NONSTATIC if PCMCIA != n && ISA 72 select PCCARD_NONSTATIC if PCMCIA != n
73 select PCCARD_PCI if PCMCIA !=n && !ISA
74 ---help--- 73 ---help---
75 This option enables support for CardBus host bridges. Virtually 74 This option enables support for CardBus host bridges. Virtually
76 all modern PCMCIA bridges are CardBus compatible. A "bridge" is 75 all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA
110config PD6729 109config PD6729
111 tristate "Cirrus PD6729 compatible bridge support" 110 tristate "Cirrus PD6729 compatible bridge support"
112 depends on PCMCIA && PCI 111 depends on PCMCIA && PCI
113 select PCCARD_NONSTATIC if PCMCIA != n && ISA 112 select PCCARD_NONSTATIC
114 select PCCARD_PCI if PCMCIA !=n && !ISA
115 help 113 help
116 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge 114 This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge
117 device, found in some older laptops and PCMCIA card readers. 115 device, found in some older laptops and PCMCIA card readers.
@@ -119,8 +117,7 @@ config PD6729
119config I82092 117config I82092
120 tristate "i82092 compatible bridge support" 118 tristate "i82092 compatible bridge support"
121 depends on PCMCIA && PCI 119 depends on PCMCIA && PCI
122 select PCCARD_NONSTATIC if PCMCIA != n && ISA 120 select PCCARD_NONSTATIC
123 select PCCARD_PCI if PCMCIA !=n && !ISA
124 help 121 help
125 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, 122 This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device,
126 found in some older laptops and more commonly in evaluation boards for the 123 found in some older laptops and more commonly in evaluation boards for the
@@ -291,9 +288,6 @@ config ELECTRA_CF
291 Say Y here to support the CompactFlash controller on the 288 Say Y here to support the CompactFlash controller on the
292 PA Semi Electra eval board. 289 PA Semi Electra eval board.
293 290
294config PCCARD_PCI
295 bool
296
297config PCCARD_NONSTATIC 291config PCCARD_NONSTATIC
298 bool 292 bool
299 293
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index f1a7ca04d89e..27e94b30cf96 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
12pcmcia_rsrc-y += rsrc_mgr.o 12pcmcia_rsrc-y += rsrc_mgr.o
13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o 13pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o 14pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
15pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o
16obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o 15obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
17 16
18 17
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c
deleted file mode 100644
index 1f67b3ba70fb..000000000000
--- a/drivers/pcmcia/rsrc_pci.c
+++ /dev/null
@@ -1,173 +0,0 @@
1#include <linux/slab.h>
2#include <linux/module.h>
3#include <linux/kernel.h>
4#include <linux/pci.h>
5
6#include <pcmcia/ss.h>
7#include <pcmcia/cistpl.h>
8#include "cs_internal.h"
9
10
11struct pcmcia_align_data {
12 unsigned long mask;
13 unsigned long offset;
14};
15
16static resource_size_t pcmcia_align(void *align_data,
17 const struct resource *res,
18 resource_size_t size, resource_size_t align)
19{
20 struct pcmcia_align_data *data = align_data;
21 resource_size_t start;
22
23 start = (res->start & ~data->mask) + data->offset;
24 if (start < res->start)
25 start += data->mask + 1;
26 return start;
27}
28
29static struct resource *find_io_region(struct pcmcia_socket *s,
30 unsigned long base, int num,
31 unsigned long align)
32{
33 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
34 dev_name(&s->dev));
35 struct pcmcia_align_data data;
36 int ret;
37
38 data.mask = align - 1;
39 data.offset = base & data.mask;
40
41 ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
42 base, 0, pcmcia_align, &data);
43 if (ret != 0) {
44 kfree(res);
45 res = NULL;
46 }
47 return res;
48}
49
50static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr,
51 unsigned int *base, unsigned int num,
52 unsigned int align, struct resource **parent)
53{
54 int i, ret = 0;
55
56 /* Check for an already-allocated window that must conflict with
57 * what was asked for. It is a hack because it does not catch all
58 * potential conflicts, just the most obvious ones.
59 */
60 for (i = 0; i < MAX_IO_WIN; i++) {
61 if (!s->io[i].res)
62 continue;
63
64 if (!*base)
65 continue;
66
67 if ((s->io[i].res->start & (align-1)) == *base)
68 return -EBUSY;
69 }
70
71 for (i = 0; i < MAX_IO_WIN; i++) {
72 struct resource *res = s->io[i].res;
73 unsigned int try;
74
75 if (res && (res->flags & IORESOURCE_BITS) !=
76 (attr & IORESOURCE_BITS))
77 continue;
78
79 if (!res) {
80 if (align == 0)
81 align = 0x10000;
82
83 res = s->io[i].res = find_io_region(s, *base, num,
84 align);
85 if (!res)
86 return -EINVAL;
87
88 *base = res->start;
89 s->io[i].res->flags =
90 ((res->flags & ~IORESOURCE_BITS) |
91 (attr & IORESOURCE_BITS));
92 s->io[i].InUse = num;
93 *parent = res;
94 return 0;
95 }
96
97 /* Try to extend top of window */
98 try = res->end + 1;
99 if ((*base == 0) || (*base == try)) {
100 ret = adjust_resource(s->io[i].res, res->start,
101 resource_size(res) + num);
102 if (ret)
103 continue;
104 *base = try;
105 s->io[i].InUse += num;
106 *parent = res;
107 return 0;
108 }
109
110 /* Try to extend bottom of window */
111 try = res->start - num;
112 if ((*base == 0) || (*base == try)) {
113 ret = adjust_resource(s->io[i].res,
114 res->start - num,
115 resource_size(res) + num);
116 if (ret)
117 continue;
118 *base = try;
119 s->io[i].InUse += num;
120 *parent = res;
121 return 0;
122 }
123 }
124 return -EINVAL;
125}
126
127static struct resource *res_pci_find_mem(u_long base, u_long num,
128 u_long align, int low, struct pcmcia_socket *s)
129{
130 struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
131 dev_name(&s->dev));
132 struct pcmcia_align_data data;
133 unsigned long min;
134 int ret;
135
136 if (align < 0x20000)
137 align = 0x20000;
138 data.mask = align - 1;
139 data.offset = base & data.mask;
140
141 min = 0;
142 if (!low)
143 min = 0x100000UL;
144
145 ret = pci_bus_alloc_resource(s->cb_dev->bus,
146 res, num, 1, min, 0,
147 pcmcia_align, &data);
148
149 if (ret != 0) {
150 kfree(res);
151 res = NULL;
152 }
153 return res;
154}
155
156
157static int res_pci_init(struct pcmcia_socket *s)
158{
159 if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) {
160 dev_err(&s->dev, "not supported by res_pci\n");
161 return -EOPNOTSUPP;
162 }
163 return 0;
164}
165
166struct pccard_resource_ops pccard_nonstatic_ops = {
167 .validate_mem = NULL,
168 .find_io = res_pci_find_io,
169 .find_mem = res_pci_find_mem,
170 .init = res_pci_init,
171 .exit = NULL,
172};
173EXPORT_SYMBOL(pccard_nonstatic_ops);
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 7c99ca256f05..8ccc3952c13d 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy)
37 struct armada375_cluster_phy *cluster_phy; 37 struct armada375_cluster_phy *cluster_phy;
38 u32 reg; 38 u32 reg;
39 39
40 cluster_phy = dev_get_drvdata(phy->dev.parent); 40 cluster_phy = phy_get_drvdata(phy);
41 if (!cluster_phy) 41 if (!cluster_phy)
42 return -ENODEV; 42 return -ENODEV;
43 43
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
131 cluster_phy->reg = usb_cluster_base; 131 cluster_phy->reg = usb_cluster_base;
132 132
133 dev_set_drvdata(dev, cluster_phy); 133 dev_set_drvdata(dev, cluster_phy);
134 phy_set_drvdata(phy, cluster_phy);
134 135
135 phy_provider = devm_of_phy_provider_register(&pdev->dev, 136 phy_provider = devm_of_phy_provider_register(&pdev->dev,
136 armada375_usb_phy_xlate); 137 armada375_usb_phy_xlate);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a12d35338313..3791838f4bd4 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
52 52
53static int devm_phy_match(struct device *dev, void *res, void *match_data) 53static int devm_phy_match(struct device *dev, void *res, void *match_data)
54{ 54{
55 return res == match_data; 55 struct phy **phy = res;
56
57 return *phy == match_data;
56} 58}
57 59
58/** 60/**
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy)
223 ret = phy_pm_runtime_get_sync(phy); 225 ret = phy_pm_runtime_get_sync(phy);
224 if (ret < 0 && ret != -ENOTSUPP) 226 if (ret < 0 && ret != -ENOTSUPP)
225 return ret; 227 return ret;
228 ret = 0; /* Override possible ret == -ENOTSUPP */
226 229
227 mutex_lock(&phy->mutex); 230 mutex_lock(&phy->mutex);
228 if (phy->init_count == 0 && phy->ops->init) { 231 if (phy->init_count == 0 && phy->ops->init) {
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy)
231 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 234 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
232 goto out; 235 goto out;
233 } 236 }
234 } else {
235 ret = 0; /* Override possible ret == -ENOTSUPP */
236 } 237 }
237 ++phy->init_count; 238 ++phy->init_count;
238 239
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy)
253 ret = phy_pm_runtime_get_sync(phy); 254 ret = phy_pm_runtime_get_sync(phy);
254 if (ret < 0 && ret != -ENOTSUPP) 255 if (ret < 0 && ret != -ENOTSUPP)
255 return ret; 256 return ret;
257 ret = 0; /* Override possible ret == -ENOTSUPP */
256 258
257 mutex_lock(&phy->mutex); 259 mutex_lock(&phy->mutex);
258 if (phy->init_count == 1 && phy->ops->exit) { 260 if (phy->init_count == 1 && phy->ops->exit) {
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy)
287 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
288 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
289 return ret; 291 return ret;
292 ret = 0; /* Override possible ret == -ENOTSUPP */
290 293
291 mutex_lock(&phy->mutex); 294 mutex_lock(&phy->mutex);
292 if (phy->power_count == 0 && phy->ops->power_on) { 295 if (phy->power_count == 0 && phy->ops->power_on) {
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy)
295 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
296 goto out; 299 goto out;
297 } 300 }
298 } else {
299 ret = 0; /* Override possible ret == -ENOTSUPP */
300 } 301 }
301 ++phy->power_count; 302 ++phy->power_count;
302 mutex_unlock(&phy->mutex); 303 mutex_unlock(&phy->mutex);
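A devres match callback receives a pointer to the stored devres data, which for devm_phy_create() is a struct phy **, so the fix dereferences it before comparing against the phy being released. A minimal sketch of the corrected comparison:

#include <stdbool.h>
#include <stdio.h>

struct phy { int id; };

/* res points at the slot holding the phy pointer, not at the phy itself. */
static bool devm_phy_match(void *res, void *match_data)
{
        struct phy **phy = res;

        return *phy == match_data;
}

int main(void)
{
        struct phy p = { .id = 1 };
        struct phy *slot = &p;                  /* what devres stored */

        printf("match: %d\n", devm_phy_match(&slot, &p));
        return 0;
}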
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index f86cbe68ddaf..179cbf9451aa 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy {
30 const struct exynos_dp_video_phy_drvdata *drvdata; 30 const struct exynos_dp_video_phy_drvdata *drvdata;
31}; 31};
32 32
33static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state,
34 unsigned int on)
35{
36 unsigned int val;
37
38 if (IS_ERR(state->regs))
39 return;
40
41 val = on ? 0 : EXYNOS5_PHY_ENABLE;
42
43 regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
44 EXYNOS5_PHY_ENABLE, val);
45}
46
47static int exynos_dp_video_phy_power_on(struct phy *phy) 33static int exynos_dp_video_phy_power_on(struct phy *phy)
48{ 34{
49 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 35 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
50 36
51 /* Disable power isolation on DP-PHY */ 37 /* Disable power isolation on DP-PHY */
52 exynos_dp_video_phy_pwr_isol(state, 0); 38 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
53 39 EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
54 return 0;
55} 40}
56 41
57static int exynos_dp_video_phy_power_off(struct phy *phy) 42static int exynos_dp_video_phy_power_off(struct phy *phy)
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
59 struct exynos_dp_video_phy *state = phy_get_drvdata(phy); 44 struct exynos_dp_video_phy *state = phy_get_drvdata(phy);
60 45
61 /* Enable power isolation on DP-PHY */ 46 /* Enable power isolation on DP-PHY */
62 exynos_dp_video_phy_pwr_isol(state, 1); 47 return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
63 48 EXYNOS5_PHY_ENABLE, 0);
64 return 0;
65} 49}
66 50
67static struct phy_ops exynos_dp_video_phy_ops = { 51static struct phy_ops exynos_dp_video_phy_ops = {
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index f017b2f2a54e..df7519a39ba0 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy {
43 } phys[EXYNOS_MIPI_PHYS_NUM]; 43 } phys[EXYNOS_MIPI_PHYS_NUM];
44 spinlock_t slock; 44 spinlock_t slock;
45 void __iomem *regs; 45 void __iomem *regs;
46 struct mutex mutex;
47 struct regmap *regmap; 46 struct regmap *regmap;
48}; 47};
49 48
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
59 else 58 else
60 reset = EXYNOS4_MIPI_PHY_SRESETN; 59 reset = EXYNOS4_MIPI_PHY_SRESETN;
61 60
62 if (state->regmap) { 61 spin_lock(&state->slock);
63 mutex_lock(&state->mutex); 62
63 if (!IS_ERR(state->regmap)) {
64 regmap_read(state->regmap, offset, &val); 64 regmap_read(state->regmap, offset, &val);
65 if (on) 65 if (on)
66 val |= reset; 66 val |= reset;
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) 72 else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
73 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 73 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
74 regmap_write(state->regmap, offset, val); 74 regmap_write(state->regmap, offset, val);
75 mutex_unlock(&state->mutex);
76 } else { 75 } else {
77 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); 76 addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
78 77
79 spin_lock(&state->slock);
80 val = readl(addr); 78 val = readl(addr);
81 if (on) 79 if (on)
82 val |= reset; 80 val |= reset;
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state,
90 val &= ~EXYNOS4_MIPI_PHY_ENABLE; 88 val &= ~EXYNOS4_MIPI_PHY_ENABLE;
91 89
92 writel(val, addr); 90 writel(val, addr);
93 spin_unlock(&state->slock);
94 } 91 }
95 92
93 spin_unlock(&state->slock);
96 return 0; 94 return 0;
97} 95}
98 96
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
158 156
159 dev_set_drvdata(dev, state); 157 dev_set_drvdata(dev, state);
160 spin_lock_init(&state->slock); 158 spin_lock_init(&state->slock);
161 mutex_init(&state->mutex);
162 159
163 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 160 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
164 struct phy *phy = devm_phy_create(dev, NULL, 161 struct phy *phy = devm_phy_create(dev, NULL,
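The mipi-video change drops the extra mutex and takes the one spinlock around both the regmap and the raw MMIO branch, so the read-modify-write of the PHY control word is serialized the same way in either case. A sketch of that locking shape, with the hardware access reduced to a variable:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t slock;
static unsigned int phy_ctrl;                   /* stands in for the control register */

/* One lock protects the read-modify-write regardless of which
 * register-access path (regmap or plain readl/writel) is taken. */
static void set_phy_state(int on, unsigned int bit)
{
        pthread_spin_lock(&slock);
        if (on)
                phy_ctrl |= bit;
        else
                phy_ctrl &= ~bit;
        pthread_spin_unlock(&slock);
}

int main(void)
{
        pthread_spin_init(&slock, PTHREAD_PROCESS_PRIVATE);
        set_phy_state(1, 1u << 0);
        printf("phy_ctrl=%#x\n", phy_ctrl);
        return 0;
}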
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index 236a52ad94eb..f30bbb0fb3b2 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = {
250 .power_on = exynos4210_power_on, 250 .power_on = exynos4210_power_on,
251 .power_off = exynos4210_power_off, 251 .power_off = exynos4210_power_off,
252 }, 252 },
253 {},
254}; 253};
255 254
256const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { 255const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 0b9de88579b1..765da90a536f 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = {
361 .power_on = exynos4x12_power_on, 361 .power_on = exynos4x12_power_on,
362 .power_off = exynos4x12_power_off, 362 .power_off = exynos4x12_power_off,
363 }, 363 },
364 {},
365}; 364};
366 365
367const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { 366const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = {
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 04374018425f..e2a0be750ad9 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev,
531{ 531{
532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); 532 struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev);
533 533
534 if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) 534 if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM))
535 return ERR_PTR(-ENODEV); 535 return ERR_PTR(-ENODEV);
536 536
537 return phy_drd->phys[args->args[0]].phy; 537 return phy_drd->phys[args->args[0]].phy;
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 1c139aa0d074..2ed1735a076a 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
391 .power_on = exynos5250_power_on, 391 .power_on = exynos5250_power_on,
392 .power_off = exynos5250_power_off, 392 .power_off = exynos5250_power_off,
393 }, 393 },
394 {},
395}; 394};
396 395
397const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { 396const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index 34915b4202f1..d6b22659cac1 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (!res)
151 return -EINVAL;
152
150 priv->base = devm_ioremap(dev, res->start, resource_size(res)); 153 priv->base = devm_ioremap(dev, res->start, resource_size(res));
151 if (!priv->base) 154 if (!priv->base)
152 return -ENOMEM; 155 return -ENOMEM;
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 9b2848e6115d..933435214acc 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -228,6 +228,7 @@ struct miphy28lp_dev {
228 struct regmap *regmap; 228 struct regmap *regmap;
229 struct mutex miphy_mutex; 229 struct mutex miphy_mutex;
230 struct miphy28lp_phy **phys; 230 struct miphy28lp_phy **phys;
231 int nphys;
231}; 232};
232 233
233struct miphy_initval { 234struct miphy_initval {
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1116 return ERR_PTR(-EINVAL); 1117 return ERR_PTR(-EINVAL);
1117 } 1118 }
1118 1119
1119 for (index = 0; index < of_get_child_count(dev->of_node); index++) 1120 for (index = 0; index < miphy_dev->nphys; index++)
1120 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 1121 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
1121 miphy_phy = miphy_dev->phys[index]; 1122 miphy_phy = miphy_dev->phys[index];
1122 break; 1123 break;
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev,
1138 1139
1139static struct phy_ops miphy28lp_ops = { 1140static struct phy_ops miphy28lp_ops = {
1140 .init = miphy28lp_init, 1141 .init = miphy28lp_init,
1142 .owner = THIS_MODULE,
1141}; 1143};
1142 1144
1143static int miphy28lp_probe_resets(struct device_node *node, 1145static int miphy28lp_probe_resets(struct device_node *node,
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev)
1200 struct miphy28lp_dev *miphy_dev; 1202 struct miphy28lp_dev *miphy_dev;
1201 struct phy_provider *provider; 1203 struct phy_provider *provider;
1202 struct phy *phy; 1204 struct phy *phy;
1203 int chancount, port = 0; 1205 int ret, port = 0;
1204 int ret;
1205 1206
1206 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 1207 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
1207 if (!miphy_dev) 1208 if (!miphy_dev)
1208 return -ENOMEM; 1209 return -ENOMEM;
1209 1210
1210 chancount = of_get_child_count(np); 1211 miphy_dev->nphys = of_get_child_count(np);
1211 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 1212 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
1212 GFP_KERNEL); 1213 sizeof(*miphy_dev->phys), GFP_KERNEL);
1213 if (!miphy_dev->phys) 1214 if (!miphy_dev->phys)
1214 return -ENOMEM; 1215 return -ENOMEM;
1215 1216
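Both MiPHY probes previously sized the array with sizeof(phy), the size of one pointer taken from an unrelated local, and re-counted children on every xlate. Caching the count in nphys and allocating with devm_kcalloc(n, sizeof(*array), ...) makes the allocation self-describing. The same idiom outside the kernel:

#include <stdio.h>
#include <stdlib.h>

struct miphy_phy { int port; };

int main(void)
{
        int nphys = 4;                          /* of_get_child_count() result, cached once */

        /* calloc(n, sizeof(*elem)) mirrors devm_kcalloc(): the element size
         * comes from the array being allocated, not from a stray local. */
        struct miphy_phy **phys = calloc(nphys, sizeof(*phys));
        if (!phys)
                return 1;

        printf("allocated %d slots of %zu bytes\n", nphys, sizeof(*phys));
        free(phys);
        return 0;
}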
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 6c80154e8bff..51b459db9137 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -150,6 +150,7 @@ struct miphy365x_dev {
150 struct regmap *regmap; 150 struct regmap *regmap;
151 struct mutex miphy_mutex; 151 struct mutex miphy_mutex;
152 struct miphy365x_phy **phys; 152 struct miphy365x_phy **phys;
153 int nphys;
153}; 154};
154 155
155/* 156/*
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev,
485 return ERR_PTR(-EINVAL); 486 return ERR_PTR(-EINVAL);
486 } 487 }
487 488
488 for (index = 0; index < of_get_child_count(dev->of_node); index++) 489 for (index = 0; index < miphy_dev->nphys; index++)
489 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { 490 if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
490 miphy_phy = miphy_dev->phys[index]; 491 miphy_phy = miphy_dev->phys[index];
491 break; 492 break;
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev)
541 struct miphy365x_dev *miphy_dev; 542 struct miphy365x_dev *miphy_dev;
542 struct phy_provider *provider; 543 struct phy_provider *provider;
543 struct phy *phy; 544 struct phy *phy;
544 int chancount, port = 0; 545 int ret, port = 0;
545 int ret;
546 546
547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); 547 miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
548 if (!miphy_dev) 548 if (!miphy_dev)
549 return -ENOMEM; 549 return -ENOMEM;
550 550
551 chancount = of_get_child_count(np); 551 miphy_dev->nphys = of_get_child_count(np);
552 miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, 552 miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
553 GFP_KERNEL); 553 sizeof(*miphy_dev->phys), GFP_KERNEL);
554 if (!miphy_dev->phys) 554 if (!miphy_dev->phys)
555 return -ENOMEM; 555 return -ENOMEM;
556 556
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index efe724f97e02..93252e053a31 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void)
360} 360}
361module_exit(omap_control_phy_exit); 361module_exit(omap_control_phy_exit);
362 362
363MODULE_ALIAS("platform: omap_control_phy"); 363MODULE_ALIAS("platform:omap_control_phy");
364MODULE_AUTHOR("Texas Instruments Inc."); 364MODULE_AUTHOR("Texas Instruments Inc.");
365MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); 365MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
366MODULE_LICENSE("GPL v2"); 366MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 6f4aef3db248..4757e765696a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
296 dev_warn(&pdev->dev, 296 dev_warn(&pdev->dev,
297 "found usb_otg_ss_refclk960m, please fix DTS\n"); 297 "found usb_otg_ss_refclk960m, please fix DTS\n");
298 } 298 }
299 } else {
300 clk_prepare(phy->optclk);
301 } 299 }
302 300
301 if (!IS_ERR(phy->optclk))
302 clk_prepare(phy->optclk);
303
303 usb_add_phy_dev(&phy->phy); 304 usb_add_phy_dev(&phy->phy);
304 305
305 return 0; 306 return 0;
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = {
383 384
384module_platform_driver(omap_usb2_driver); 385module_platform_driver(omap_usb2_driver);
385 386
386MODULE_ALIAS("platform: omap_usb2"); 387MODULE_ALIAS("platform:omap_usb2");
387MODULE_AUTHOR("Texas Instruments Inc."); 388MODULE_AUTHOR("Texas Instruments Inc.");
388MODULE_DESCRIPTION("OMAP USB2 phy driver"); 389MODULE_DESCRIPTION("OMAP USB2 phy driver");
389MODULE_LICENSE("GPL v2"); 390MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 22011c3b6a4b..7d4c33643768 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
61 return ret; 61 return ret;
62 62
63 clk_disable_unprepare(phy->clk); 63 clk_disable_unprepare(phy->clk);
64 if (ret)
65 return ret;
66 64
67 return 0; 65 return 0;
68} 66}
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
78 76
79 /* Power up usb phy analog blocks by set siddq 0 */ 77 /* Power up usb phy analog blocks by set siddq 0 */
80 ret = rockchip_usb_phy_power(phy, 0); 78 ret = rockchip_usb_phy_power(phy, 0);
81 if (ret) 79 if (ret) {
80 clk_disable_unprepare(phy->clk);
82 return ret; 81 return ret;
82 }
83 83
84 return 0; 84 return 0;
85} 85}
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 95c88f929f27..2ba610b72ca2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy)
165 cpu_relax(); 165 cpu_relax();
166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 166 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
167 if (val & PLL_LOCK) 167 if (val & PLL_LOCK)
168 break; 168 return 0;
169 } while (!time_after(jiffies, timeout)); 169 } while (!time_after(jiffies, timeout));
170 170
171 if (!(val & PLL_LOCK)) { 171 dev_err(phy->dev, "DPLL failed to lock\n");
172 dev_err(phy->dev, "DPLL failed to lock\n"); 172 return -EBUSY;
173 return -EBUSY;
174 }
175
176 return 0;
177} 173}
178 174
179static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) 175static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = {
608 604
609module_platform_driver(ti_pipe3_driver); 605module_platform_driver(ti_pipe3_driver);
610 606
611MODULE_ALIAS("platform: ti_pipe3"); 607MODULE_ALIAS("platform:ti_pipe3");
612MODULE_AUTHOR("Texas Instruments Inc."); 608MODULE_AUTHOR("Texas Instruments Inc.");
613MODULE_DESCRIPTION("TI PIPE3 phy driver"); 609MODULE_DESCRIPTION("TI PIPE3 phy driver");
614MODULE_LICENSE("GPL v2"); 610MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 8e87f54671f3..bc42d6a8939f 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
666 twl->dev = &pdev->dev; 666 twl->dev = &pdev->dev;
667 twl->irq = platform_get_irq(pdev, 0); 667 twl->irq = platform_get_irq(pdev, 0);
668 twl->vbus_supplied = false; 668 twl->vbus_supplied = false;
669 twl->linkstat = -EINVAL;
670 twl->linkstat = OMAP_MUSB_UNKNOWN; 669 twl->linkstat = OMAP_MUSB_UNKNOWN;
671 670
672 twl->phy.dev = twl->dev; 671 twl->phy.dev = twl->dev;
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 29214a36ea28..2263cd010032 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
1704 for (i = 0; i < MAX_LANE; i++) 1704 for (i = 0; i < MAX_LANE; i++)
1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ 1705 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
1706 1706
1707 ctx->dev = &pdev->dev;
1708 platform_set_drvdata(pdev, ctx); 1707 platform_set_drvdata(pdev, ctx);
1709 1708
1710 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); 1709 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5afe03e28b91..2062c224e32f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -66,6 +66,10 @@
 #define BYT_DIR_MASK		(BIT(1) | BIT(2))
 #define BYT_TRIG_MASK		(BIT(26) | BIT(25) | BIT(24))
 
+#define BYT_CONF0_RESTORE_MASK	(BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \
+				 BYT_PIN_MUX)
+#define BYT_VAL_RESTORE_MASK	(BYT_DIR_MASK | BYT_LEVEL)
+
 #define BYT_NGPIO_SCORE		102
 #define BYT_NGPIO_NCORE		28
 #define BYT_NGPIO_SUS		44
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = {
 	},
 };
 
+struct byt_gpio_pin_context {
+	u32 conf0;
+	u32 val;
+};
+
 struct byt_gpio {
 	struct gpio_chip chip;
 	struct platform_device *pdev;
 	spinlock_t lock;
 	void __iomem *reg_base;
 	struct pinctrl_gpio_range *range;
+	struct byt_gpio_pin_context *saved_context;
 };
 
 #define to_byt_gpio(c)	container_of(c, struct byt_gpio, chip)
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
 	return vg->reg_base + reg_offset + reg;
 }
 
-static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+{
+	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	unsigned long flags;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+	value = readl(reg);
+	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+	writel(value, reg);
+	spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
 {
 	/* SCORE pin 92-93 */
 	if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
 	    offset >= 92 && offset <= 93)
-		return true;
+		return 1;
 
 	/* SUS pin 11-21 */
 	if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
 	    offset >= 11 && offset <= 21)
-		return true;
+		return 1;
 
-	return false;
+	return 0;
 }
 
 static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
-	u32 value;
-	bool special;
+	u32 value, gpio_mux;
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
 	 * But, some pins may have func pin mux 001 represents
-	 * GPIO function. Only allow user to export pin with
-	 * func pin mux preset as GPIO function by BIOS/FW.
+	 * GPIO function.
+	 *
+	 * Because there are devices out there where some pins were not
+	 * configured correctly we allow changing the mux value from
+	 * request (but print out warning about that).
 	 */
 	value = readl(reg) & BYT_PIN_MUX;
-	special = is_special_pin(vg, offset);
-	if ((special && value != 1) || (!special && value)) {
-		dev_err(&vg->pdev->dev,
-			"pin %u cannot be used as GPIO.\n", offset);
-		return -EINVAL;
+	gpio_mux = byt_get_gpio_mux(vg, offset);
+	if (WARN_ON(gpio_mux != value)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vg->lock, flags);
+		value = readl(reg) & ~BYT_PIN_MUX;
+		value |= gpio_mux;
+		writel(value, reg);
+		spin_unlock_irqrestore(&vg->lock, flags);
+
+		dev_warn(&vg->pdev->dev,
+			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
 	pm_runtime_get(&vg->pdev->dev);
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
-	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
-	u32 value;
-
-	/* clear interrupt triggering */
-	value = readl(reg);
-	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
-	writel(value, reg);
 
+	byt_gpio_clear_triggering(vg, offset);
 	pm_runtime_put(&vg->pdev->dev);
 }
 
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
 	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
 		   BYT_TRIG_LVL);
 
-	switch (type) {
-	case IRQ_TYPE_LEVEL_HIGH:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_RISING:
-		value |= BYT_TRIG_POS;
-		break;
-	case IRQ_TYPE_LEVEL_LOW:
-		value |= BYT_TRIG_LVL;
-	case IRQ_TYPE_EDGE_FALLING:
-		value |= BYT_TRIG_NEG;
-		break;
-	case IRQ_TYPE_EDGE_BOTH:
-		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
-		break;
-	}
 	writel(value, reg);
 
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+
 	spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
-	u32 base, pin, mask;
+	u32 base, pin;
 	void __iomem *reg;
-	u32 pending;
+	unsigned long pending;
 	unsigned virq;
-	int looplimit = 0;
 
 	/* check from GPIO controller which pin triggered the interrupt */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
-
 		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
-
-		while ((pending = readl(reg))) {
-			pin = __ffs(pending);
-			mask = BIT(pin);
-			/* Clear before handling so we can't lose an edge */
-			writel(mask, reg);
-
+		pending = readl(reg);
+		for_each_set_bit(pin, &pending, 32) {
 			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
 			generic_handle_irq(virq);
-
-			/* In case bios or user sets triggering incorretly a pin
-			 * might remain in "interrupt triggered" state.
-			 */
-			if (looplimit++ > 32) {
-				dev_err(&vg->pdev->dev,
-					"Gpio %d interrupt flood, disabling\n",
-					base + pin);
-
-				reg = byt_gpio_reg(&vg->chip, base + pin,
-						   BYT_CONF0_REG);
-				mask = readl(reg);
-				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
-					  BYT_TRIG_LVL);
-				writel(mask, reg);
-				mask = readl(reg); /* flush */
-				break;
-			}
 		}
 	}
 	chip->irq_eoi(data);
 }
 
+static void byt_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	void __iomem *reg;
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
+	writel(BIT(offset % 32), reg);
+}
+
 static void byt_irq_unmask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+	unsigned offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	void __iomem *reg;
+	u32 value;
+
+	spin_lock_irqsave(&vg->lock, flags);
+
+	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	value = readl(reg);
+
+	switch (irqd_get_trigger_type(d)) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_RISING:
+		value |= BYT_TRIG_POS;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		value |= BYT_TRIG_LVL;
+	case IRQ_TYPE_EDGE_FALLING:
+		value |= BYT_TRIG_NEG;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		value |= (BYT_TRIG_NEG | BYT_TRIG_POS);
+		break;
+	}
+
+	writel(value, reg);
+
+	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
 {
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = to_byt_gpio(gc);
+
+	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
 }
 
 static struct irq_chip byt_irqchip = {
 	.name = "BYT-GPIO",
+	.irq_ack = byt_irq_ack,
 	.irq_mask = byt_irq_mask,
 	.irq_unmask = byt_irq_unmask,
 	.irq_set_type = byt_irq_type,
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
 	void __iomem *reg;
 	u32 base, value;
+	int i;
+
+	/*
+	 * Clear interrupt triggers for all pins that are GPIOs and
+	 * do not use direct IRQ mode. This will prevent spurious
+	 * interrupts from misconfigured pins.
+	 */
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+		if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
+		    !(value & BYT_DIRECT_IRQ_EN)) {
+			byt_gpio_clear_triggering(vg, i);
+			dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+		}
+	}
 
 	/* clear interrupt status trigger registers */
 	for (base = 0; base < vg->chip.ngpio; base += 32) {
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev)
 	gc->can_sleep = false;
 	gc->dev = dev;
 
+#ifdef CONFIG_PM_SLEEP
+	vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+				       sizeof(*vg->saved_context), GFP_KERNEL);
+#endif
+
 	ret = gpiochip_add(gc);
 	if (ret) {
 		dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int byt_gpio_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg) & BYT_CONF0_RESTORE_MASK;
+		vg->saved_context[i].conf0 = value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg) & BYT_VAL_RESTORE_MASK;
+		vg->saved_context[i].val = value;
+	}
+
+	return 0;
+}
+
+static int byt_gpio_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vg->chip.ngpio; i++) {
+		void __iomem *reg;
+		u32 value;
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		value = readl(reg);
+		if ((value & BYT_CONF0_RESTORE_MASK) !=
+		     vg->saved_context[i].conf0) {
+			value &= ~BYT_CONF0_RESTORE_MASK;
+			value |= vg->saved_context[i].conf0;
+			writel(value, reg);
+			dev_info(dev, "restored pin %d conf0 %#08x", i, value);
+		}
+
+		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		value = readl(reg);
+		if ((value & BYT_VAL_RESTORE_MASK) !=
+		     vg->saved_context[i].val) {
+			u32 v;
+
+			v = value & ~BYT_VAL_RESTORE_MASK;
+			v |= vg->saved_context[i].val;
+			if (v != value) {
+				writel(v, reg);
+				dev_dbg(dev, "restored pin %d val %#08x\n",
+					i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int byt_gpio_runtime_suspend(struct device *dev)
 {
 	return 0;
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops byt_gpio_pm_ops = {
-	.runtime_suspend = byt_gpio_runtime_suspend,
-	.runtime_resume = byt_gpio_runtime_resume,
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume)
+	SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume,
+			   NULL)
 };
 
 static const struct acpi_device_id byt_gpio_acpi_match[] = {
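[Review note] The suspend/resume pair above only saves the bits covered by BYT_CONF0_RESTORE_MASK and BYT_VAL_RESTORE_MASK and rewrites a pin only when those bits changed across sleep. A minimal user-space sketch of that save/compare/restore pattern follows; the register, mask value, and helper names are invented for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>

#define RESTORE_MASK 0x0000001eu	/* hypothetical: only these bits are owned by the driver */

struct pin_ctx { uint32_t saved; };

static uint32_t fake_reg = 0x12345678u;	/* stands in for a readl()/writel() register */

static void save_pin(struct pin_ctx *ctx)
{
	ctx->saved = fake_reg & RESTORE_MASK;	/* keep only the bits we will restore */
}

static void restore_pin(const struct pin_ctx *ctx)
{
	uint32_t cur = fake_reg;

	if ((cur & RESTORE_MASK) == ctx->saved)
		return;				/* firmware left the pin as we configured it */
	fake_reg = (cur & ~RESTORE_MASK) | ctx->saved;
	printf("restored reg to %#010x\n", fake_reg);
}

int main(void)
{
	struct pin_ctx ctx;

	save_pin(&ctx);
	fake_reg ^= 0x6u;			/* pretend firmware flipped a couple of owned bits */
	restore_pin(&ctx);
	return 0;
}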
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3034fd03bced..82f691eeeec4 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 				     int value)
 {
+	chv_gpio_set(chip, offset, value);
 	return pinctrl_gpio_direction_output(chip->base + offset);
 }
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index f4cd0b9b2438..a4814066ea08 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d)
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static unsigned int gpio_irq_startup(struct irq_data *d)
+static int gpio_irq_request_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned pin = d->hwirq;
 	int ret;
 
 	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret) {
+	if (ret)
 		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
 			d->hwirq);
-		return ret;
-	}
-	gpio_irq_unmask(d);
-	return 0;
+
+	return ret;
 }
 
-static void gpio_irq_shutdown(struct irq_data *d)
+static void gpio_irq_release_res(struct irq_data *d)
 {
 	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
 	unsigned pin = d->hwirq;
 
-	gpio_irq_mask(d);
 	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
 }
 
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void)
 static struct irq_chip gpio_irqchip = {
 	.name = "GPIO",
 	.irq_ack = gpio_irq_ack,
-	.irq_startup = gpio_irq_startup,
-	.irq_shutdown = gpio_irq_shutdown,
+	.irq_request_resources = gpio_irq_request_res,
+	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable = gpio_irq_mask,
 	.irq_mask = gpio_irq_mask,
 	.irq_unmask = gpio_irq_unmask,
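[Review note] Moving the gpiochip lock/unlock from irq_startup/irq_shutdown into irq_request_resources/irq_release_resources leaves mask and unmask handling to the dedicated callbacks. A minimal sketch of an irq_chip wired that way; this is a kernel-context fragment (not a standalone program) and the my_gpio/my_irqchip names are hypothetical:

/* Kernel-context sketch only; names below are illustrative, not from the patch. */
#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct my_gpio { struct gpio_chip chip; };

static int my_irq_request_resources(struct irq_data *d)
{
	struct my_gpio *g = irq_data_get_irq_chip_data(d);

	/* Only claim the pin for IRQ use; masking stays in irq_mask/irq_unmask. */
	return gpiochip_lock_as_irq(&g->chip, irqd_to_hwirq(d));
}

static void my_irq_release_resources(struct irq_data *d)
{
	struct my_gpio *g = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(&g->chip, irqd_to_hwirq(d));
}

static struct irq_chip my_irqchip = {
	.name			= "my-gpio",
	.irq_request_resources	= my_irq_request_resources,
	.irq_release_resources	= my_irq_release_resources,
};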
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 24c5d88f943f..3c68a8e5e0dd 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
 	.pins = sun4i_a10_pins,
 	.npins = ARRAY_SIZE(sun4i_a10_pins),
 	.irq_banks = 1,
+	.irq_read_needs_mux = true,
 };
 
 static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3d0744337736..f8e171b76693 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "../core.h"
+#include "../../gpio/gpiolib.h"
 #include "pinctrl-sunxi.h"
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip;
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
 static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
-
 	u32 reg = sunxi_data_reg(offset);
 	u8 index = sunxi_data_offset(offset);
-	u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+	u32 set_mux = pctl->desc->irq_read_needs_mux &&
+			test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+	u32 val;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+
+	val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+	if (set_mux)
+		sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
 
 	return val;
 }
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 5a51523a3459..e248e81a0f9e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -77,6 +77,9 @@
 #define IRQ_LEVEL_LOW		0x03
 #define IRQ_EDGE_BOTH		0x04
 
+#define SUN4I_FUNC_INPUT	0
+#define SUN4I_FUNC_IRQ		6
+
 struct sunxi_desc_function {
 	const char	*name;
 	u8		muxval;
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc {
 	int				npins;
 	unsigned			pin_base;
 	unsigned			irq_banks;
+	bool				irq_read_needs_mux;
 };
 
 struct sunxi_pinctrl_function {
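[Review note] On SoCs that set irq_read_needs_mux, the data register only reflects the pad while the pin is muxed as a plain input, so gpio_get() above temporarily switches the mux around the read and then puts the IRQ function back. A tiny user-space sketch of that switch-read-restore idea; the register model below is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define FUNC_INPUT 0u
#define FUNC_IRQ   6u

/* Invented stand-ins for one pin's mux register and pad level. */
static uint32_t mux_reg = FUNC_IRQ;
static uint32_t pad_level = 1;

static uint32_t read_data_reg(void)
{
	/* The data register is only valid while the pin is muxed as an input. */
	return (mux_reg == FUNC_INPUT) ? pad_level : 0;
}

static uint32_t gpio_get(void)
{
	uint32_t saved = mux_reg, val;

	mux_reg = FUNC_INPUT;	/* switch to input so the level becomes visible */
	val = read_data_reg();
	mux_reg = saved;	/* restore the IRQ function afterwards */
	return val;
}

int main(void)
{
	printf("pin level = %u\n", gpio_get());
	return 0;
}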
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 97b5e4ee1ca4..63d4033eb683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -73,7 +73,7 @@
 
 #define TIME_WINDOW_MAX_MSEC 40000
 #define TIME_WINDOW_MIN_MSEC 250
-
+#define ENERGY_UNIT_SCALE    1000 /* scale from driver unit to powercap unit */
 enum unit_type {
 	ARBITRARY_UNIT, /* no translation */
 	POWER_UNIT,
@@ -158,6 +158,7 @@ struct rapl_domain {
 	struct rapl_power_limit rpl[NR_POWER_LIMITS];
 	u64 attr_map; /* track capabilities */
 	unsigned int state;
+	unsigned int domain_energy_unit;
 	int package_id;
 };
 #define power_zone_to_rapl_domain(_zone) \
@@ -190,6 +191,7 @@ struct rapl_defaults {
 	void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
 	u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
 				bool to_raw);
+	unsigned int dram_domain_energy_unit;
 };
 static struct rapl_defaults *rapl_defaults;
 
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 static int rapl_write_data_raw(struct rapl_domain *rd,
 			enum rapl_primitives prim,
 			unsigned long long value);
-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+			enum unit_type type, u64 value,
 			int to_raw);
 static void package_power_limit_irq_save(int package_id);
 
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
 
 static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
 {
-	*energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+	struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
+
+	*energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
 	return 0;
 }
 
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
 		rd->msrs[4] = MSR_DRAM_POWER_INFO;
 		rd->rpl[0].prim_id = PL1_ENABLE;
 		rd->rpl[0].name = pl1_name;
+		rd->domain_energy_unit =
+			rapl_defaults->dram_domain_energy_unit;
+		if (rd->domain_energy_unit)
+			pr_info("DRAM domain energy unit %dpj\n",
+				rd->domain_energy_unit);
 		break;
 	}
 	if (mask) {
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
 	}
 }
 
-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+			enum unit_type type, u64 value,
 			int to_raw)
 {
 	u64 units = 1;
 	struct rapl_package *rp;
+	u64 scale = 1;
 
 	rp = find_package_by_id(package);
 	if (!rp)
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
 		units = rp->power_unit;
 		break;
 	case ENERGY_UNIT:
-		units = rp->energy_unit;
+		scale = ENERGY_UNIT_SCALE;
+		/* per domain unit takes precedence */
+		if (rd && rd->domain_energy_unit)
+			units = rd->domain_energy_unit;
+		else
+			units = rp->energy_unit;
 		break;
 	case TIME_UNIT:
 		return rapl_defaults->compute_time_window(rp, value, to_raw);
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
 	};
 
 	if (to_raw)
-		return div64_u64(value, units);
+		return div64_u64(value, units) * scale;
 
 	value *= units;
 
-	return value;
+	return div64_u64(value, scale);
 }
 
 /* in the order of enum rapl_primitives */
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 	final = value & rp->mask;
 	final = final >> rp->shift;
 	if (xlate)
-		*data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+		*data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
 	else
 		*data = final;
 
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
 		return -EIO;
 	}
-	value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
 	msr_val &= ~rp->mask;
 	msr_val |= value << rp->shift;
 	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
  * calculate units differ on different CPUs.
  * We convert the units to below format based on CPUs.
  * i.e.
- * energy unit: microJoules  : Represented in microJoules by default
+ * energy unit: picoJoules   : Represented in picoJoules by default
  * power unit : microWatts   : Represented in milliWatts by default
  * time unit  : microseconds : Represented in seconds by default
  */
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
 	}
 
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit = 1000000 / (1 << value);
+	rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
 
 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit = 1000000 / (1 << value);
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
 	rp->time_unit = 1000000 / (1 << value);
 
-	pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n",
+	pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
 
 	return 0;
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 		return -ENODEV;
 	}
 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
-	rp->energy_unit = 1 << value;
+	rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
 
 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
 	rp->power_unit = (1 << value) * 1000;
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
 	rp->time_unit = 1000000 / (1 << value);
 
-	pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n",
+	pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
 
 	return 0;
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
 	.compute_time_window = rapl_compute_time_window_core,
 };
 
+static const struct rapl_defaults rapl_defaults_hsw_server = {
+	.check_unit = rapl_check_unit_core,
+	.set_floor_freq = set_floor_freq_default,
+	.compute_time_window = rapl_compute_time_window_core,
+	.dram_domain_energy_unit = 15300,
+};
+
 static const struct rapl_defaults rapl_defaults_atom = {
 	.check_unit = rapl_check_unit_atom,
 	.set_floor_freq = set_floor_freq_atom,
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
 	RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
 	RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
 	RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
-	RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */
+	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
 	RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
 	RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
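[Review note] With ENERGY_UNIT_SCALE the driver now keeps energy_unit in picojoules per count instead of microjoules, which is what lets the fixed Haswell-server DRAM unit of 15.3 uJ be expressed exactly as 15300. For example, a core package reporting 2^-14 J per count stores 1000 * 1000000 / 16384 = 61035. A quick runnable check of the conversion arithmetic, with values chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

#define ENERGY_UNIT_SCALE 1000ULL	/* driver unit (pJ) -> powercap unit (uJ) */

int main(void)
{
	unsigned int esu = 14;		/* example MSR energy-status-unit field: 2^-14 J per count */
	uint64_t core_unit = ENERGY_UNIT_SCALE * 1000000ULL / (1ULL << esu);
	uint64_t dram_unit = 15300;	/* Haswell server DRAM: fixed 15.3 uJ per count */
	uint64_t raw = 200000;		/* example raw energy counter value */

	printf("core unit   = %llu pJ/count\n", (unsigned long long)core_unit);
	printf("core energy = %llu uJ\n",
	       (unsigned long long)(raw * core_unit / ENERGY_UNIT_SCALE));
	printf("dram energy = %llu uJ\n",
	       (unsigned long long)(raw * dram_unit / ENERGY_UNIT_SCALE));
	return 0;
}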
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1245dca79009..a4a8a6dc60c4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
 	}
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, true);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 1;
+		if (!rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, true);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 1;
+		}
 	} else if (rdev->desc->ops->enable) {
 		ret = rdev->desc->ops->enable(rdev);
 		if (ret < 0)
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
 	trace_regulator_disable(rdev_get_name(rdev));
 
 	if (rdev->ena_pin) {
-		ret = regulator_ena_gpio_ctrl(rdev, false);
-		if (ret < 0)
-			return ret;
-		rdev->ena_gpio_state = 0;
+		if (rdev->ena_gpio_state) {
+			ret = regulator_ena_gpio_ctrl(rdev, false);
+			if (ret < 0)
+				return ret;
+			rdev->ena_gpio_state = 0;
+		}
 
 	} else if (rdev->desc->ops->disable) {
 		ret = rdev->desc->ops->disable(rdev);
@@ -3626,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
 				config->ena_gpio, ret);
 			goto wash;
 		}
-
-		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
-			rdev->ena_gpio_state = 1;
-
-		if (config->ena_gpio_invert)
-			rdev->ena_gpio_state = !rdev->ena_gpio_state;
 	}
 
 	/* set regulator constraints */
@@ -3800,9 +3798,11 @@ int regulator_suspend_finish(void)
 	list_for_each_entry(rdev, &regulator_list, list) {
 		mutex_lock(&rdev->mutex);
 		if (rdev->use_count > 0 || rdev->constraints->always_on) {
-			error = _regulator_do_enable(rdev);
-			if (error)
-				ret = error;
+			if (!_regulator_is_enabled(rdev)) {
+				error = _regulator_do_enable(rdev);
+				if (error)
+					ret = error;
+			}
 		} else {
 			if (!have_full_constraints())
 				goto unlock;
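[Review note] Taken together, the regulator changes make the GPIO enable path idempotent: the pin is only driven when the cached ena_gpio_state disagrees with the requested state, which is also what lets suspend_finish skip regulators that are already on. A small user-space sketch of the same guarded-toggle pattern; the names are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct reg {
	bool gpio_state;	/* cached state of the enable "pin" */
	int toggles;		/* how often the pin was actually driven */
};

static int do_enable(struct reg *r)
{
	if (!r->gpio_state) {	/* only touch the pin on a real transition */
		r->toggles++;
		r->gpio_state = true;
	}
	return 0;
}

static int do_disable(struct reg *r)
{
	if (r->gpio_state) {
		r->toggles++;
		r->gpio_state = false;
	}
	return 0;
}

int main(void)
{
	struct reg r = { 0 };

	do_enable(&r);
	do_enable(&r);	/* second call is a no-op instead of re-driving the pin */
	do_disable(&r);
	printf("pin toggled %d times\n", r.toggles);	/* prints 2 */
	return 0;
}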
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
 	if (!pmic)
 		return -ENOMEM;
 
+	if (of_device_is_compatible(node, "ti,tps659038-pmic"))
+		palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
+						TPS659038_REGEN2_CTRL;
+
 	pmic->dev = &pdev->dev;
 	pmic->palmas = palmas;
 	palmas->pmic = pmic;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index e2cffe01b807..fb991ec76423 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 92f6af6da699..73354ee27877 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev)
 	void *bufs_va;
 	int err = 0, i;
 	size_t total_buf_space;
+	bool notify;
 
 	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
 	if (!vrp)
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev)
 		}
 	}
 
+	/*
+	 * Prepare to kick but don't notify yet - we can't do this before
+	 * device is ready.
+	 */
+	notify = virtqueue_kick_prepare(vrp->rvq);
+
+	/* From this point on, we can notify and get callbacks. */
+	virtio_device_ready(vdev);
+
 	/* tell the remote processor it can start sending messages */
-	virtqueue_kick(vrp->rvq);
+	/*
+	 * this might be concurrent with callbacks, but we are only
+	 * doing notify, not a full kick here, so that's ok.
+	 */
+	if (notify)
+		virtqueue_notify(vrp->rvq);
 
 	dev_info(&vdev->dev, "rpmsg host is online\n");
 
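[Review note] The probe change splits virtqueue_kick() into its prepare and notify halves so that virtio_device_ready() can be called in between: the device has to be marked ready before the host is poked, otherwise the first callback could race with a device that is not yet ready. A kernel-context sketch of the pattern (not a standalone program; the function name below is hypothetical):

/* Kernel-context sketch; assumes vdev and vq were set up earlier in probe(). */
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void finish_probe(struct virtio_device *vdev, struct virtqueue *vq)
{
	bool notify;

	notify = virtqueue_kick_prepare(vq);	/* decide whether a notify is needed */
	virtio_device_ready(vdev);		/* from here on callbacks may run */
	if (notify)
		virtqueue_notify(vq);		/* safe: the device is already ready */
}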
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b4f7744f6751..b283a1a573b3 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 
 		ret = IRQ_HANDLED;
 	}
-	spin_lock(&suspended_lock);
+	spin_unlock(&suspended_lock);
 
 	return ret;
 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4241eeab3386..f4cf6851fae9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
 
 static struct s3c_rtc_data const s3c6410_rtc_data = {
 	.max_user_freq		= 32768,
+	.needs_src_clk		= true,
 	.irq_handler		= s3c6410_rtc_irq,
 	.set_freq		= s3c6410_rtc_setfreq,
 	.enable_tick		= s3c6410_rtc_enable_tick,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+			  ATA_FLAG_SAS_HOST,
 	.pio_mask	= ATA_PIO4_ONLY,
 	.mwdma_mask	= ATA_MWDMA2,
 	.udma_mask	= ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
 };
 
 static struct ata_port_info sata_port_info = {
-	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
+		 ATA_FLAG_SAS_HOST,
 	.pio_mask = ATA_PIO4,
 	.mwdma_mask = ATA_MWDMA2,
 	.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 62b58d38ce2e..60de66252fa2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
 	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 	struct sas_ha_struct *ha = port->ha;
+	struct domain_device *ddev = port->port_dev;
 
 	/* prevent revalidation from finding sata links in recovery */
 	mutex_lock(&ha->disco_mutex);
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
 	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
 		    task_pid_nr(current));
 
-	if (port->port_dev)
-		res = sas_ex_revalidate_domain(port->port_dev);
+	if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+		     ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
+		res = sas_ex_revalidate_domain(ddev);
 
 	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
 		    port->id, task_pid_nr(current), res);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 99f43b7fc9ab..ab4879e12ea7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
 	/*
 	 * Finally register the new FC Nexus with TCM
 	 */
-	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+	transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d10fafb..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
 {
 	struct dw_spi *dws = arg;
 
-	if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
+	clear_bit(TX_BUSY, &dws->dma_chan_busy);
+	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
 		return;
 	dw_spi_xfer_done(dws);
 }
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
 {
 	struct dw_spi *dws = arg;
 
-	if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
+	clear_bit(RX_BUSY, &dws->dma_chan_busy);
+	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
 		return;
 	dw_spi_xfer_done(dws);
 }
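[Review note] The original code used the return value of test_and_clear_bit(), which only reflects the bit being cleared, to decide whether the other DMA direction was still busy; the fix clears its own flag and then tests the peer's flag. A user-space sketch of the corrected completion handshake using C11 atomics; the bit numbers and helper names are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdio.h>

#define TX_BUSY 0
#define RX_BUSY 1

static atomic_uint busy = (1u << TX_BUSY) | (1u << RX_BUSY);

static void xfer_done(void)
{
	puts("both DMA channels finished - complete the transfer");
}

static void tx_done(void)
{
	atomic_fetch_and(&busy, ~(1u << TX_BUSY));	/* clear our own flag first */
	if (atomic_load(&busy) & (1u << RX_BUSY))	/* peer still running? */
		return;
	xfer_done();
}

static void rx_done(void)
{
	atomic_fetch_and(&busy, ~(1u << RX_BUSY));
	if (atomic_load(&busy) & (1u << TX_BUSY))
		return;
	xfer_done();
}

int main(void)
{
	tx_done();	/* RX still busy, so nothing happens yet */
	rx_done();	/* the last channel to finish calls xfer_done() */
	return 0;
}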
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct device *dev;
 	void __iomem *base;
-	u32 max_freq, iomode;
+	u32 max_freq, iomode, num_cs;
 	int ret, irq, size;
 
 	dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
 	}
 
 	/* use num-cs unless not present or out of range */
-	if (of_property_read_u16(dev->of_node, "num-cs",
-			&master->num_chipselect) ||
-	    (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+	    num_cs > SPI_NUM_CHIPSELECTS)
 		master->num_chipselect = SPI_NUM_CHIPSELECTS;
+	else
+		master->num_chipselect = num_cs;
 
 	master->bus_num = pdev->id;
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
 				"failed to unprepare message: %d\n", ret);
 		}
 	}
+
+	trace_spi_message_done(mesg);
+
 	master->cur_msg_prepared = false;
 
 	mesg->state = NULL;
 	if (mesg->complete)
 		mesg->complete(mesg->context);
-
-	trace_spi_message_done(mesg);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
 
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 4324282afe49..03b2a90b9ac0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice)
 	/* zonetype initial */
 	pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
 
-	/* Get RFType */
-	pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
-
-	/* force change RevID for VT3253 emu */
-	if ((pDevice->byRFType & RF_EMU) != 0)
-		pDevice->byRevId = 0x80;
-
-	pDevice->byRFType &= RF_MASK;
-	pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
-
 	if (!pDevice->bZoneRegExist)
 		pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
 
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	PSTxDesc head_td;
-	u32 dma_idx = TYPE_AC0DMA;
+	u32 dma_idx;
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	if (!ieee80211_is_data(hdr->frame_control))
+	if (ieee80211_is_data(hdr->frame_control))
+		dma_idx = TYPE_AC0DMA;
+	else
 		dma_idx = TYPE_TXDMA0;
 
 	if (AVAIL_TD(priv, dma_idx) < 1) {
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
 	head_td->pTDInfo->skb = skb;
 
+	if (dma_idx == TYPE_AC0DMA)
+		head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+
 	priv->iTDUsed[dma_idx]++;
 
 	/* Take ownership */
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
 	head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
 
-	if (dma_idx == TYPE_AC0DMA) {
-		head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
-
+	if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
 		MACvTransmitAC0(priv->PortOffset);
-	} else {
+	else
 		MACvTransmit0(priv->PortOffset);
-	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 	MACvInitialize(priv->PortOffset);
 	MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
 
+	/* Get RFType */
+	priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
+	priv->byRFType &= RF_MASK;
+
+	dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
+
 	device_get_options(priv);
 	device_set_options(priv);
 	/* Mask out the options cannot be set to the chip */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 941b2adca95a..7626f635f160 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -794,6 +794,7 @@ bool RFbSetPower(
 		break;
 	case RATE_6M:
 	case RATE_9M:
+	case RATE_12M:
 	case RATE_18M:
 		byPwr = priv->abyOFDMPwrTbl[uCH];
 		if (priv->byRFType == RF_UW2452)
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index c42cde59f598..c4286ccac320 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
 		break;
 	case RATE_6M:
 	case RATE_9M:
+	case RATE_12M:
 	case RATE_18M:
 	case RATE_24M:
 	case RATE_36M:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 50bad55a0c42..2accb6e47beb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4256,11 +4256,17 @@ int iscsit_close_connection(
 	pr_debug("Closing iSCSI connection CID %hu on SID:"
 		" %u\n", conn->cid, sess->sid);
 	/*
-	 * Always up conn_logout_comp just in case the RX Thread is sleeping
-	 * and the logout response never got sent because the connection
-	 * failed.
+	 * Always up conn_logout_comp for the traditional TCP case just in case
+	 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
+	 * response never got sent because the connection failed.
+	 *
+	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
+	 * to signal logout response TX interrupt completion. Go ahead and skip
+	 * this for iser since isert_rx_opcode() does not wait on logout failure,
+	 * and to avoid iscsi_conn pointer dereference in iser-target code.
 	 */
-	complete(&conn->conn_logout_comp);
+	if (conn->conn_transport->transport_type == ISCSI_TCP)
+		complete(&conn->conn_logout_comp);
 
 	iscsi_release_thread_set(conn);
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1c197bad6132..bdd8731a4daa 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -22,7 +22,6 @@
 #include <target/target_core_fabric.h>
 
 #include <target/iscsi/iscsi_target_core.h>
-#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
-		if (conn->conn_transport->transport_type == ISCSI_TCP)
-			iscsit_close_connection(conn);
+		iscsit_close_connection(conn);
 		return;
 	}
 
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6b3c32954689..c36bd7c29136 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus(
 		transport_free_session(tl_nexus->se_sess);
 		goto out;
 	}
-	/*
-	 * Now, register the SAS I_T Nexus as active with the call to
-	 * transport_register_session()
-	 */
-	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+	/* Now, register the SAS I_T Nexus as active. */
+	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_nexus = tl_nexus;
 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 58f49ff69b14..79b4ec3ca2db 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 	return aligned_max_sectors;
 }
 
+bool se_dev_check_wce(struct se_device *dev)
+{
+	bool wce = false;
+
+	if (dev->transport->get_write_cache)
+		wce = dev->transport->get_write_cache(dev);
+	else if (dev->dev_attrib.emulate_write_cache > 0)
+		wce = true;
+
+	return wce;
+}
+
 int se_dev_set_max_unmap_lba_count(
 	struct se_device *dev,
 	u32 max_unmap_lba_count)
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
+	if (flag &&
+	    dev->transport->get_write_cache) {
+		pr_err("emulate_fua_write not supported for this device\n");
+		return -EINVAL;
+	}
+	if (dev->export_count) {
+		pr_err("emulate_fua_write cannot be changed with active"
+		       " exports: %d\n", dev->export_count);
+		return -EINVAL;
+	}
 	dev->dev_attrib.emulate_fua_write = flag;
 	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
 		dev, dev->dev_attrib.emulate_fua_write);
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("emulate_write_cache not supported for this device\n");
 		return -EINVAL;
 	}
-
+	if (dev->export_count) {
+		pr_err("emulate_write_cache cannot be changed with active"
+		       " exports: %d\n", dev->export_count);
+		return -EINVAL;
+	}
 	dev->dev_attrib.emulate_write_cache = flag;
 	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
 		dev, dev->dev_attrib.emulate_write_cache);
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev)
 	ret = dev->transport->configure_device(dev);
 	if (ret)
 		goto out;
-	dev->dev_flags |= DF_CONFIGURED;
-
 	/*
 	 * XXX: there is not much point to have two different values here..
 	 */
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev)
 	list_add_tail(&dev->g_dev_node, &g_device_list);
 	mutex_unlock(&g_device_mutex);
 
+	dev->dev_flags |= DF_CONFIGURED;
+
 	return 0;
 
 out_free_alua:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 1045dcd7bf65..f6c954c4635f 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 	struct scsi_device *sd = pdv->pdv_sd;
 
-	return sd->type;
+	return (sd) ? sd->type : TYPE_NO_LUN;
 }
 
 static sector_t pscsi_get_blocks(struct se_device *dev)
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 9a2f9d3a6e70..3e7297411110 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
 		}
 	}
 	if (cdb[1] & 0x8) {
-		if (!dev->dev_attrib.emulate_fua_write ||
-		    !dev->dev_attrib.emulate_write_cache) {
+		if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
 			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
 			       " does not advertise support for FUA write\n",
 			       cdb[0]);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 460e93109473..6c8bd6bc175c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,19 +454,6 @@ check_scsi_name:
 }
 EXPORT_SYMBOL(spc_emulate_evpd_83);
 
-static bool
-spc_check_dev_wce(struct se_device *dev)
-{
-	bool wce = false;
-
-	if (dev->transport->get_write_cache)
-		wce = dev->transport->get_write_cache(dev);
-	else if (dev->dev_attrib.emulate_write_cache > 0)
-		wce = true;
-
-	return wce;
-}
-
 /* Extended INQUIRY Data VPD Page */
 static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (spc_check_dev_wce(dev))
+	if (se_dev_check_wce(dev))
 		buf[6] = 0x01;
 	/* If an LBA map is present set R_SUP */
 	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
 	if (pc == 1)
 		goto out;
 
-	if (spc_check_dev_wce(dev))
+	if (se_dev_check_wce(dev))
 		p[2] = 0x04; /* Write Cache Enable */
 	p[12] = 0x20; /* Disabled Read Ahead */
 
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
 		spc_modesense_write_protect(&buf[length], type);
 
-	if ((spc_check_dev_wce(dev)) &&
+	if ((se_dev_check_wce(dev)) &&
 	    (dev->dev_attrib.emulate_fua_write > 0))
 		spc_modesense_dpofua(&buf[length], type);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0adc0f650213..ac3cbabdbdf0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 out:
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	if (ret && ack_kref)
+		target_put_sess_cmd(se_sess, se_cmd);
+
 	return ret;
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 97b486c3dda1..583e755d8091 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
359 ep = fc_seq_exch(seq); 359 ep = fc_seq_exch(seq);
360 if (ep) { 360 if (ep) {
361 lport = ep->lp; 361 lport = ep->lp;
362 if (lport && (ep->xid <= lport->lro_xid)) 362 if (lport && (ep->xid <= lport->lro_xid)) {
363 /* 363 /*
364 * "ddp_done" trigger invalidation of HW 364 * "ddp_done" trigger invalidation of HW
365 * specific DDP context 365 * specific DDP context
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
374 * identified using ep->xid) 374 * identified using ep->xid)
375 */ 375 */
376 cmd->was_ddp_setup = 0; 376 cmd->was_ddp_setup = 0;
377 }
377 } 378 }
378 } 379 }
379} 380}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2ab229ddee38..6ae5b8560e4d 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
119 dw8250_force_idle(p); 119 dw8250_force_idle(p);
120 writeb(value, p->membase + (UART_LCR << p->regshift)); 120 writeb(value, p->membase + (UART_LCR << p->regshift));
121 } 121 }
122 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 122 /*
123 * FIXME: this deadlocks if port->lock is already held
124 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
125 */
123 } 126 }
124} 127}
125 128
@@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
163 __raw_writeq(value & 0xff, 166 __raw_writeq(value & 0xff,
164 p->membase + (UART_LCR << p->regshift)); 167 p->membase + (UART_LCR << p->regshift));
165 } 168 }
166 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 169 /*
170 * FIXME: this deadlocks if port->lock is already held
171 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
172 */
167 } 173 }
168} 174}
169#endif /* CONFIG_64BIT */ 175#endif /* CONFIG_64BIT */
@@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
187 dw8250_force_idle(p); 193 dw8250_force_idle(p);
188 writel(value, p->membase + (UART_LCR << p->regshift)); 194 writel(value, p->membase + (UART_LCR << p->regshift));
189 } 195 }
190 dev_err(p->dev, "Couldn't set LCR to %d\n", value); 196 /*
197 * FIXME: this deadlocks if port->lock is already held
198 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
199 */
191 } 200 }
192} 201}
193 202
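The three 8250_dw hunks above comment out a dev_err() that can deadlock: the LCR write helpers may run with port->lock already held, and printing through a serial console re-enters the same UART driver, which tries to take that lock again. A hedged sketch of the usual alternative, recording the failure under the lock and reporting it only after the lock is dropped, is below; my_uart and its field names are illustrative, not part of the driver.

#include <linux/spinlock.h>
#include <linux/device.h>

struct my_uart {
        spinlock_t lock;                /* models port->lock */
        struct device *dev;
        bool lcr_write_failed;          /* set under ->lock by the write path */
};

static void my_uart_report_lcr_failure(struct my_uart *u, int value)
{
        unsigned long flags;
        bool failed;

        spin_lock_irqsave(&u->lock, flags);
        failed = u->lcr_write_failed;
        u->lcr_write_failed = false;
        spin_unlock_irqrestore(&u->lock, flags);

        /* Safe here: a console write may take ->lock without self-deadlocking. */
        if (failed)
                dev_err(u->dev, "Couldn't set LCR to %d\n", value);
}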
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index ff451048c1ac..4bfb7ac0239f 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -929,6 +929,13 @@ __acquires(hwep->lock)
929 return retval; 929 return retval;
930} 930}
931 931
932static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
933{
934 dev_warn(&ci->gadget.dev,
935 "connect the device to an alternate port if you want HNP\n");
936 return isr_setup_status_phase(ci);
937}
938
932/** 939/**
933 * isr_setup_packet_handler: setup packet handler 940 * isr_setup_packet_handler: setup packet handler
934 * @ci: UDC descriptor 941 * @ci: UDC descriptor
@@ -1061,6 +1068,10 @@ __acquires(ci->lock)
1061 ci); 1068 ci);
1062 } 1069 }
1063 break; 1070 break;
1071 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1072 if (ci_otg_is_fsm_mode(ci))
1073 err = otg_a_alt_hnp_support(ci);
1074 break;
1064 default: 1075 default:
1065 goto delegate; 1076 goto delegate;
1066 } 1077 }
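The chipidea udc.c hunks above teach the setup handler about the OTG A_ALT_HNP_SUPPORT feature: in FSM mode the device only warns that HNP needs a different host port and then completes the status stage. A rough sketch of decoding that SET_FEATURE request is shown next; my_handle_otg_feature is a hypothetical helper and deliberately omits the status-stage handling a real UDC must do.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/usb/ch9.h>

static int my_handle_otg_feature(const struct usb_ctrlrequest *req)
{
        if ((req->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
            req->bRequest != USB_REQ_SET_FEATURE)
                return -EOPNOTSUPP;     /* leave it to the gadget driver */

        switch (le16_to_cpu(req->wValue)) {
        case USB_DEVICE_B_HNP_ENABLE:
                return 0;               /* host permits HNP on this port */
        case USB_DEVICE_A_ALT_HNP_SUPPORT:
                pr_warn("connect the device to an alternate port if you want HNP\n");
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}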
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index c6b35b77dab7..61d538aa2346 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
150 break; 150 break;
151 case OTG_STATE_B_PERIPHERAL: 151 case OTG_STATE_B_PERIPHERAL:
152 otg_chrg_vbus(fsm, 0); 152 otg_chrg_vbus(fsm, 0);
153 otg_loc_conn(fsm, 1);
154 otg_loc_sof(fsm, 0); 153 otg_loc_sof(fsm, 0);
155 otg_set_protocol(fsm, PROTO_GADGET); 154 otg_set_protocol(fsm, PROTO_GADGET);
155 otg_loc_conn(fsm, 1);
156 break; 156 break;
157 case OTG_STATE_B_WAIT_ACON: 157 case OTG_STATE_B_WAIT_ACON:
158 otg_chrg_vbus(fsm, 0); 158 otg_chrg_vbus(fsm, 0);
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
213 213
214 break; 214 break;
215 case OTG_STATE_A_PERIPHERAL: 215 case OTG_STATE_A_PERIPHERAL:
216 otg_loc_conn(fsm, 1);
217 otg_loc_sof(fsm, 0); 216 otg_loc_sof(fsm, 0);
218 otg_set_protocol(fsm, PROTO_GADGET); 217 otg_set_protocol(fsm, PROTO_GADGET);
219 otg_drv_vbus(fsm, 1); 218 otg_drv_vbus(fsm, 1);
219 otg_loc_conn(fsm, 1);
220 otg_add_timer(fsm, A_BIDL_ADIS); 220 otg_add_timer(fsm, A_BIDL_ADIS);
221 break; 221 break;
222 case OTG_STATE_A_WAIT_VFALL: 222 case OTG_STATE_A_WAIT_VFALL:
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 02e3e2d4ea56..6cf047878dba 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg)
377 dwc2_is_host_mode(hsotg) ? "Host" : "Device", 377 dwc2_is_host_mode(hsotg) ? "Host" : "Device",
378 dwc2_op_state_str(hsotg)); 378 dwc2_op_state_str(hsotg));
379 379
380 if (hsotg->op_state == OTG_STATE_A_HOST)
381 dwc2_hcd_disconnect(hsotg);
382
380 /* Change to L3 (OFF) state */ 383 /* Change to L3 (OFF) state */
381 hsotg->lx_state = DWC2_L3; 384 hsotg->lx_state = DWC2_L3;
382 385
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af98b096af2f..175c9956cbe3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -144,10 +144,9 @@ struct ffs_io_data {
144 bool read; 144 bool read;
145 145
146 struct kiocb *kiocb; 146 struct kiocb *kiocb;
147 const struct iovec *iovec; 147 struct iov_iter data;
148 unsigned long nr_segs; 148 const void *to_free;
149 char __user *buf; 149 char *buf;
150 size_t len;
151 150
152 struct mm_struct *mm; 151 struct mm_struct *mm;
153 struct work_struct work; 152 struct work_struct work;
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work)
649 io_data->req->actual; 648 io_data->req->actual;
650 649
651 if (io_data->read && ret > 0) { 650 if (io_data->read && ret > 0) {
652 int i;
653 size_t pos = 0;
654
655 /*
656 * Since req->length may be bigger than io_data->len (after
657 * being rounded up to maxpacketsize), we may end up with more
658 * data then user space has space for.
659 */
660 ret = min_t(int, ret, io_data->len);
661
662 use_mm(io_data->mm); 651 use_mm(io_data->mm);
663 for (i = 0; i < io_data->nr_segs; i++) { 652 ret = copy_to_iter(io_data->buf, ret, &io_data->data);
664 size_t len = min_t(size_t, ret - pos, 653 if (iov_iter_count(&io_data->data))
665 io_data->iovec[i].iov_len); 654 ret = -EFAULT;
666 if (!len)
667 break;
668 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
669 &io_data->buf[pos], len))) {
670 ret = -EFAULT;
671 break;
672 }
673 pos += len;
674 }
675 unuse_mm(io_data->mm); 655 unuse_mm(io_data->mm);
676 } 656 }
677 657
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
684 664
685 io_data->kiocb->private = NULL; 665 io_data->kiocb->private = NULL;
686 if (io_data->read) 666 if (io_data->read)
687 kfree(io_data->iovec); 667 kfree(io_data->to_free);
688 kfree(io_data->buf); 668 kfree(io_data->buf);
689 kfree(io_data); 669 kfree(io_data);
690} 670}
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
743 * before the waiting completes, so do not assign to 'gadget' earlier 723 * before the waiting completes, so do not assign to 'gadget' earlier
744 */ 724 */
745 struct usb_gadget *gadget = epfile->ffs->gadget; 725 struct usb_gadget *gadget = epfile->ffs->gadget;
726 size_t copied;
746 727
747 spin_lock_irq(&epfile->ffs->eps_lock); 728 spin_lock_irq(&epfile->ffs->eps_lock);
748 /* In the meantime, endpoint got disabled or changed. */ 729 /* In the meantime, endpoint got disabled or changed. */
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
750 spin_unlock_irq(&epfile->ffs->eps_lock); 731 spin_unlock_irq(&epfile->ffs->eps_lock);
751 return -ESHUTDOWN; 732 return -ESHUTDOWN;
752 } 733 }
734 data_len = iov_iter_count(&io_data->data);
753 /* 735 /*
754 * Controller may require buffer size to be aligned to 736 * Controller may require buffer size to be aligned to
755 * maxpacketsize of an out endpoint. 737 * maxpacketsize of an out endpoint.
756 */ 738 */
757 data_len = io_data->read ? 739 if (io_data->read)
758 usb_ep_align_maybe(gadget, ep->ep, io_data->len) : 740 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
759 io_data->len;
760 spin_unlock_irq(&epfile->ffs->eps_lock); 741 spin_unlock_irq(&epfile->ffs->eps_lock);
761 742
762 data = kmalloc(data_len, GFP_KERNEL); 743 data = kmalloc(data_len, GFP_KERNEL);
763 if (unlikely(!data)) 744 if (unlikely(!data))
764 return -ENOMEM; 745 return -ENOMEM;
765 if (io_data->aio && !io_data->read) { 746 if (!io_data->read) {
766 int i; 747 copied = copy_from_iter(data, data_len, &io_data->data);
767 size_t pos = 0; 748 if (copied != data_len) {
768 for (i = 0; i < io_data->nr_segs; i++) {
769 if (unlikely(copy_from_user(&data[pos],
770 io_data->iovec[i].iov_base,
771 io_data->iovec[i].iov_len))) {
772 ret = -EFAULT;
773 goto error;
774 }
775 pos += io_data->iovec[i].iov_len;
776 }
777 } else {
778 if (!io_data->read &&
779 unlikely(__copy_from_user(data, io_data->buf,
780 io_data->len))) {
781 ret = -EFAULT; 749 ret = -EFAULT;
782 goto error; 750 goto error;
783 } 751 }
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
876 */ 844 */
877 ret = ep->status; 845 ret = ep->status;
878 if (io_data->read && ret > 0) { 846 if (io_data->read && ret > 0) {
879 ret = min_t(size_t, ret, io_data->len); 847 ret = copy_to_iter(data, ret, &io_data->data);
880 848 if (unlikely(iov_iter_count(&io_data->data)))
881 if (unlikely(copy_to_user(io_data->buf,
882 data, ret)))
883 ret = -EFAULT; 849 ret = -EFAULT;
884 } 850 }
885 } 851 }
@@ -898,37 +864,6 @@ error:
898 return ret; 864 return ret;
899} 865}
900 866
901static ssize_t
902ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
903 loff_t *ptr)
904{
905 struct ffs_io_data io_data;
906
907 ENTER();
908
909 io_data.aio = false;
910 io_data.read = false;
911 io_data.buf = (char * __user)buf;
912 io_data.len = len;
913
914 return ffs_epfile_io(file, &io_data);
915}
916
917static ssize_t
918ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
919{
920 struct ffs_io_data io_data;
921
922 ENTER();
923
924 io_data.aio = false;
925 io_data.read = true;
926 io_data.buf = buf;
927 io_data.len = len;
928
929 return ffs_epfile_io(file, &io_data);
930}
931
932static int 867static int
933ffs_epfile_open(struct inode *inode, struct file *file) 868ffs_epfile_open(struct inode *inode, struct file *file)
934{ 869{
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
965 return value; 900 return value;
966} 901}
967 902
968static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, 903static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
969 const struct iovec *iovec,
970 unsigned long nr_segs, loff_t loff)
971{ 904{
972 struct ffs_io_data *io_data; 905 struct ffs_io_data io_data, *p = &io_data;
906 ssize_t res;
973 907
974 ENTER(); 908 ENTER();
975 909
976 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 910 if (!is_sync_kiocb(kiocb)) {
977 if (unlikely(!io_data)) 911 p = kmalloc(sizeof(io_data), GFP_KERNEL);
978 return -ENOMEM; 912 if (unlikely(!p))
913 return -ENOMEM;
914 p->aio = true;
915 } else {
916 p->aio = false;
917 }
979 918
980 io_data->aio = true; 919 p->read = false;
981 io_data->read = false; 920 p->kiocb = kiocb;
982 io_data->kiocb = kiocb; 921 p->data = *from;
983 io_data->iovec = iovec; 922 p->mm = current->mm;
984 io_data->nr_segs = nr_segs;
985 io_data->len = kiocb->ki_nbytes;
986 io_data->mm = current->mm;
987 923
988 kiocb->private = io_data; 924 kiocb->private = p;
989 925
990 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 926 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
991 927
992 return ffs_epfile_io(kiocb->ki_filp, io_data); 928 res = ffs_epfile_io(kiocb->ki_filp, p);
929 if (res == -EIOCBQUEUED)
930 return res;
931 if (p->aio)
932 kfree(p);
933 else
934 *from = p->data;
935 return res;
993} 936}
994 937
995static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, 938static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
996 const struct iovec *iovec,
997 unsigned long nr_segs, loff_t loff)
998{ 939{
999 struct ffs_io_data *io_data; 940 struct ffs_io_data io_data, *p = &io_data;
1000 struct iovec *iovec_copy; 941 ssize_t res;
1001 942
1002 ENTER(); 943 ENTER();
1003 944
1004 iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); 945 if (!is_sync_kiocb(kiocb)) {
1005 if (unlikely(!iovec_copy)) 946 p = kmalloc(sizeof(io_data), GFP_KERNEL);
1006 return -ENOMEM; 947 if (unlikely(!p))
1007 948 return -ENOMEM;
1008 memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); 949 p->aio = true;
1009 950 } else {
1010 io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); 951 p->aio = false;
1011 if (unlikely(!io_data)) {
1012 kfree(iovec_copy);
1013 return -ENOMEM;
1014 } 952 }
1015 953
1016 io_data->aio = true; 954 p->read = true;
1017 io_data->read = true; 955 p->kiocb = kiocb;
1018 io_data->kiocb = kiocb; 956 if (p->aio) {
1019 io_data->iovec = iovec_copy; 957 p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
1020 io_data->nr_segs = nr_segs; 958 if (!p->to_free) {
1021 io_data->len = kiocb->ki_nbytes; 959 kfree(p);
1022 io_data->mm = current->mm; 960 return -ENOMEM;
961 }
962 } else {
963 p->data = *to;
964 p->to_free = NULL;
965 }
966 p->mm = current->mm;
1023 967
1024 kiocb->private = io_data; 968 kiocb->private = p;
1025 969
1026 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 970 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
1027 971
1028 return ffs_epfile_io(kiocb->ki_filp, io_data); 972 res = ffs_epfile_io(kiocb->ki_filp, p);
973 if (res == -EIOCBQUEUED)
974 return res;
975
976 if (p->aio) {
977 kfree(p->to_free);
978 kfree(p);
979 } else {
980 *to = p->data;
981 }
982 return res;
1029} 983}
1030 984
1031static int 985static int
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
1105 .llseek = no_llseek, 1059 .llseek = no_llseek,
1106 1060
1107 .open = ffs_epfile_open, 1061 .open = ffs_epfile_open,
1108 .write = ffs_epfile_write, 1062 .write = new_sync_write,
1109 .read = ffs_epfile_read, 1063 .read = new_sync_read,
1110 .aio_write = ffs_epfile_aio_write, 1064 .write_iter = ffs_epfile_write_iter,
1111 .aio_read = ffs_epfile_aio_read, 1065 .read_iter = ffs_epfile_read_iter,
1112 .release = ffs_epfile_release, 1066 .release = ffs_epfile_release,
1113 .unlocked_ioctl = ffs_epfile_ioctl, 1067 .unlocked_ioctl = ffs_epfile_ioctl,
1114}; 1068};
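The f_fs.c conversion above replaces the hand-rolled iovec walking with iov_iter: plain read()/write() go through new_sync_read()/new_sync_write() into the same read_iter/write_iter paths, data is moved with copy_to_iter()/copy_from_iter(), and the async read path duplicates the iterator with dup_iter() so the completion worker can copy data back later. A minimal sketch of the iter-based file_operations shape adopted here follows; the demo_* names and the fixed 64-byte buffer are illustrative only, and new_sync_read/new_sync_write are the sync wrappers in-tree drivers of this kernel generation use.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/errno.h>
#include <linux/module.h>

static char demo_buf[64] = "iov_iter demo\n";

static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        size_t len = min(iov_iter_count(to), sizeof(demo_buf));

        /* copy_to_iter() walks every user segment; no manual iovec loop */
        return copy_to_iter(demo_buf, len, to);
}

static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        size_t len = min(iov_iter_count(from), sizeof(demo_buf));

        if (copy_from_iter(demo_buf, len, from) != len)
                return -EFAULT;
        return len;
}

static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = new_sync_read,        /* sync paths reuse the iter ops */
        .write          = new_sync_write,
        .read_iter      = demo_read_iter,
        .write_iter     = demo_write_iter,
};

The real f_fs code additionally has to return -EIOCBQUEUED for async submissions and free the dup_iter() copy (io_data->to_free) in the completion worker, which is why its read/write_iter helpers are longer than this sketch.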
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 298b46112b1a..39f49f1ad22f 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop)
289 struct usb_composite_dev *cdev; 289 struct usb_composite_dev *cdev;
290 290
291 cdev = loop->function.config->cdev; 291 cdev = loop->function.config->cdev;
292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, 292 disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL);
293 NULL);
294 VDBG(cdev, "%s disabled\n", loop->function.name); 293 VDBG(cdev, "%s disabled\n", loop->function.name);
295} 294}
296 295
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e3dae47baef3..3a5ae9900b1e 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -23,15 +23,6 @@
23#include "gadget_chips.h" 23#include "gadget_chips.h"
24#include "u_f.h" 24#include "u_f.h"
25 25
26#define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x)
27
28enum eptype {
29 EP_CONTROL = 0,
30 EP_BULK,
31 EP_ISOC,
32 EP_INTERRUPT,
33};
34
35/* 26/*
36 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral 27 * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral
37 * controller drivers. 28 * controller drivers.
@@ -64,8 +55,6 @@ struct f_sourcesink {
64 struct usb_ep *out_ep; 55 struct usb_ep *out_ep;
65 struct usb_ep *iso_in_ep; 56 struct usb_ep *iso_in_ep;
66 struct usb_ep *iso_out_ep; 57 struct usb_ep *iso_out_ep;
67 struct usb_ep *int_in_ep;
68 struct usb_ep *int_out_ep;
69 int cur_alt; 58 int cur_alt;
70}; 59};
71 60
@@ -79,10 +68,6 @@ static unsigned isoc_interval;
79static unsigned isoc_maxpacket; 68static unsigned isoc_maxpacket;
80static unsigned isoc_mult; 69static unsigned isoc_mult;
81static unsigned isoc_maxburst; 70static unsigned isoc_maxburst;
82static unsigned int_interval; /* In ms */
83static unsigned int_maxpacket;
84static unsigned int_mult;
85static unsigned int_maxburst;
86static unsigned buflen; 71static unsigned buflen;
87 72
88/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = {
107 /* .iInterface = DYNAMIC */ 92 /* .iInterface = DYNAMIC */
108}; 93};
109 94
110static struct usb_interface_descriptor source_sink_intf_alt2 = {
111 .bLength = USB_DT_INTERFACE_SIZE,
112 .bDescriptorType = USB_DT_INTERFACE,
113
114 .bAlternateSetting = 2,
115 .bNumEndpoints = 2,
116 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
117 /* .iInterface = DYNAMIC */
118};
119
120/* full speed support: */ 95/* full speed support: */
121 96
122static struct usb_endpoint_descriptor fs_source_desc = { 97static struct usb_endpoint_descriptor fs_source_desc = {
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = {
155 .bInterval = 4, 130 .bInterval = 4,
156}; 131};
157 132
158static struct usb_endpoint_descriptor fs_int_source_desc = {
159 .bLength = USB_DT_ENDPOINT_SIZE,
160 .bDescriptorType = USB_DT_ENDPOINT,
161
162 .bEndpointAddress = USB_DIR_IN,
163 .bmAttributes = USB_ENDPOINT_XFER_INT,
164 .wMaxPacketSize = cpu_to_le16(64),
165 .bInterval = GZERO_INT_INTERVAL,
166};
167
168static struct usb_endpoint_descriptor fs_int_sink_desc = {
169 .bLength = USB_DT_ENDPOINT_SIZE,
170 .bDescriptorType = USB_DT_ENDPOINT,
171
172 .bEndpointAddress = USB_DIR_OUT,
173 .bmAttributes = USB_ENDPOINT_XFER_INT,
174 .wMaxPacketSize = cpu_to_le16(64),
175 .bInterval = GZERO_INT_INTERVAL,
176};
177
178static struct usb_descriptor_header *fs_source_sink_descs[] = { 133static struct usb_descriptor_header *fs_source_sink_descs[] = {
179 (struct usb_descriptor_header *) &source_sink_intf_alt0, 134 (struct usb_descriptor_header *) &source_sink_intf_alt0,
180 (struct usb_descriptor_header *) &fs_sink_desc, 135 (struct usb_descriptor_header *) &fs_sink_desc,
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = {
185 (struct usb_descriptor_header *) &fs_source_desc, 140 (struct usb_descriptor_header *) &fs_source_desc,
186 (struct usb_descriptor_header *) &fs_iso_sink_desc, 141 (struct usb_descriptor_header *) &fs_iso_sink_desc,
187 (struct usb_descriptor_header *) &fs_iso_source_desc, 142 (struct usb_descriptor_header *) &fs_iso_source_desc,
188 (struct usb_descriptor_header *) &source_sink_intf_alt2,
189#define FS_ALT_IFC_2_OFFSET 8
190 (struct usb_descriptor_header *) &fs_int_sink_desc,
191 (struct usb_descriptor_header *) &fs_int_source_desc,
192 NULL, 143 NULL,
193}; 144};
194 145
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = {
228 .bInterval = 4, 179 .bInterval = 4,
229}; 180};
230 181
231static struct usb_endpoint_descriptor hs_int_source_desc = {
232 .bLength = USB_DT_ENDPOINT_SIZE,
233 .bDescriptorType = USB_DT_ENDPOINT,
234
235 .bmAttributes = USB_ENDPOINT_XFER_INT,
236 .wMaxPacketSize = cpu_to_le16(1024),
237 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
238};
239
240static struct usb_endpoint_descriptor hs_int_sink_desc = {
241 .bLength = USB_DT_ENDPOINT_SIZE,
242 .bDescriptorType = USB_DT_ENDPOINT,
243
244 .bmAttributes = USB_ENDPOINT_XFER_INT,
245 .wMaxPacketSize = cpu_to_le16(1024),
246 .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL),
247};
248
249static struct usb_descriptor_header *hs_source_sink_descs[] = { 182static struct usb_descriptor_header *hs_source_sink_descs[] = {
250 (struct usb_descriptor_header *) &source_sink_intf_alt0, 183 (struct usb_descriptor_header *) &source_sink_intf_alt0,
251 (struct usb_descriptor_header *) &hs_source_desc, 184 (struct usb_descriptor_header *) &hs_source_desc,
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
256 (struct usb_descriptor_header *) &hs_sink_desc, 189 (struct usb_descriptor_header *) &hs_sink_desc,
257 (struct usb_descriptor_header *) &hs_iso_source_desc, 190 (struct usb_descriptor_header *) &hs_iso_source_desc,
258 (struct usb_descriptor_header *) &hs_iso_sink_desc, 191 (struct usb_descriptor_header *) &hs_iso_sink_desc,
259 (struct usb_descriptor_header *) &source_sink_intf_alt2,
260#define HS_ALT_IFC_2_OFFSET 8
261 (struct usb_descriptor_header *) &hs_int_source_desc,
262 (struct usb_descriptor_header *) &hs_int_sink_desc,
263 NULL, 192 NULL,
264}; 193};
265 194
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = {
335 .wBytesPerInterval = cpu_to_le16(1024), 264 .wBytesPerInterval = cpu_to_le16(1024),
336}; 265};
337 266
338static struct usb_endpoint_descriptor ss_int_source_desc = {
339 .bLength = USB_DT_ENDPOINT_SIZE,
340 .bDescriptorType = USB_DT_ENDPOINT,
341
342 .bmAttributes = USB_ENDPOINT_XFER_INT,
343 .wMaxPacketSize = cpu_to_le16(1024),
344 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
345};
346
347static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = {
348 .bLength = USB_DT_SS_EP_COMP_SIZE,
349 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
350
351 .bMaxBurst = 0,
352 .bmAttributes = 0,
353 .wBytesPerInterval = cpu_to_le16(1024),
354};
355
356static struct usb_endpoint_descriptor ss_int_sink_desc = {
357 .bLength = USB_DT_ENDPOINT_SIZE,
358 .bDescriptorType = USB_DT_ENDPOINT,
359
360 .bmAttributes = USB_ENDPOINT_XFER_INT,
361 .wMaxPacketSize = cpu_to_le16(1024),
362 .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL),
363};
364
365static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = {
366 .bLength = USB_DT_SS_EP_COMP_SIZE,
367 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
368
369 .bMaxBurst = 0,
370 .bmAttributes = 0,
371 .wBytesPerInterval = cpu_to_le16(1024),
372};
373
374static struct usb_descriptor_header *ss_source_sink_descs[] = { 267static struct usb_descriptor_header *ss_source_sink_descs[] = {
375 (struct usb_descriptor_header *) &source_sink_intf_alt0, 268 (struct usb_descriptor_header *) &source_sink_intf_alt0,
376 (struct usb_descriptor_header *) &ss_source_desc, 269 (struct usb_descriptor_header *) &ss_source_desc,
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = {
387 (struct usb_descriptor_header *) &ss_iso_source_comp_desc, 280 (struct usb_descriptor_header *) &ss_iso_source_comp_desc,
388 (struct usb_descriptor_header *) &ss_iso_sink_desc, 281 (struct usb_descriptor_header *) &ss_iso_sink_desc,
389 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, 282 (struct usb_descriptor_header *) &ss_iso_sink_comp_desc,
390 (struct usb_descriptor_header *) &source_sink_intf_alt2,
391#define SS_ALT_IFC_2_OFFSET 14
392 (struct usb_descriptor_header *) &ss_int_source_desc,
393 (struct usb_descriptor_header *) &ss_int_source_comp_desc,
394 (struct usb_descriptor_header *) &ss_int_sink_desc,
395 (struct usb_descriptor_header *) &ss_int_sink_comp_desc,
396 NULL, 283 NULL,
397}; 284};
398 285
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = {
414}; 301};
415 302
416/*-------------------------------------------------------------------------*/ 303/*-------------------------------------------------------------------------*/
417static const char *get_ep_string(enum eptype ep_type)
418{
419 switch (ep_type) {
420 case EP_ISOC:
421 return "ISOC-";
422 case EP_INTERRUPT:
423 return "INTERRUPT-";
424 case EP_CONTROL:
425 return "CTRL-";
426 case EP_BULK:
427 return "BULK-";
428 default:
429 return "UNKNOWN-";
430 }
431}
432 304
433static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) 305static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
434{ 306{
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
456 328
457void disable_endpoints(struct usb_composite_dev *cdev, 329void disable_endpoints(struct usb_composite_dev *cdev,
458 struct usb_ep *in, struct usb_ep *out, 330 struct usb_ep *in, struct usb_ep *out,
459 struct usb_ep *iso_in, struct usb_ep *iso_out, 331 struct usb_ep *iso_in, struct usb_ep *iso_out)
460 struct usb_ep *int_in, struct usb_ep *int_out)
461{ 332{
462 disable_ep(cdev, in); 333 disable_ep(cdev, in);
463 disable_ep(cdev, out); 334 disable_ep(cdev, out);
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev,
465 disable_ep(cdev, iso_in); 336 disable_ep(cdev, iso_in);
466 if (iso_out) 337 if (iso_out)
467 disable_ep(cdev, iso_out); 338 disable_ep(cdev, iso_out);
468 if (int_in)
469 disable_ep(cdev, int_in);
470 if (int_out)
471 disable_ep(cdev, int_out);
472} 339}
473 340
474static int 341static int
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
485 return id; 352 return id;
486 source_sink_intf_alt0.bInterfaceNumber = id; 353 source_sink_intf_alt0.bInterfaceNumber = id;
487 source_sink_intf_alt1.bInterfaceNumber = id; 354 source_sink_intf_alt1.bInterfaceNumber = id;
488 source_sink_intf_alt2.bInterfaceNumber = id;
489 355
490 /* allocate bulk endpoints */ 356 /* allocate bulk endpoints */
491 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); 357 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
@@ -546,55 +412,14 @@ no_iso:
546 if (isoc_maxpacket > 1024) 412 if (isoc_maxpacket > 1024)
547 isoc_maxpacket = 1024; 413 isoc_maxpacket = 1024;
548 414
549 /* sanity check the interrupt module parameters */
550 if (int_interval < 1)
551 int_interval = 1;
552 if (int_interval > 4096)
553 int_interval = 4096;
554 if (int_mult > 2)
555 int_mult = 2;
556 if (int_maxburst > 15)
557 int_maxburst = 15;
558
559 /* fill in the FS interrupt descriptors from the module parameters */
560 fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ?
561 64 : int_maxpacket;
562 fs_int_source_desc.bInterval = int_interval > 255 ?
563 255 : int_interval;
564 fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ?
565 64 : int_maxpacket;
566 fs_int_sink_desc.bInterval = int_interval > 255 ?
567 255 : int_interval;
568
569 /* allocate int endpoints */
570 ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc);
571 if (!ss->int_in_ep)
572 goto no_int;
573 ss->int_in_ep->driver_data = cdev; /* claim */
574
575 ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc);
576 if (ss->int_out_ep) {
577 ss->int_out_ep->driver_data = cdev; /* claim */
578 } else {
579 ss->int_in_ep->driver_data = NULL;
580 ss->int_in_ep = NULL;
581no_int:
582 fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL;
583 hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL;
584 ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL;
585 }
586
587 if (int_maxpacket > 1024)
588 int_maxpacket = 1024;
589
590 /* support high speed hardware */ 415 /* support high speed hardware */
591 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; 416 hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress;
592 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; 417 hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress;
593 418
594 /* 419 /*
595 * Fill in the HS isoc and interrupt descriptors from the module 420 * Fill in the HS isoc descriptors from the module parameters.
596 * parameters. We assume that the user knows what they are doing and 421 * We assume that the user knows what they are doing and won't
597 * won't give parameters that their UDC doesn't support. 422 * give parameters that their UDC doesn't support.
598 */ 423 */
599 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 424 hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
600 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; 425 hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11;
@@ -607,17 +432,6 @@ no_int:
607 hs_iso_sink_desc.bInterval = isoc_interval; 432 hs_iso_sink_desc.bInterval = isoc_interval;
608 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 433 hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
609 434
610 hs_int_source_desc.wMaxPacketSize = int_maxpacket;
611 hs_int_source_desc.wMaxPacketSize |= int_mult << 11;
612 hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
613 hs_int_source_desc.bEndpointAddress =
614 fs_int_source_desc.bEndpointAddress;
615
616 hs_int_sink_desc.wMaxPacketSize = int_maxpacket;
617 hs_int_sink_desc.wMaxPacketSize |= int_mult << 11;
618 hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval);
619 hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
620
621 /* support super speed hardware */ 435 /* support super speed hardware */
622 ss_source_desc.bEndpointAddress = 436 ss_source_desc.bEndpointAddress =
623 fs_source_desc.bEndpointAddress; 437 fs_source_desc.bEndpointAddress;
@@ -625,9 +439,9 @@ no_int:
625 fs_sink_desc.bEndpointAddress; 439 fs_sink_desc.bEndpointAddress;
626 440
627 /* 441 /*
628 * Fill in the SS isoc and interrupt descriptors from the module 442 * Fill in the SS isoc descriptors from the module parameters.
629 * parameters. We assume that the user knows what they are doing and 443 * We assume that the user knows what they are doing and won't
630 * won't give parameters that their UDC doesn't support. 444 * give parameters that their UDC doesn't support.
631 */ 445 */
632 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; 446 ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket;
633 ss_iso_source_desc.bInterval = isoc_interval; 447 ss_iso_source_desc.bInterval = isoc_interval;
@@ -646,37 +460,17 @@ no_int:
646 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); 460 isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1);
647 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; 461 ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
648 462
649 ss_int_source_desc.wMaxPacketSize = int_maxpacket;
650 ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
651 ss_int_source_comp_desc.bmAttributes = int_mult;
652 ss_int_source_comp_desc.bMaxBurst = int_maxburst;
653 ss_int_source_comp_desc.wBytesPerInterval =
654 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
655 ss_int_source_desc.bEndpointAddress =
656 fs_int_source_desc.bEndpointAddress;
657
658 ss_int_sink_desc.wMaxPacketSize = int_maxpacket;
659 ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval);
660 ss_int_sink_comp_desc.bmAttributes = int_mult;
661 ss_int_sink_comp_desc.bMaxBurst = int_maxburst;
662 ss_int_sink_comp_desc.wBytesPerInterval =
663 int_maxpacket * (int_mult + 1) * (int_maxburst + 1);
664 ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress;
665
666 ret = usb_assign_descriptors(f, fs_source_sink_descs, 463 ret = usb_assign_descriptors(f, fs_source_sink_descs,
667 hs_source_sink_descs, ss_source_sink_descs); 464 hs_source_sink_descs, ss_source_sink_descs);
668 if (ret) 465 if (ret)
669 return ret; 466 return ret;
670 467
671 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " 468 DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
672 "INT-IN/%s, INT-OUT/%s\n",
673 (gadget_is_superspeed(c->cdev->gadget) ? "super" : 469 (gadget_is_superspeed(c->cdev->gadget) ? "super" :
674 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), 470 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
675 f->name, ss->in_ep->name, ss->out_ep->name, 471 f->name, ss->in_ep->name, ss->out_ep->name,
676 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", 472 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
677 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", 473 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
678 ss->int_in_ep ? ss->int_in_ep->name : "<none>",
679 ss->int_out_ep ? ss->int_out_ep->name : "<none>");
680 return 0; 474 return 0;
681} 475}
682 476
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
807} 601}
808 602
809static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, 603static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
810 enum eptype ep_type, int speed) 604 bool is_iso, int speed)
811{ 605{
812 struct usb_ep *ep; 606 struct usb_ep *ep;
813 struct usb_request *req; 607 struct usb_request *req;
814 int i, size, status; 608 int i, size, status;
815 609
816 for (i = 0; i < 8; i++) { 610 for (i = 0; i < 8; i++) {
817 switch (ep_type) { 611 if (is_iso) {
818 case EP_ISOC:
819 switch (speed) { 612 switch (speed) {
820 case USB_SPEED_SUPER: 613 case USB_SPEED_SUPER:
821 size = isoc_maxpacket * (isoc_mult + 1) * 614 size = isoc_maxpacket * (isoc_mult + 1) *
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
831 } 624 }
832 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; 625 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep;
833 req = ss_alloc_ep_req(ep, size); 626 req = ss_alloc_ep_req(ep, size);
834 break; 627 } else {
835 case EP_INTERRUPT:
836 switch (speed) {
837 case USB_SPEED_SUPER:
838 size = int_maxpacket * (int_mult + 1) *
839 (int_maxburst + 1);
840 break;
841 case USB_SPEED_HIGH:
842 size = int_maxpacket * (int_mult + 1);
843 break;
844 default:
845 size = int_maxpacket > 1023 ?
846 1023 : int_maxpacket;
847 break;
848 }
849 ep = is_in ? ss->int_in_ep : ss->int_out_ep;
850 req = ss_alloc_ep_req(ep, size);
851 break;
852 default:
853 ep = is_in ? ss->in_ep : ss->out_ep; 628 ep = is_in ? ss->in_ep : ss->out_ep;
854 req = ss_alloc_ep_req(ep, 0); 629 req = ss_alloc_ep_req(ep, 0);
855 break;
856 } 630 }
857 631
858 if (!req) 632 if (!req)
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
870 644
871 cdev = ss->function.config->cdev; 645 cdev = ss->function.config->cdev;
872 ERROR(cdev, "start %s%s %s --> %d\n", 646 ERROR(cdev, "start %s%s %s --> %d\n",
873 get_ep_string(ep_type), is_in ? "IN" : "OUT", 647 is_iso ? "ISO-" : "", is_in ? "IN" : "OUT",
874 ep->name, status); 648 ep->name, status);
875 free_ep_req(ep, req); 649 free_ep_req(ep, req);
876 } 650 }
877 651
878 if (!(ep_type == EP_ISOC)) 652 if (!is_iso)
879 break; 653 break;
880 } 654 }
881 655
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss)
888 662
889 cdev = ss->function.config->cdev; 663 cdev = ss->function.config->cdev;
890 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, 664 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep,
891 ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); 665 ss->iso_out_ep);
892 VDBG(cdev, "%s disabled\n", ss->function.name); 666 VDBG(cdev, "%s disabled\n", ss->function.name);
893} 667}
894 668
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss,
900 int speed = cdev->gadget->speed; 674 int speed = cdev->gadget->speed;
901 struct usb_ep *ep; 675 struct usb_ep *ep;
902 676
903 if (alt == 2) {
904 /* Configure for periodic interrupt endpoint */
905 ep = ss->int_in_ep;
906 if (ep) {
907 result = config_ep_by_speed(cdev->gadget,
908 &(ss->function), ep);
909 if (result)
910 return result;
911
912 result = usb_ep_enable(ep);
913 if (result < 0)
914 return result;
915
916 ep->driver_data = ss;
917 result = source_sink_start_ep(ss, true, EP_INTERRUPT,
918 speed);
919 if (result < 0) {
920fail1:
921 ep = ss->int_in_ep;
922 if (ep) {
923 usb_ep_disable(ep);
924 ep->driver_data = NULL;
925 }
926 return result;
927 }
928 }
929
930 /*
931 * one interrupt endpoint reads (sinks) anything OUT (from the
932 * host)
933 */
934 ep = ss->int_out_ep;
935 if (ep) {
936 result = config_ep_by_speed(cdev->gadget,
937 &(ss->function), ep);
938 if (result)
939 goto fail1;
940
941 result = usb_ep_enable(ep);
942 if (result < 0)
943 goto fail1;
944
945 ep->driver_data = ss;
946 result = source_sink_start_ep(ss, false, EP_INTERRUPT,
947 speed);
948 if (result < 0) {
949 ep = ss->int_out_ep;
950 usb_ep_disable(ep);
951 ep->driver_data = NULL;
952 goto fail1;
953 }
954 }
955
956 goto out;
957 }
958
959 /* one bulk endpoint writes (sources) zeroes IN (to the host) */ 677 /* one bulk endpoint writes (sources) zeroes IN (to the host) */
960 ep = ss->in_ep; 678 ep = ss->in_ep;
961 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); 679 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
@@ -966,7 +684,7 @@ fail1:
966 return result; 684 return result;
967 ep->driver_data = ss; 685 ep->driver_data = ss;
968 686
969 result = source_sink_start_ep(ss, true, EP_BULK, speed); 687 result = source_sink_start_ep(ss, true, false, speed);
970 if (result < 0) { 688 if (result < 0) {
971fail: 689fail:
972 ep = ss->in_ep; 690 ep = ss->in_ep;
@@ -985,7 +703,7 @@ fail:
985 goto fail; 703 goto fail;
986 ep->driver_data = ss; 704 ep->driver_data = ss;
987 705
988 result = source_sink_start_ep(ss, false, EP_BULK, speed); 706 result = source_sink_start_ep(ss, false, false, speed);
989 if (result < 0) { 707 if (result < 0) {
990fail2: 708fail2:
991 ep = ss->out_ep; 709 ep = ss->out_ep;
@@ -1008,7 +726,7 @@ fail2:
1008 goto fail2; 726 goto fail2;
1009 ep->driver_data = ss; 727 ep->driver_data = ss;
1010 728
1011 result = source_sink_start_ep(ss, true, EP_ISOC, speed); 729 result = source_sink_start_ep(ss, true, true, speed);
1012 if (result < 0) { 730 if (result < 0) {
1013fail3: 731fail3:
1014 ep = ss->iso_in_ep; 732 ep = ss->iso_in_ep;
@@ -1031,14 +749,13 @@ fail3:
1031 goto fail3; 749 goto fail3;
1032 ep->driver_data = ss; 750 ep->driver_data = ss;
1033 751
1034 result = source_sink_start_ep(ss, false, EP_ISOC, speed); 752 result = source_sink_start_ep(ss, false, true, speed);
1035 if (result < 0) { 753 if (result < 0) {
1036 usb_ep_disable(ep); 754 usb_ep_disable(ep);
1037 ep->driver_data = NULL; 755 ep->driver_data = NULL;
1038 goto fail3; 756 goto fail3;
1039 } 757 }
1040 } 758 }
1041
1042out: 759out:
1043 ss->cur_alt = alt; 760 ss->cur_alt = alt;
1044 761
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f,
1054 771
1055 if (ss->in_ep->driver_data) 772 if (ss->in_ep->driver_data)
1056 disable_source_sink(ss); 773 disable_source_sink(ss);
1057 else if (alt == 2 && ss->int_in_ep->driver_data)
1058 disable_source_sink(ss);
1059 return enable_source_sink(cdev, ss, alt); 774 return enable_source_sink(cdev, ss, alt);
1060} 775}
1061 776
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func(
1168 isoc_maxpacket = ss_opts->isoc_maxpacket; 883 isoc_maxpacket = ss_opts->isoc_maxpacket;
1169 isoc_mult = ss_opts->isoc_mult; 884 isoc_mult = ss_opts->isoc_mult;
1170 isoc_maxburst = ss_opts->isoc_maxburst; 885 isoc_maxburst = ss_opts->isoc_maxburst;
1171 int_interval = ss_opts->int_interval;
1172 int_maxpacket = ss_opts->int_maxpacket;
1173 int_mult = ss_opts->int_mult;
1174 int_maxburst = ss_opts->int_maxburst;
1175 buflen = ss_opts->bulk_buflen; 886 buflen = ss_opts->bulk_buflen;
1176 887
1177 ss->function.name = "source/sink"; 888 ss->function.name = "source/sink";
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen =
1468 f_ss_opts_bulk_buflen_show, 1179 f_ss_opts_bulk_buflen_show,
1469 f_ss_opts_bulk_buflen_store); 1180 f_ss_opts_bulk_buflen_store);
1470 1181
1471static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page)
1472{
1473 int result;
1474
1475 mutex_lock(&opts->lock);
1476 result = sprintf(page, "%u", opts->int_interval);
1477 mutex_unlock(&opts->lock);
1478
1479 return result;
1480}
1481
1482static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts,
1483 const char *page, size_t len)
1484{
1485 int ret;
1486 u32 num;
1487
1488 mutex_lock(&opts->lock);
1489 if (opts->refcnt) {
1490 ret = -EBUSY;
1491 goto end;
1492 }
1493
1494 ret = kstrtou32(page, 0, &num);
1495 if (ret)
1496 goto end;
1497
1498 if (num > 4096) {
1499 ret = -EINVAL;
1500 goto end;
1501 }
1502
1503 opts->int_interval = num;
1504 ret = len;
1505end:
1506 mutex_unlock(&opts->lock);
1507 return ret;
1508}
1509
1510static struct f_ss_opts_attribute f_ss_opts_int_interval =
1511 __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR,
1512 f_ss_opts_int_interval_show,
1513 f_ss_opts_int_interval_store);
1514
1515static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page)
1516{
1517 int result;
1518
1519 mutex_lock(&opts->lock);
1520 result = sprintf(page, "%u", opts->int_maxpacket);
1521 mutex_unlock(&opts->lock);
1522
1523 return result;
1524}
1525
1526static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts,
1527 const char *page, size_t len)
1528{
1529 int ret;
1530 u16 num;
1531
1532 mutex_lock(&opts->lock);
1533 if (opts->refcnt) {
1534 ret = -EBUSY;
1535 goto end;
1536 }
1537
1538 ret = kstrtou16(page, 0, &num);
1539 if (ret)
1540 goto end;
1541
1542 if (num > 1024) {
1543 ret = -EINVAL;
1544 goto end;
1545 }
1546
1547 opts->int_maxpacket = num;
1548 ret = len;
1549end:
1550 mutex_unlock(&opts->lock);
1551 return ret;
1552}
1553
1554static struct f_ss_opts_attribute f_ss_opts_int_maxpacket =
1555 __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR,
1556 f_ss_opts_int_maxpacket_show,
1557 f_ss_opts_int_maxpacket_store);
1558
1559static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page)
1560{
1561 int result;
1562
1563 mutex_lock(&opts->lock);
1564 result = sprintf(page, "%u", opts->int_mult);
1565 mutex_unlock(&opts->lock);
1566
1567 return result;
1568}
1569
1570static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts,
1571 const char *page, size_t len)
1572{
1573 int ret;
1574 u8 num;
1575
1576 mutex_lock(&opts->lock);
1577 if (opts->refcnt) {
1578 ret = -EBUSY;
1579 goto end;
1580 }
1581
1582 ret = kstrtou8(page, 0, &num);
1583 if (ret)
1584 goto end;
1585
1586 if (num > 2) {
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590
1591 opts->int_mult = num;
1592 ret = len;
1593end:
1594 mutex_unlock(&opts->lock);
1595 return ret;
1596}
1597
1598static struct f_ss_opts_attribute f_ss_opts_int_mult =
1599 __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR,
1600 f_ss_opts_int_mult_show,
1601 f_ss_opts_int_mult_store);
1602
1603static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page)
1604{
1605 int result;
1606
1607 mutex_lock(&opts->lock);
1608 result = sprintf(page, "%u", opts->int_maxburst);
1609 mutex_unlock(&opts->lock);
1610
1611 return result;
1612}
1613
1614static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts,
1615 const char *page, size_t len)
1616{
1617 int ret;
1618 u8 num;
1619
1620 mutex_lock(&opts->lock);
1621 if (opts->refcnt) {
1622 ret = -EBUSY;
1623 goto end;
1624 }
1625
1626 ret = kstrtou8(page, 0, &num);
1627 if (ret)
1628 goto end;
1629
1630 if (num > 15) {
1631 ret = -EINVAL;
1632 goto end;
1633 }
1634
1635 opts->int_maxburst = num;
1636 ret = len;
1637end:
1638 mutex_unlock(&opts->lock);
1639 return ret;
1640}
1641
1642static struct f_ss_opts_attribute f_ss_opts_int_maxburst =
1643 __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR,
1644 f_ss_opts_int_maxburst_show,
1645 f_ss_opts_int_maxburst_store);
1646
1647static struct configfs_attribute *ss_attrs[] = { 1182static struct configfs_attribute *ss_attrs[] = {
1648 &f_ss_opts_pattern.attr, 1183 &f_ss_opts_pattern.attr,
1649 &f_ss_opts_isoc_interval.attr, 1184 &f_ss_opts_isoc_interval.attr,
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = {
1651 &f_ss_opts_isoc_mult.attr, 1186 &f_ss_opts_isoc_mult.attr,
1652 &f_ss_opts_isoc_maxburst.attr, 1187 &f_ss_opts_isoc_maxburst.attr,
1653 &f_ss_opts_bulk_buflen.attr, 1188 &f_ss_opts_bulk_buflen.attr,
1654 &f_ss_opts_int_interval.attr,
1655 &f_ss_opts_int_maxpacket.attr,
1656 &f_ss_opts_int_mult.attr,
1657 &f_ss_opts_int_maxburst.attr,
1658 NULL, 1189 NULL,
1659}; 1190};
1660 1191
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void)
1684 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; 1215 ss_opts->isoc_interval = GZERO_ISOC_INTERVAL;
1685 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; 1216 ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET;
1686 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; 1217 ss_opts->bulk_buflen = GZERO_BULK_BUFLEN;
1687 ss_opts->int_interval = GZERO_INT_INTERVAL;
1688 ss_opts->int_maxpacket = GZERO_INT_MAXPACKET;
1689 1218
1690 config_group_init_type_name(&ss_opts->func_inst.group, "", 1219 config_group_init_type_name(&ss_opts->func_inst.group, "",
1691 &ss_func_type); 1220 &ss_func_type);
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 2ce28b9d97cc..15f180904f8a 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -10,8 +10,6 @@
10#define GZERO_QLEN 32 10#define GZERO_QLEN 32
11#define GZERO_ISOC_INTERVAL 4 11#define GZERO_ISOC_INTERVAL 4
12#define GZERO_ISOC_MAXPACKET 1024 12#define GZERO_ISOC_MAXPACKET 1024
13#define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */
14#define GZERO_INT_MAXPACKET 1024
15 13
16struct usb_zero_options { 14struct usb_zero_options {
17 unsigned pattern; 15 unsigned pattern;
@@ -19,10 +17,6 @@ struct usb_zero_options {
19 unsigned isoc_maxpacket; 17 unsigned isoc_maxpacket;
20 unsigned isoc_mult; 18 unsigned isoc_mult;
21 unsigned isoc_maxburst; 19 unsigned isoc_maxburst;
22 unsigned int_interval; /* In ms */
23 unsigned int_maxpacket;
24 unsigned int_mult;
25 unsigned int_maxburst;
26 unsigned bulk_buflen; 20 unsigned bulk_buflen;
27 unsigned qlen; 21 unsigned qlen;
28}; 22};
@@ -34,10 +28,6 @@ struct f_ss_opts {
34 unsigned isoc_maxpacket; 28 unsigned isoc_maxpacket;
35 unsigned isoc_mult; 29 unsigned isoc_mult;
36 unsigned isoc_maxburst; 30 unsigned isoc_maxburst;
37 unsigned int_interval; /* In ms */
38 unsigned int_maxpacket;
39 unsigned int_mult;
40 unsigned int_maxburst;
41 unsigned bulk_buflen; 31 unsigned bulk_buflen;
42 32
43 /* 33 /*
@@ -72,7 +62,6 @@ int lb_modinit(void);
72void free_ep_req(struct usb_ep *ep, struct usb_request *req); 62void free_ep_req(struct usb_ep *ep, struct usb_request *req);
73void disable_endpoints(struct usb_composite_dev *cdev, 63void disable_endpoints(struct usb_composite_dev *cdev,
74 struct usb_ep *in, struct usb_ep *out, 64 struct usb_ep *in, struct usb_ep *out,
75 struct usb_ep *iso_in, struct usb_ep *iso_out, 65 struct usb_ep *iso_in, struct usb_ep *iso_out);
76 struct usb_ep *int_in, struct usb_ep *int_out);
77 66
78#endif /* __G_ZERO_H */ 67#endif /* __G_ZERO_H */
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4c748e..200f9a584064 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC);
74MODULE_AUTHOR ("David Brownell"); 74MODULE_AUTHOR ("David Brownell");
75MODULE_LICENSE ("GPL"); 75MODULE_LICENSE ("GPL");
76 76
77static int ep_open(struct inode *, struct file *);
78
77 79
78/*----------------------------------------------------------------------*/ 80/*----------------------------------------------------------------------*/
79 81
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
283 * still need dev->lock to use epdata->ep. 285 * still need dev->lock to use epdata->ep.
284 */ 286 */
285static int 287static int
286get_ready_ep (unsigned f_flags, struct ep_data *epdata) 288get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
287{ 289{
288 int val; 290 int val;
289 291
290 if (f_flags & O_NONBLOCK) { 292 if (f_flags & O_NONBLOCK) {
291 if (!mutex_trylock(&epdata->lock)) 293 if (!mutex_trylock(&epdata->lock))
292 goto nonblock; 294 goto nonblock;
293 if (epdata->state != STATE_EP_ENABLED) { 295 if (epdata->state != STATE_EP_ENABLED &&
296 (!is_write || epdata->state != STATE_EP_READY)) {
294 mutex_unlock(&epdata->lock); 297 mutex_unlock(&epdata->lock);
295nonblock: 298nonblock:
296 val = -EAGAIN; 299 val = -EAGAIN;
@@ -305,18 +308,20 @@ nonblock:
305 308
306 switch (epdata->state) { 309 switch (epdata->state) {
307 case STATE_EP_ENABLED: 310 case STATE_EP_ENABLED:
311 return 0;
312 case STATE_EP_READY: /* not configured yet */
313 if (is_write)
314 return 0;
315 // FALLTHRU
316 case STATE_EP_UNBOUND: /* clean disconnect */
308 break; 317 break;
309 // case STATE_EP_DISABLED: /* "can't happen" */ 318 // case STATE_EP_DISABLED: /* "can't happen" */
310 // case STATE_EP_READY: /* "can't happen" */
311 default: /* error! */ 319 default: /* error! */
312 pr_debug ("%s: ep %p not available, state %d\n", 320 pr_debug ("%s: ep %p not available, state %d\n",
313 shortname, epdata, epdata->state); 321 shortname, epdata, epdata->state);
314 // FALLTHROUGH
315 case STATE_EP_UNBOUND: /* clean disconnect */
316 val = -ENODEV;
317 mutex_unlock(&epdata->lock);
318 } 322 }
319 return val; 323 mutex_unlock(&epdata->lock);
324 return -ENODEV;
320} 325}
321 326
322static ssize_t 327static ssize_t
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
363 return value; 368 return value;
364} 369}
365 370
366
367/* handle a synchronous OUT bulk/intr/iso transfer */
368static ssize_t
369ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
370{
371 struct ep_data *data = fd->private_data;
372 void *kbuf;
373 ssize_t value;
374
375 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
376 return value;
377
378 /* halt any endpoint by doing a "wrong direction" i/o call */
379 if (usb_endpoint_dir_in(&data->desc)) {
380 if (usb_endpoint_xfer_isoc(&data->desc)) {
381 mutex_unlock(&data->lock);
382 return -EINVAL;
383 }
384 DBG (data->dev, "%s halt\n", data->name);
385 spin_lock_irq (&data->dev->lock);
386 if (likely (data->ep != NULL))
387 usb_ep_set_halt (data->ep);
388 spin_unlock_irq (&data->dev->lock);
389 mutex_unlock(&data->lock);
390 return -EBADMSG;
391 }
392
393 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
394
395 value = -ENOMEM;
396 kbuf = kmalloc (len, GFP_KERNEL);
397 if (unlikely (!kbuf))
398 goto free1;
399
400 value = ep_io (data, kbuf, len);
401 VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
402 data->name, len, (int) value);
403 if (value >= 0 && copy_to_user (buf, kbuf, value))
404 value = -EFAULT;
405
406free1:
407 mutex_unlock(&data->lock);
408 kfree (kbuf);
409 return value;
410}
411
412/* handle a synchronous IN bulk/intr/iso transfer */
413static ssize_t
414ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
415{
416 struct ep_data *data = fd->private_data;
417 void *kbuf;
418 ssize_t value;
419
420 if ((value = get_ready_ep (fd->f_flags, data)) < 0)
421 return value;
422
423 /* halt any endpoint by doing a "wrong direction" i/o call */
424 if (!usb_endpoint_dir_in(&data->desc)) {
425 if (usb_endpoint_xfer_isoc(&data->desc)) {
426 mutex_unlock(&data->lock);
427 return -EINVAL;
428 }
429 DBG (data->dev, "%s halt\n", data->name);
430 spin_lock_irq (&data->dev->lock);
431 if (likely (data->ep != NULL))
432 usb_ep_set_halt (data->ep);
433 spin_unlock_irq (&data->dev->lock);
434 mutex_unlock(&data->lock);
435 return -EBADMSG;
436 }
437
438 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
439
440 value = -ENOMEM;
441 kbuf = memdup_user(buf, len);
442 if (IS_ERR(kbuf)) {
443 value = PTR_ERR(kbuf);
444 kbuf = NULL;
445 goto free1;
446 }
447
448 value = ep_io (data, kbuf, len);
449 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
450 data->name, len, (int) value);
451free1:
452 mutex_unlock(&data->lock);
453 kfree (kbuf);
454 return value;
455}
456
457static int 371static int
458ep_release (struct inode *inode, struct file *fd) 372ep_release (struct inode *inode, struct file *fd)
459{ 373{
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
481 struct ep_data *data = fd->private_data; 395 struct ep_data *data = fd->private_data;
482 int status; 396 int status;
483 397
484 if ((status = get_ready_ep (fd->f_flags, data)) < 0) 398 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
485 return status; 399 return status;
486 400
487 spin_lock_irq (&data->dev->lock); 401 spin_lock_irq (&data->dev->lock);
@@ -517,8 +431,8 @@ struct kiocb_priv {
517 struct mm_struct *mm; 431 struct mm_struct *mm;
518 struct work_struct work; 432 struct work_struct work;
519 void *buf; 433 void *buf;
520 const struct iovec *iv; 434 struct iov_iter to;
521 unsigned long nr_segs; 435 const void *to_free;
522 unsigned actual; 436 unsigned actual;
523}; 437};
524 438
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb)
541 return value; 455 return value;
542} 456}
543 457
544static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
545{
546 ssize_t len, total;
547 void *to_copy;
548 int i;
549
550 /* copy stuff into user buffers */
551 total = priv->actual;
552 len = 0;
553 to_copy = priv->buf;
554 for (i=0; i < priv->nr_segs; i++) {
555 ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
556
557 if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
558 if (len == 0)
559 len = -EFAULT;
560 break;
561 }
562
563 total -= this;
564 len += this;
565 to_copy += this;
566 if (total == 0)
567 break;
568 }
569
570 return len;
571}
572
573static void ep_user_copy_worker(struct work_struct *work) 458static void ep_user_copy_worker(struct work_struct *work)
574{ 459{
575 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); 460 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work)
578 size_t ret; 463 size_t ret;
579 464
580 use_mm(mm); 465 use_mm(mm);
581 ret = ep_copy_to_user(priv); 466 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
582 unuse_mm(mm); 467 unuse_mm(mm);
468 if (!ret)
469 ret = -EFAULT;
583 470
584 /* completing the iocb can drop the ctx and mm, don't touch mm after */ 471 /* completing the iocb can drop the ctx and mm, don't touch mm after */
585 aio_complete(iocb, ret, ret); 472 aio_complete(iocb, ret, ret);
586 473
587 kfree(priv->buf); 474 kfree(priv->buf);
475 kfree(priv->to_free);
588 kfree(priv); 476 kfree(priv);
589} 477}
590 478
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
603 * don't need to copy anything to userspace, so we can 491 * don't need to copy anything to userspace, so we can
604 * complete the aio request immediately. 492 * complete the aio request immediately.
605 */ 493 */
606 if (priv->iv == NULL || unlikely(req->actual == 0)) { 494 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
607 kfree(req->buf); 495 kfree(req->buf);
496 kfree(priv->to_free);
608 kfree(priv); 497 kfree(priv);
609 iocb->private = NULL; 498 iocb->private = NULL;
610 /* aio_complete() reports bytes-transferred _and_ faults */ 499 /* aio_complete() reports bytes-transferred _and_ faults */
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
618 507
619 priv->buf = req->buf; 508 priv->buf = req->buf;
620 priv->actual = req->actual; 509 priv->actual = req->actual;
510 INIT_WORK(&priv->work, ep_user_copy_worker);
621 schedule_work(&priv->work); 511 schedule_work(&priv->work);
622 } 512 }
623 spin_unlock(&epdata->dev->lock); 513 spin_unlock(&epdata->dev->lock);
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
626 put_ep(epdata); 516 put_ep(epdata);
627} 517}
628 518
629static ssize_t 519static ssize_t ep_aio(struct kiocb *iocb,
630ep_aio_rwtail( 520 struct kiocb_priv *priv,
631 struct kiocb *iocb, 521 struct ep_data *epdata,
632 char *buf, 522 char *buf,
633 size_t len, 523 size_t len)
634 struct ep_data *epdata,
635 const struct iovec *iv,
636 unsigned long nr_segs
637)
638{ 524{
639 struct kiocb_priv *priv; 525 struct usb_request *req;
640 struct usb_request *req; 526 ssize_t value;
641 ssize_t value;
642 527
643 priv = kmalloc(sizeof *priv, GFP_KERNEL);
644 if (!priv) {
645 value = -ENOMEM;
646fail:
647 kfree(buf);
648 return value;
649 }
650 iocb->private = priv; 528 iocb->private = priv;
651 priv->iocb = iocb; 529 priv->iocb = iocb;
652 priv->iv = iv;
653 priv->nr_segs = nr_segs;
654 INIT_WORK(&priv->work, ep_user_copy_worker);
655
656 value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
657 if (unlikely(value < 0)) {
658 kfree(priv);
659 goto fail;
660 }
661 530
662 kiocb_set_cancel_fn(iocb, ep_aio_cancel); 531 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
663 get_ep(epdata); 532 get_ep(epdata);
@@ -669,75 +538,154 @@ fail:
669 * allocate or submit those if the host disconnected. 538 * allocate or submit those if the host disconnected.
670 */ 539 */
671 spin_lock_irq(&epdata->dev->lock); 540 spin_lock_irq(&epdata->dev->lock);
672 if (likely(epdata->ep)) { 541 value = -ENODEV;
673	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); 542	if (unlikely(!epdata->ep))
674 if (likely(req)) { 543 goto fail;
675 priv->req = req;
676 req->buf = buf;
677 req->length = len;
678 req->complete = ep_aio_complete;
679 req->context = iocb;
680 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
681 if (unlikely(0 != value))
682 usb_ep_free_request(epdata->ep, req);
683 } else
684 value = -EAGAIN;
685 } else
686 value = -ENODEV;
687 spin_unlock_irq(&epdata->dev->lock);
688 544
689 mutex_unlock(&epdata->lock); 545 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
546 value = -ENOMEM;
547 if (unlikely(!req))
548 goto fail;
690 549
691 if (unlikely(value)) { 550 priv->req = req;
692 kfree(priv); 551 req->buf = buf;
693 put_ep(epdata); 552 req->length = len;
694 } else 553 req->complete = ep_aio_complete;
695 value = -EIOCBQUEUED; 554 req->context = iocb;
555 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
556 if (unlikely(0 != value)) {
557 usb_ep_free_request(epdata->ep, req);
558 goto fail;
559 }
560 spin_unlock_irq(&epdata->dev->lock);
561 return -EIOCBQUEUED;
562
563fail:
564 spin_unlock_irq(&epdata->dev->lock);
565 kfree(priv->to_free);
566 kfree(priv);
567 put_ep(epdata);
696 return value; 568 return value;
697} 569}
698 570
699static ssize_t 571static ssize_t
700ep_aio_read(struct kiocb *iocb, const struct iovec *iov, 572ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
701 unsigned long nr_segs, loff_t o)
702{ 573{
703 struct ep_data *epdata = iocb->ki_filp->private_data; 574 struct file *file = iocb->ki_filp;
704 char *buf; 575 struct ep_data *epdata = file->private_data;
576 size_t len = iov_iter_count(to);
577 ssize_t value;
578 char *buf;
705 579
706 if (unlikely(usb_endpoint_dir_in(&epdata->desc))) 580 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
707 return -EINVAL; 581 return value;
708 582
709 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 583 /* halt any endpoint by doing a "wrong direction" i/o call */
710 if (unlikely(!buf)) 584 if (usb_endpoint_dir_in(&epdata->desc)) {
711 return -ENOMEM; 585 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
586 !is_sync_kiocb(iocb)) {
587 mutex_unlock(&epdata->lock);
588 return -EINVAL;
589 }
590 DBG (epdata->dev, "%s halt\n", epdata->name);
591 spin_lock_irq(&epdata->dev->lock);
592 if (likely(epdata->ep != NULL))
593 usb_ep_set_halt(epdata->ep);
594 spin_unlock_irq(&epdata->dev->lock);
595 mutex_unlock(&epdata->lock);
596 return -EBADMSG;
597 }
712 598
713 return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); 599 buf = kmalloc(len, GFP_KERNEL);
600 if (unlikely(!buf)) {
601 mutex_unlock(&epdata->lock);
602 return -ENOMEM;
603 }
604 if (is_sync_kiocb(iocb)) {
605 value = ep_io(epdata, buf, len);
 606	if (value >= 0 && (copy_to_iter(buf, value, to) != value))
607 value = -EFAULT;
608 } else {
609 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
610 value = -ENOMEM;
611 if (!priv)
612 goto fail;
613 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
614 if (!priv->to_free) {
615 kfree(priv);
616 goto fail;
617 }
618 value = ep_aio(iocb, priv, epdata, buf, len);
619 if (value == -EIOCBQUEUED)
620 buf = NULL;
621 }
622fail:
623 kfree(buf);
624 mutex_unlock(&epdata->lock);
625 return value;
714} 626}
715 627
628static ssize_t ep_config(struct ep_data *, const char *, size_t);
629
716static ssize_t 630static ssize_t
717ep_aio_write(struct kiocb *iocb, const struct iovec *iov, 631ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
718 unsigned long nr_segs, loff_t o)
719{ 632{
720 struct ep_data *epdata = iocb->ki_filp->private_data; 633 struct file *file = iocb->ki_filp;
721 char *buf; 634 struct ep_data *epdata = file->private_data;
722 size_t len = 0; 635 size_t len = iov_iter_count(from);
723 int i = 0; 636 bool configured;
637 ssize_t value;
638 char *buf;
639
640 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
641 return value;
724 642
725 if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) 643 configured = epdata->state == STATE_EP_ENABLED;
726 return -EINVAL;
727 644
728 buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); 645 /* halt any endpoint by doing a "wrong direction" i/o call */
729 if (unlikely(!buf)) 646 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
647 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
648 !is_sync_kiocb(iocb)) {
649 mutex_unlock(&epdata->lock);
650 return -EINVAL;
651 }
652 DBG (epdata->dev, "%s halt\n", epdata->name);
653 spin_lock_irq(&epdata->dev->lock);
654 if (likely(epdata->ep != NULL))
655 usb_ep_set_halt(epdata->ep);
656 spin_unlock_irq(&epdata->dev->lock);
657 mutex_unlock(&epdata->lock);
658 return -EBADMSG;
659 }
660
661 buf = kmalloc(len, GFP_KERNEL);
662 if (unlikely(!buf)) {
663 mutex_unlock(&epdata->lock);
730 return -ENOMEM; 664 return -ENOMEM;
665 }
731 666
732 for (i=0; i < nr_segs; i++) { 667 if (unlikely(copy_from_iter(buf, len, from) != len)) {
733 if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, 668 value = -EFAULT;
734 iov[i].iov_len) != 0)) { 669 goto out;
735 kfree(buf); 670 }
736 return -EFAULT; 671
672 if (unlikely(!configured)) {
673 value = ep_config(epdata, buf, len);
674 } else if (is_sync_kiocb(iocb)) {
675 value = ep_io(epdata, buf, len);
676 } else {
677 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
678 value = -ENOMEM;
679 if (priv) {
680 value = ep_aio(iocb, priv, epdata, buf, len);
681 if (value == -EIOCBQUEUED)
682 buf = NULL;
737 } 683 }
738 len += iov[i].iov_len;
739 } 684 }
740 return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); 685out:
686 kfree(buf);
687 mutex_unlock(&epdata->lock);
688 return value;
741} 689}
742 690
743/*----------------------------------------------------------------------*/ 691/*----------------------------------------------------------------------*/
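
In the asynchronous branch of ep_read_iter() the caller's iov_iter only lives for the duration of the syscall, so the request keeps a private copy made with dup_iter() and the completion worker copies into that. A rough sketch of the pattern, with hypothetical names (my_priv, my_save_iter, my_complete) and assuming <linux/uio.h> and <linux/slab.h>:

    struct my_priv {
            struct iov_iter to;         /* private copy of the destination iterator */
            void            *to_free;   /* iovec array backing it, from dup_iter() */
    };

    /* at submission time, while the caller's iterator is still valid */
    static int my_save_iter(struct my_priv *p, struct iov_iter *to)
    {
            p->to_free = dup_iter(&p->to, to, GFP_KERNEL);
            return p->to_free ? 0 : -ENOMEM;
    }

    /* at completion time, typically from a workqueue running under use_mm() */
    static ssize_t my_complete(struct my_priv *p, const void *buf, size_t actual)
    {
            size_t done = copy_to_iter(buf, actual, &p->to);

            kfree(p->to_free);          /* pairs with the dup_iter() above */
            return done ? done : -EFAULT;
    }
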
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
745/* used after endpoint configuration */ 693/* used after endpoint configuration */
746static const struct file_operations ep_io_operations = { 694static const struct file_operations ep_io_operations = {
747 .owner = THIS_MODULE, 695 .owner = THIS_MODULE,
748 .llseek = no_llseek,
749 696
750 .read = ep_read, 697 .open = ep_open,
751 .write = ep_write,
752 .unlocked_ioctl = ep_ioctl,
753 .release = ep_release, 698 .release = ep_release,
754 699 .llseek = no_llseek,
755 .aio_read = ep_aio_read, 700 .read = new_sync_read,
756 .aio_write = ep_aio_write, 701 .write = new_sync_write,
702 .unlocked_ioctl = ep_ioctl,
703 .read_iter = ep_read_iter,
704 .write_iter = ep_write_iter,
757}; 705};
758 706
759/* ENDPOINT INITIALIZATION 707/* ENDPOINT INITIALIZATION
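
With ->read_iter()/->write_iter() wired into ep_io_operations, ordinary read(2)/write(2) go through the generic new_sync_read()/new_sync_write() wrappers, which build a one-segment iov_iter plus a synchronous kiocb and then call the _iter method. Roughly, and simplified from the generic code of that era (error checks trimmed):

    static ssize_t sync_read_sketch(struct file *filp, char __user *buf,
                                    size_t len, loff_t *ppos)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            struct kiocb kiocb;
            struct iov_iter iter;
            ssize_t ret;

            init_sync_kiocb(&kiocb, filp);          /* is_sync_kiocb() == true */
            kiocb.ki_pos = *ppos;
            iov_iter_init(&iter, READ, &iov, 1, len);

            ret = filp->f_op->read_iter(&kiocb, &iter);
            *ppos = kiocb.ki_pos;
            return ret;
    }
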
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = {
770 * speed descriptor, then optional high speed descriptor. 718 * speed descriptor, then optional high speed descriptor.
771 */ 719 */
772static ssize_t 720static ssize_t
773ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) 721ep_config (struct ep_data *data, const char *buf, size_t len)
774{ 722{
775 struct ep_data *data = fd->private_data;
776 struct usb_ep *ep; 723 struct usb_ep *ep;
777 u32 tag; 724 u32 tag;
778 int value, length = len; 725 int value, length = len;
779 726
780 value = mutex_lock_interruptible(&data->lock);
781 if (value < 0)
782 return value;
783
784 if (data->state != STATE_EP_READY) { 727 if (data->state != STATE_EP_READY) {
785 value = -EL2HLT; 728 value = -EL2HLT;
786 goto fail; 729 goto fail;
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
791 goto fail0; 734 goto fail0;
792 735
793 /* we might need to change message format someday */ 736 /* we might need to change message format someday */
794 if (copy_from_user (&tag, buf, 4)) { 737 memcpy(&tag, buf, 4);
795 goto fail1;
796 }
797 if (tag != 1) { 738 if (tag != 1) {
798 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); 739 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
799 goto fail0; 740 goto fail0;
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
806 */ 747 */
807 748
808 /* full/low speed descriptor, then high speed */ 749 /* full/low speed descriptor, then high speed */
809 if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { 750 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
810 goto fail1;
811 }
812 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE 751 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
813 || data->desc.bDescriptorType != USB_DT_ENDPOINT) 752 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
814 goto fail0; 753 goto fail0;
815 if (len != USB_DT_ENDPOINT_SIZE) { 754 if (len != USB_DT_ENDPOINT_SIZE) {
816 if (len != 2 * USB_DT_ENDPOINT_SIZE) 755 if (len != 2 * USB_DT_ENDPOINT_SIZE)
817 goto fail0; 756 goto fail0;
818 if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, 757 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
819 USB_DT_ENDPOINT_SIZE)) { 758 USB_DT_ENDPOINT_SIZE);
820 goto fail1;
821 }
822 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE 759 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
823 || data->hs_desc.bDescriptorType 760 || data->hs_desc.bDescriptorType
824 != USB_DT_ENDPOINT) { 761 != USB_DT_ENDPOINT) {
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
840 case USB_SPEED_LOW: 777 case USB_SPEED_LOW:
841 case USB_SPEED_FULL: 778 case USB_SPEED_FULL:
842 ep->desc = &data->desc; 779 ep->desc = &data->desc;
843 value = usb_ep_enable(ep);
844 if (value == 0)
845 data->state = STATE_EP_ENABLED;
846 break; 780 break;
847 case USB_SPEED_HIGH: 781 case USB_SPEED_HIGH:
848 /* fails if caller didn't provide that descriptor... */ 782 /* fails if caller didn't provide that descriptor... */
849 ep->desc = &data->hs_desc; 783 ep->desc = &data->hs_desc;
850 value = usb_ep_enable(ep);
851 if (value == 0)
852 data->state = STATE_EP_ENABLED;
853 break; 784 break;
854 default: 785 default:
855 DBG(data->dev, "unconnected, %s init abandoned\n", 786 DBG(data->dev, "unconnected, %s init abandoned\n",
856 data->name); 787 data->name);
857 value = -EINVAL; 788 value = -EINVAL;
789 goto gone;
858 } 790 }
791 value = usb_ep_enable(ep);
859 if (value == 0) { 792 if (value == 0) {
860 fd->f_op = &ep_io_operations; 793 data->state = STATE_EP_ENABLED;
861 value = length; 794 value = length;
862 } 795 }
863gone: 796gone:
@@ -867,14 +800,10 @@ fail:
867 data->desc.bDescriptorType = 0; 800 data->desc.bDescriptorType = 0;
868 data->hs_desc.bDescriptorType = 0; 801 data->hs_desc.bDescriptorType = 0;
869 } 802 }
870 mutex_unlock(&data->lock);
871 return value; 803 return value;
872fail0: 804fail0:
873 value = -EINVAL; 805 value = -EINVAL;
874 goto fail; 806 goto fail;
875fail1:
876 value = -EFAULT;
877 goto fail;
878} 807}
879 808
880static int 809static int
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd)
902 return value; 831 return value;
903} 832}
904 833
905/* used before endpoint configuration */
906static const struct file_operations ep_config_operations = {
907 .llseek = no_llseek,
908
909 .open = ep_open,
910 .write = ep_config,
911 .release = ep_release,
912};
913
914/*----------------------------------------------------------------------*/ 834/*----------------------------------------------------------------------*/
915 835
916/* EP0 IMPLEMENTATION can be partly in userspace. 836/* EP0 IMPLEMENTATION can be partly in userspace.
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
989 enum ep0_state state; 909 enum ep0_state state;
990 910
991 spin_lock_irq (&dev->lock); 911 spin_lock_irq (&dev->lock);
912 if (dev->state <= STATE_DEV_OPENED) {
913 retval = -EINVAL;
914 goto done;
915 }
992 916
993 /* report fd mode change before acting on it */ 917 /* report fd mode change before acting on it */
994 if (dev->setup_abort) { 918 if (dev->setup_abort) {
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1187 struct dev_data *dev = fd->private_data; 1111 struct dev_data *dev = fd->private_data;
1188 ssize_t retval = -ESRCH; 1112 ssize_t retval = -ESRCH;
1189 1113
1190 spin_lock_irq (&dev->lock);
1191
1192 /* report fd mode change before acting on it */ 1114 /* report fd mode change before acting on it */
1193 if (dev->setup_abort) { 1115 if (dev->setup_abort) {
1194 dev->setup_abort = 0; 1116 dev->setup_abort = 0;
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1234 } else 1156 } else
1235 DBG (dev, "fail %s, state %d\n", __func__, dev->state); 1157 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1236 1158
1237 spin_unlock_irq (&dev->lock);
1238 return retval; 1159 return retval;
1239} 1160}
1240 1161
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait)
1281 struct dev_data *dev = fd->private_data; 1202 struct dev_data *dev = fd->private_data;
1282 int mask = 0; 1203 int mask = 0;
1283 1204
1205 if (dev->state <= STATE_DEV_OPENED)
1206 return DEFAULT_POLLMASK;
1207
1284 poll_wait(fd, &dev->wait, wait); 1208 poll_wait(fd, &dev->wait, wait);
1285 1209
1286 spin_lock_irq (&dev->lock); 1210 spin_lock_irq (&dev->lock);
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1316 return ret; 1240 return ret;
1317} 1241}
1318 1242
1319/* used after device configuration */
1320static const struct file_operations ep0_io_operations = {
1321 .owner = THIS_MODULE,
1322 .llseek = no_llseek,
1323
1324 .read = ep0_read,
1325 .write = ep0_write,
1326 .fasync = ep0_fasync,
1327 .poll = ep0_poll,
1328 .unlocked_ioctl = dev_ioctl,
1329 .release = dev_release,
1330};
1331
1332/*----------------------------------------------------------------------*/ 1243/*----------------------------------------------------------------------*/
1333 1244
1334/* The in-kernel gadget driver handles most ep0 issues, in particular 1245/* The in-kernel gadget driver handles most ep0 issues, in particular
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev)
1650 goto enomem1; 1561 goto enomem1;
1651 1562
1652 data->dentry = gadgetfs_create_file (dev->sb, data->name, 1563 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1653 data, &ep_config_operations); 1564 data, &ep_io_operations);
1654 if (!data->dentry) 1565 if (!data->dentry)
1655 goto enomem2; 1566 goto enomem2;
1656 list_add_tail (&data->epfiles, &dev->epfiles); 1567 list_add_tail (&data->epfiles, &dev->epfiles);
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1852 u32 tag; 1763 u32 tag;
1853 char *kbuf; 1764 char *kbuf;
1854 1765
1766 spin_lock_irq(&dev->lock);
1767 if (dev->state > STATE_DEV_OPENED) {
1768 value = ep0_write(fd, buf, len, ptr);
1769 spin_unlock_irq(&dev->lock);
1770 return value;
1771 }
1772 spin_unlock_irq(&dev->lock);
1773
1855 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) 1774 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
1856 return -EINVAL; 1775 return -EINVAL;
1857 1776
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1925 * on, they can work ... except in cleanup paths that 1844 * on, they can work ... except in cleanup paths that
1926 * kick in after the ep0 descriptor is closed. 1845 * kick in after the ep0 descriptor is closed.
1927 */ 1846 */
1928 fd->f_op = &ep0_io_operations;
1929 value = len; 1847 value = len;
1930 } 1848 }
1931 return value; 1849 return value;
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd)
1956 return value; 1874 return value;
1957} 1875}
1958 1876
1959static const struct file_operations dev_init_operations = { 1877static const struct file_operations ep0_operations = {
1960 .llseek = no_llseek, 1878 .llseek = no_llseek,
1961 1879
1962 .open = dev_open, 1880 .open = dev_open,
1881 .read = ep0_read,
1963 .write = dev_config, 1882 .write = dev_config,
1964 .fasync = ep0_fasync, 1883 .fasync = ep0_fasync,
1884 .poll = ep0_poll,
1965 .unlocked_ioctl = dev_ioctl, 1885 .unlocked_ioctl = dev_ioctl,
1966 .release = dev_release, 1886 .release = dev_release,
1967}; 1887};
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2077 goto Enomem; 1997 goto Enomem;
2078 1998
2079 dev->sb = sb; 1999 dev->sb = sb;
2080 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); 2000 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2081 if (!dev->dentry) { 2001 if (!dev->dentry) {
2082 put_dev(dev); 2002 put_dev(dev);
2083 goto Enomem; 2003 goto Enomem;
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 3a494168661e..6e0a019aad54 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1740 goto err_session; 1740 goto err_session;
1741 } 1741 }
1742 /* 1742 /*
1743 * Now register the TCM vHost virtual I_T Nexus as active with the 1743 * Now register the TCM vHost virtual I_T Nexus as active.
1744 * call to __transport_register_session()
1745 */ 1744 */
1746 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1745 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1747 tv_nexus->tvn_se_sess, tv_nexus); 1746 tv_nexus->tvn_se_sess, tv_nexus);
1748 tpg->tpg_nexus = tv_nexus; 1747 tpg->tpg_nexus = tv_nexus;
1749 mutex_unlock(&tpg->tpg_mutex); 1748 mutex_unlock(&tpg->tpg_mutex);
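
The nexus registration above switches from __transport_register_session() to transport_register_session(); the difference is purely locking, since the plain variant takes the tpg session lock itself while the double-underscore variant expects the caller to already hold it. The wrapper is roughly (a sketch, not quoted from target-core):

    void transport_register_session(struct se_portal_group *se_tpg,
                                    struct se_node_acl *se_nacl,
                                    struct se_session *se_sess,
                                    void *fabric_sess_ptr)
    {
            unsigned long flags;

            spin_lock_irqsave(&se_tpg->session_lock, flags);
            __transport_register_session(se_tpg, se_nacl, se_sess,
                                         fabric_sess_ptr);
            spin_unlock_irqrestore(&se_tpg->session_lock, flags);
    }

The same one-line substitution appears again below in vhost/scsi.c and xen-scsiback.c.
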
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ff97ac93ac03..5ee95152493c 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = {
68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET, 68 .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
69 .bulk_buflen = GZERO_BULK_BUFLEN, 69 .bulk_buflen = GZERO_BULK_BUFLEN,
70 .qlen = GZERO_QLEN, 70 .qlen = GZERO_QLEN,
71 .int_interval = GZERO_INT_INTERVAL,
72 .int_maxpacket = GZERO_INT_MAXPACKET,
73}; 71};
74 72
75/*-------------------------------------------------------------------------*/ 73/*-------------------------------------------------------------------------*/
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
268 S_IRUGO|S_IWUSR); 266 S_IRUGO|S_IWUSR);
269MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); 267MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
270 268
271module_param_named(int_interval, gzero_options.int_interval, uint,
272 S_IRUGO|S_IWUSR);
273MODULE_PARM_DESC(int_interval, "1 - 16");
274
275module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint,
276 S_IRUGO|S_IWUSR);
277MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
278
279module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR);
280MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)");
281
282module_param_named(int_maxburst, gzero_options.int_maxburst, uint,
283 S_IRUGO|S_IWUSR);
284MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)");
285
286static struct usb_function *func_lb; 269static struct usb_function *func_lb;
287static struct usb_function_instance *func_inst_lb; 270static struct usb_function_instance *func_inst_lb;
288 271
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
318 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; 301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
319 ss_opts->isoc_mult = gzero_options.isoc_mult; 302 ss_opts->isoc_mult = gzero_options.isoc_mult;
320 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; 303 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
321 ss_opts->int_interval = gzero_options.int_interval;
322 ss_opts->int_maxpacket = gzero_options.int_maxpacket;
323 ss_opts->int_mult = gzero_options.int_mult;
324 ss_opts->int_maxburst = gzero_options.int_maxburst;
325 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 304 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
326 305
327 func_ss = usb_get_function(func_inst_ss); 306 func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 663f7908b15c..be0964a801e8 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel";
34 34
35struct atmel_ehci_priv { 35struct atmel_ehci_priv {
36 struct clk *iclk; 36 struct clk *iclk;
37 struct clk *fclk;
38 struct clk *uclk; 37 struct clk *uclk;
39 bool clocked; 38 bool clocked;
40}; 39};
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci)
51{ 50{
52 if (atmel_ehci->clocked) 51 if (atmel_ehci->clocked)
53 return; 52 return;
54 if (IS_ENABLED(CONFIG_COMMON_CLK)) { 53
55 clk_set_rate(atmel_ehci->uclk, 48000000); 54 clk_prepare_enable(atmel_ehci->uclk);
56 clk_prepare_enable(atmel_ehci->uclk);
57 }
58 clk_prepare_enable(atmel_ehci->iclk); 55 clk_prepare_enable(atmel_ehci->iclk);
59 clk_prepare_enable(atmel_ehci->fclk);
60 atmel_ehci->clocked = true; 56 atmel_ehci->clocked = true;
61} 57}
62 58
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci)
64{ 60{
65 if (!atmel_ehci->clocked) 61 if (!atmel_ehci->clocked)
66 return; 62 return;
67 clk_disable_unprepare(atmel_ehci->fclk); 63
68 clk_disable_unprepare(atmel_ehci->iclk); 64 clk_disable_unprepare(atmel_ehci->iclk);
69 if (IS_ENABLED(CONFIG_COMMON_CLK)) 65 clk_disable_unprepare(atmel_ehci->uclk);
70 clk_disable_unprepare(atmel_ehci->uclk);
71 atmel_ehci->clocked = false; 66 atmel_ehci->clocked = false;
72} 67}
73 68
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
146 retval = -ENOENT; 141 retval = -ENOENT;
147 goto fail_request_resource; 142 goto fail_request_resource;
148 } 143 }
149 atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); 144
150 if (IS_ERR(atmel_ehci->fclk)) { 145 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
151 dev_err(&pdev->dev, "Error getting function clock\n"); 146 if (IS_ERR(atmel_ehci->uclk)) {
152 retval = -ENOENT; 147 dev_err(&pdev->dev, "failed to get uclk\n");
148 retval = PTR_ERR(atmel_ehci->uclk);
153 goto fail_request_resource; 149 goto fail_request_resource;
154 } 150 }
155 if (IS_ENABLED(CONFIG_COMMON_CLK)) {
156 atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk");
157 if (IS_ERR(atmel_ehci->uclk)) {
158 dev_err(&pdev->dev, "failed to get uclk\n");
159 retval = PTR_ERR(atmel_ehci->uclk);
160 goto fail_request_resource;
161 }
162 }
163 151
164 ehci = hcd_to_ehci(hcd); 152 ehci = hcd_to_ehci(hcd);
165 /* registers start at offset 0x0 */ 153 /* registers start at offset 0x0 */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5fb66db89e05..73485fa4372f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1729 if (!command) 1729 if (!command)
1730 return; 1730 return;
1731 1731
1732 ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; 1732 ep->ep_state |= EP_HALTED;
1733 ep->stopped_stream = stream_id; 1733 ep->stopped_stream = stream_id;
1734 1734
1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index); 1735 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b06d1a53652d..ec8ac1674854 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1338 goto exit; 1338 goto exit;
1339 } 1339 }
1340 1340
1341 /* Reject urb if endpoint is in soft reset, queue must stay empty */
1342 if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) {
1343 xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n");
1344 ret = -EINVAL;
1345 }
1346
1347 if (usb_endpoint_xfer_isoc(&urb->ep->desc)) 1341 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1348 size = urb->number_of_packets; 1342 size = urb->number_of_packets;
1349 else 1343 else
@@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2954 } 2948 }
2955} 2949}
2956 2950
2957/* Called after clearing a halted device. USB core should have sent the control 2951/* Called when clearing halted device. The core should have sent the control
2958 * message to clear the device halt condition. The host side of the halt should 2952 * message to clear the device halt condition. The host side of the halt should
2959 * already be cleared with a reset endpoint command issued immediately when the 2953 * already be cleared with a reset endpoint command issued when the STALL tx
2960 * STALL tx event was received. 2954 * event was received.
2955 *
2956 * Context: in_interrupt
2961 */ 2957 */
2962 2958
2963void xhci_endpoint_reset(struct usb_hcd *hcd, 2959void xhci_endpoint_reset(struct usb_hcd *hcd,
2964 struct usb_host_endpoint *ep) 2960 struct usb_host_endpoint *ep)
2965{ 2961{
2966 struct xhci_hcd *xhci; 2962 struct xhci_hcd *xhci;
2967 struct usb_device *udev;
2968 struct xhci_virt_device *virt_dev;
2969 struct xhci_virt_ep *virt_ep;
2970 struct xhci_input_control_ctx *ctrl_ctx;
2971 struct xhci_command *command;
2972 unsigned int ep_index, ep_state;
2973 unsigned long flags;
2974 u32 ep_flag;
2975 2963
2976 xhci = hcd_to_xhci(hcd); 2964 xhci = hcd_to_xhci(hcd);
2977 udev = (struct usb_device *) ep->hcpriv;
2978 if (!ep->hcpriv)
2979 return;
2980 virt_dev = xhci->devs[udev->slot_id];
2981 ep_index = xhci_get_endpoint_index(&ep->desc);
2982 virt_ep = &virt_dev->eps[ep_index];
2983 ep_state = virt_ep->ep_state;
2984 2965
2985 /* 2966 /*
2986 * Implement the config ep command in xhci 4.6.8 additional note: 2967 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2987 * The Reset Endpoint Command may only be issued to endpoints in the 2968 * The Reset Endpoint Command may only be issued to endpoints in the
2988 * Halted state. If software wishes reset the Data Toggle or Sequence 2969 * Halted state. If software wishes reset the Data Toggle or Sequence
2989 * Number of an endpoint that isn't in the Halted state, then software 2970 * Number of an endpoint that isn't in the Halted state, then software
@@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2991 * for the target endpoint. that is in the Stopped state. 2972 * for the target endpoint. that is in the Stopped state.
2992 */ 2973 */
2993 2974
2994 if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { 2975 /* For now just print debug to follow the situation */
2995 virt_ep->ep_state &= ~EP_RECENTLY_HALTED; 2976 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2996 xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); 2977 ep->desc.bEndpointAddress);
2997 return;
2998 }
2999
3000 /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */
3001 if (usb_endpoint_xfer_control(&ep->desc) ||
3002 usb_endpoint_xfer_isoc(&ep->desc))
3003 return;
3004
3005 ep_flag = xhci_get_endpoint_flag(&ep->desc);
3006
3007 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3008 return;
3009
3010 command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT);
3011 if (!command) {
3012 xhci_err(xhci, "Could not allocate xHCI command structure.\n");
3013 return;
3014 }
3015
3016 spin_lock_irqsave(&xhci->lock, flags);
3017
3018 /* block ringing ep doorbell */
3019 virt_ep->ep_state |= EP_CONFIG_PENDING;
3020
3021 /*
3022 * Make sure endpoint ring is empty before resetting the toggle/seq.
3023 * Driver is required to synchronously cancel all transfer request.
3024 *
3025 * xhci 4.6.6 says we can issue a configure endpoint command on a
3026 * running endpoint ring as long as it's idle (queue empty)
3027 */
3028
3029 if (!list_empty(&virt_ep->ring->td_list)) {
3030 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3031 spin_unlock_irqrestore(&xhci->lock, flags);
3032 goto cleanup;
3033 }
3034
3035 xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n",
3036 udev->slot_id, ep_index);
3037
3038 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3039 if (!ctrl_ctx) {
3040 xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n",
3041 virt_dev, virt_dev->in_ctx);
3042 spin_unlock_irqrestore(&xhci->lock, flags);
3043 goto cleanup;
3044 }
3045 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3046 virt_dev->out_ctx, ctrl_ctx,
3047 ep_flag, ep_flag);
3048 xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index);
3049
3050 xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma,
3051 udev->slot_id, false);
3052 xhci_ring_cmd_db(xhci);
3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054
3055 wait_for_completion(command->completion);
3056
3057cleanup:
3058 virt_ep->ep_state &= ~EP_CONFIG_PENDING;
3059 xhci_free_command(xhci, command);
3060} 2978}
3061 2979
3062static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2980static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 265ab1771d24..8e421b89632d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
865#define EP_HAS_STREAMS (1 << 4) 865#define EP_HAS_STREAMS (1 << 4)
866/* Transitioning the endpoint to not using streams, don't enqueue URBs */ 866/* Transitioning the endpoint to not using streams, don't enqueue URBs */
867#define EP_GETTING_NO_STREAMS (1 << 5) 867#define EP_GETTING_NO_STREAMS (1 << 5)
868#define EP_RECENTLY_HALTED (1 << 6)
869#define EP_CONFIG_PENDING (1 << 7)
870 /* ---- Related to URB cancellation ---- */ 868 /* ---- Related to URB cancellation ---- */
871 struct list_head cancelled_td_list; 869 struct list_head cancelled_td_list;
872 struct xhci_td *stopped_td; 870 struct xhci_td *stopped_td;
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index b9827556455f..bfa402cf3a27 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
151 } 151 }
152 152
153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { 153 if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) {
154 ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | 154 ret = isp1760_udc_register(isp, irq, irqflags);
155 IRQF_DISABLED);
156 if (ret < 0) { 155 if (ret < 0) {
157 isp1760_hcd_unregister(&isp->hcd); 156 isp1760_hcd_unregister(&isp->hcd);
158 return ret; 157 return ret;
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 9612d7990565..f32c292cc868 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1191 struct usb_gadget_driver *driver) 1191 struct usb_gadget_driver *driver)
1192{ 1192{
1193 struct isp1760_udc *udc = gadget_to_udc(gadget); 1193 struct isp1760_udc *udc = gadget_to_udc(gadget);
1194 unsigned long flags;
1194 1195
1195 /* The hardware doesn't support low speed. */ 1196 /* The hardware doesn't support low speed. */
1196 if (driver->max_speed < USB_SPEED_FULL) { 1197 if (driver->max_speed < USB_SPEED_FULL) {
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1198 return -EINVAL; 1199 return -EINVAL;
1199 } 1200 }
1200 1201
1201 spin_lock(&udc->lock); 1202 spin_lock_irqsave(&udc->lock, flags);
1202 1203
1203 if (udc->driver) { 1204 if (udc->driver) {
1204 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1208 1209
1209 udc->driver = driver; 1210 udc->driver = driver;
1210 1211
1211 spin_unlock(&udc->lock); 1212 spin_unlock_irqrestore(&udc->lock, flags);
1212 1213
1213 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", 1214 dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
1214 driver->function); 1215 driver->function);
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1232static int isp1760_udc_stop(struct usb_gadget *gadget) 1233static int isp1760_udc_stop(struct usb_gadget *gadget)
1233{ 1234{
1234 struct isp1760_udc *udc = gadget_to_udc(gadget); 1235 struct isp1760_udc *udc = gadget_to_udc(gadget);
1236 unsigned long flags;
1235 1237
1236 dev_dbg(udc->isp->dev, "%s\n", __func__); 1238 dev_dbg(udc->isp->dev, "%s\n", __func__);
1237 1239
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget)
1239 1241
1240 isp1760_udc_write(udc, DC_MODE, 0); 1242 isp1760_udc_write(udc, DC_MODE, 0);
1241 1243
1242 spin_lock(&udc->lock); 1244 spin_lock_irqsave(&udc->lock, flags);
1243 udc->driver = NULL; 1245 udc->driver = NULL;
1244 spin_unlock(&udc->lock); 1246 spin_unlock_irqrestore(&udc->lock, flags);
1245 1247
1246 return 0; 1248 return 0;
1247} 1249}
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc)
1411 return -ENODEV; 1413 return -ENODEV;
1412 } 1414 }
1413 1415
1414 if (chipid != 0x00011582) { 1416 if (chipid != 0x00011582 && chipid != 0x00158210) {
1415 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); 1417 dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
1416 return -ENODEV; 1418 return -ENODEV;
1417 } 1419 }
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
1451 1453
1452 sprintf(udc->irqname, "%s (udc)", devname); 1454 sprintf(udc->irqname, "%s (udc)", devname);
1453 1455
1454 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | 1456 ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags,
1455 irqflags, udc->irqname, udc); 1457 udc->irqname, udc);
1456 if (ret < 0) 1458 if (ret < 0)
1457 goto error; 1459 goto error;
1458 1460
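
Two independent fixes in this file: IRQF_DISABLED has long been a no-op and is dropped from the request_irq() flags, and the UDC lock moves to the irqsave/irqrestore variants because the same lock is also taken from the shared interrupt handler. A self-contained sketch of why that matters, with illustrative names only:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned int demo_events;

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
            spin_lock(&demo_lock);          /* interrupts are already off here */
            demo_events++;
            spin_unlock(&demo_lock);
            return IRQ_HANDLED;
    }

    static void demo_update(unsigned int v)
    {
            unsigned long flags;

            /* a plain spin_lock() here could deadlock: demo_irq() may fire on
             * this CPU while the lock is held with interrupts still enabled */
            spin_lock_irqsave(&demo_lock, flags);
            demo_events = v;
            spin_unlock_irqrestore(&demo_lock, flags);
    }
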
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 14e1628483d9..39db8b603627 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010
79 79
80config USB_MUSB_OMAP2PLUS 80config USB_MUSB_OMAP2PLUS
81 tristate "OMAP2430 and onwards" 81 tristate "OMAP2430 and onwards"
82 depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY 82 depends on ARCH_OMAP2PLUS && USB
83 depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY
83 select GENERIC_PHY 84 select GENERIC_PHY
84 85
85config USB_MUSB_AM35X 86config USB_MUSB_AM35X
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 403fab772724..7b3035ff9434 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
126 return NULL; 126 return NULL;
127 127
128 dev = bus_find_device(&platform_bus_type, NULL, node, match); 128 dev = bus_find_device(&platform_bus_type, NULL, node, match);
129 if (!dev)
130 return NULL;
131
129 ctrl_usb = dev_get_drvdata(dev); 132 ctrl_usb = dev_get_drvdata(dev);
130 if (!ctrl_usb) 133 if (!ctrl_usb)
131 return NULL; 134 return NULL;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 82570425fdfe..c85ea530085f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
113 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 113 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
114 US_FL_NO_ATA_1X), 114 US_FL_NO_ATA_1X),
115 115
116/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
117UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
118 "Initio Corporation",
119 "",
120 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
121 US_FL_NO_ATA_1X),
122
116/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 123/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
117UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, 124UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
118 "JMicron", 125 "JMicron",
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index f88bfdf5b6a0..2027a27546ef 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
868 func = vfio_pci_set_err_trigger; 868 func = vfio_pci_set_err_trigger;
869 break; 869 break;
870 } 870 }
871 break;
871 case VFIO_PCI_REQ_IRQ_INDEX: 872 case VFIO_PCI_REQ_IRQ_INDEX:
872 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { 873 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
873 case VFIO_IRQ_SET_ACTION_TRIGGER: 874 case VFIO_IRQ_SET_ACTION_TRIGGER:
874 func = vfio_pci_set_req_trigger; 875 func = vfio_pci_set_req_trigger;
875 break; 876 break;
876 } 877 }
878 break;
877 } 879 }
878 880
879 if (!func) 881 if (!func)
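
The two added break statements close a fall-through in the outer switch on the IRQ index: a request for the error IRQ would otherwise run on into the request-IRQ case and could end up with the wrong handler. A self-contained illustration of the shape of the bug (hypothetical names, not the vfio code):

    enum irq_index  { ERR_IRQ_INDEX, REQ_IRQ_INDEX };
    enum irq_action { ACTION_TRIGGER, ACTION_OTHER };

    typedef int (*set_irq_fn)(void);
    static int set_err_trigger(void) { return 1; }
    static int set_req_trigger(void) { return 2; }

    static set_irq_fn pick_handler(enum irq_index index, enum irq_action action)
    {
            set_irq_fn func = NULL;

            switch (index) {
            case ERR_IRQ_INDEX:
                    switch (action) {
                    case ACTION_TRIGGER:
                            func = set_err_trigger;
                            break;
                    default:
                            break;
                    }
                    break;  /* without this, execution falls into REQ_IRQ_INDEX */
            case REQ_IRQ_INDEX:
                    switch (action) {
                    case ACTION_TRIGGER:
                            func = set_req_trigger;
                            break;
                    default:
                            break;
                    }
                    break;
            }
            return func;
    }
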
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8d4f3f1ff799..71df240a467a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1956 goto out; 1956 goto out;
1957 } 1957 }
1958 /* 1958 /*
1959 * Now register the TCM vhost virtual I_T Nexus as active with the 1959 * Now register the TCM vhost virtual I_T Nexus as active.
1960 * call to __transport_register_session()
1961 */ 1960 */
1962 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, 1961 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1963 tv_nexus->tvn_se_sess, tv_nexus); 1962 tv_nexus->tvn_se_sess, tv_nexus);
1964 tpg->tpg_nexus = tv_nexus; 1963 tpg->tpg_nexus = tv_nexus;
1965 1964
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0413157f3b49..6a356e344f82 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/balloon_compaction.h> 30#include <linux/balloon_compaction.h>
31#include <linux/oom.h> 31#include <linux/oom.h>
32#include <linux/wait.h>
32 33
33/* 34/*
34 * Balloon device works in 4K page units. So each page is pointed to by 35 * Balloon device works in 4K page units. So each page is pointed to by
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
334static int balloon(void *_vballoon) 335static int balloon(void *_vballoon)
335{ 336{
336 struct virtio_balloon *vb = _vballoon; 337 struct virtio_balloon *vb = _vballoon;
338 DEFINE_WAIT_FUNC(wait, woken_wake_function);
337 339
338 set_freezable(); 340 set_freezable();
339 while (!kthread_should_stop()) { 341 while (!kthread_should_stop()) {
340 s64 diff; 342 s64 diff;
341 343
342 try_to_freeze(); 344 try_to_freeze();
343 wait_event_interruptible(vb->config_change, 345
344 (diff = towards_target(vb)) != 0 346 add_wait_queue(&vb->config_change, &wait);
345 || vb->need_stats_update 347 for (;;) {
346 || kthread_should_stop() 348 if ((diff = towards_target(vb)) != 0 ||
347 || freezing(current)); 349 vb->need_stats_update ||
350 kthread_should_stop() ||
351 freezing(current))
352 break;
353 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
354 }
355 remove_wait_queue(&vb->config_change, &wait);
356
348 if (vb->need_stats_update) 357 if (vb->need_stats_update)
349 stats_handle_request(vb); 358 stats_handle_request(vb);
350 if (diff > 0) 359 if (diff > 0)
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
499 if (err < 0) 508 if (err < 0)
500 goto out_oom_notify; 509 goto out_oom_notify;
501 510
511 virtio_device_ready(vdev);
512
502 vb->thread = kthread_run(balloon, vb, "vballoon"); 513 vb->thread = kthread_run(balloon, vb, "vballoon");
503 if (IS_ERR(vb->thread)) { 514 if (IS_ERR(vb->thread)) {
504 err = PTR_ERR(vb->thread); 515 err = PTR_ERR(vb->thread);
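
The balloon thread stops using wait_event_interruptible() because that macro evaluates its condition after the task has been marked TASK_INTERRUPTIBLE, and towards_target() reads virtio config space, which may sleep, triggering "do not call blocking ops when !TASK_RUNNING" warnings. The open-coded loop above checks the condition while still TASK_RUNNING and only then sleeps via wait_woken(). The general shape of the idiom, with a hypothetical wait queue and condition, assuming <linux/wait.h>:

    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(&my_waitqueue, &wait);
    for (;;) {
            if (my_condition_holds())       /* may sleep: still TASK_RUNNING */
                    break;
            wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
    }
    remove_wait_queue(&my_waitqueue, &wait);
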
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index cad569890908..6010d7ec0a0f 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset,
156 void *buf, unsigned len) 156 void *buf, unsigned len)
157{ 157{
158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 158 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
159 u8 *ptr = buf; 159 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
160 int i; 160 u8 b;
161 __le16 w;
162 __le32 l;
161 163
162 for (i = 0; i < len; i++) 164 if (vm_dev->version == 1) {
163 ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 165 u8 *ptr = buf;
166 int i;
167
168 for (i = 0; i < len; i++)
169 ptr[i] = readb(base + offset + i);
170 return;
171 }
172
173 switch (len) {
174 case 1:
175 b = readb(base + offset);
176 memcpy(buf, &b, sizeof b);
177 break;
178 case 2:
179 w = cpu_to_le16(readw(base + offset));
180 memcpy(buf, &w, sizeof w);
181 break;
182 case 4:
183 l = cpu_to_le32(readl(base + offset));
184 memcpy(buf, &l, sizeof l);
185 break;
186 case 8:
187 l = cpu_to_le32(readl(base + offset));
188 memcpy(buf, &l, sizeof l);
189 l = cpu_to_le32(ioread32(base + offset + sizeof l));
190 memcpy(buf + sizeof l, &l, sizeof l);
191 break;
192 default:
193 BUG();
194 }
164} 195}
165 196
166static void vm_set(struct virtio_device *vdev, unsigned offset, 197static void vm_set(struct virtio_device *vdev, unsigned offset,
167 const void *buf, unsigned len) 198 const void *buf, unsigned len)
168{ 199{
169 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 200 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
170 const u8 *ptr = buf; 201 void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
171 int i; 202 u8 b;
203 __le16 w;
204 __le32 l;
172 205
173 for (i = 0; i < len; i++) 206 if (vm_dev->version == 1) {
174 writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 207 const u8 *ptr = buf;
208 int i;
209
210 for (i = 0; i < len; i++)
211 writeb(ptr[i], base + offset + i);
212
213 return;
214 }
215
216 switch (len) {
217 case 1:
218 memcpy(&b, buf, sizeof b);
219 writeb(b, base + offset);
220 break;
221 case 2:
222 memcpy(&w, buf, sizeof w);
223 writew(le16_to_cpu(w), base + offset);
224 break;
225 case 4:
226 memcpy(&l, buf, sizeof l);
227 writel(le32_to_cpu(l), base + offset);
228 break;
229 case 8:
230 memcpy(&l, buf, sizeof l);
231 writel(le32_to_cpu(l), base + offset);
232 memcpy(&l, buf + sizeof l, sizeof l);
233 writel(le32_to_cpu(l), base + offset + sizeof l);
234 break;
235 default:
236 BUG();
237 }
238}
239
240static u32 vm_generation(struct virtio_device *vdev)
241{
242 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
243
244 if (vm_dev->version == 1)
245 return 0;
246 else
247 return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
175} 248}
176 249
177static u8 vm_get_status(struct virtio_device *vdev) 250static u8 vm_get_status(struct virtio_device *vdev)
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev)
440static const struct virtio_config_ops virtio_mmio_config_ops = { 513static const struct virtio_config_ops virtio_mmio_config_ops = {
441 .get = vm_get, 514 .get = vm_get,
442 .set = vm_set, 515 .set = vm_set,
516 .generation = vm_generation,
443 .get_status = vm_get_status, 517 .get_status = vm_get_status,
444 .set_status = vm_set_status, 518 .set_status = vm_set_status,
445 .reset = vm_reset, 519 .reset = vm_reset,
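
The version-1 (legacy) transport treats the config window as a plain byte array, while the virtio 1.0 layout requires accesses of the field's natural width in little-endian order, with 64-bit fields split into two 32-bit reads or writes; the new generation counter lets a driver detect a device-side config change racing with such a multi-word access. A sketch of how the counter is typically consumed, mirroring the virtio_cread helpers (vdev and offset are placeholders):

    u32 gen, old;
    u64 val;

    do {
            old = vdev->config->generation(vdev);
            vdev->config->get(vdev, offset, &val, sizeof(val));
            gen = vdev->config->generation(vdev);
    } while (gen != old);
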
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b4bca2d4a7e5..70fba973a107 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq)
526 pirq_query_unmask(irq); 526 pirq_query_unmask(irq);
527 527
528 rc = set_evtchn_to_irq(evtchn, irq); 528 rc = set_evtchn_to_irq(evtchn, irq);
529 if (rc != 0) { 529 if (rc)
530 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", 530 goto err;
531 irq, rc); 531
532 xen_evtchn_close(evtchn);
533 return 0;
534 }
535 bind_evtchn_to_cpu(evtchn, 0); 532 bind_evtchn_to_cpu(evtchn, 0);
536 info->evtchn = evtchn; 533 info->evtchn = evtchn;
537 534
535 rc = xen_evtchn_port_setup(info);
536 if (rc)
537 goto err;
538
538out: 539out:
539 unmask_evtchn(evtchn); 540 unmask_evtchn(evtchn);
540 eoi_pirq(irq_get_irq_data(irq)); 541 eoi_pirq(irq_get_irq_data(irq));
541 542
542 return 0; 543 return 0;
544
545err:
546 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
547 xen_evtchn_close(evtchn);
548 return 0;
543} 549}
544 550
545static unsigned int startup_pirq(struct irq_data *data) 551static unsigned int startup_pirq(struct irq_data *data)
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 46ae0f9f02ad..75fe3d466515 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
16#include "conf_space.h" 16#include "conf_space.h"
17#include "conf_space_quirks.h" 17#include "conf_space_quirks.h"
18 18
19static bool permissive; 19bool permissive;
20module_param(permissive, bool, 0644); 20module_param(permissive, bool, 0644);
21 21
22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, 22/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index e56c934ad137..2e1d73d1d5d0 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,6 +64,8 @@ struct config_field_entry {
64 void *data; 64 void *data;
65}; 65};
66 66
67extern bool permissive;
68
67#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) 69#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
68 70
69/* Add fields to a device - the add_fields macro expects to get a pointer to 71/* Add fields to a device - the add_fields macro expects to get a pointer to
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c5ee82587e8c..2d7369391472 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -11,6 +11,10 @@
11#include "pciback.h" 11#include "pciback.h"
12#include "conf_space.h" 12#include "conf_space.h"
13 13
14struct pci_cmd_info {
15 u16 val;
16};
17
14struct pci_bar_info { 18struct pci_bar_info {
15 u32 val; 19 u32 val;
16 u32 len_val; 20 u32 len_val;
@@ -20,22 +24,36 @@ struct pci_bar_info {
20#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) 24#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
21#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) 25#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
22 26
23static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) 27/* Bits guests are allowed to control in permissive mode. */
28#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
29 PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
30 PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
31
32static void *command_init(struct pci_dev *dev, int offset)
24{ 33{
25 int i; 34 struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
26 int ret; 35 int err;
27 36
28 ret = xen_pcibk_read_config_word(dev, offset, value, data); 37 if (!cmd)
29 if (!pci_is_enabled(dev)) 38 return ERR_PTR(-ENOMEM);
30 return ret; 39
31 40 err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
32 for (i = 0; i < PCI_ROM_RESOURCE; i++) { 41 if (err) {
33 if (dev->resource[i].flags & IORESOURCE_IO) 42 kfree(cmd);
34 *value |= PCI_COMMAND_IO; 43 return ERR_PTR(err);
35 if (dev->resource[i].flags & IORESOURCE_MEM)
36 *value |= PCI_COMMAND_MEMORY;
37 } 44 }
38 45
46 return cmd;
47}
48
49static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
50{
51 int ret = pci_read_config_word(dev, offset, value);
52 const struct pci_cmd_info *cmd = data;
53
54 *value &= PCI_COMMAND_GUEST;
55 *value |= cmd->val & ~PCI_COMMAND_GUEST;
56
39 return ret; 57 return ret;
40} 58}
41 59
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
43{ 61{
44 struct xen_pcibk_dev_data *dev_data; 62 struct xen_pcibk_dev_data *dev_data;
45 int err; 63 int err;
64 u16 val;
65 struct pci_cmd_info *cmd = data;
46 66
47 dev_data = pci_get_drvdata(dev); 67 dev_data = pci_get_drvdata(dev);
48 if (!pci_is_enabled(dev) && is_enable_cmd(value)) { 68 if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
83 } 103 }
84 } 104 }
85 105
106 cmd->val = value;
107
108 if (!permissive && (!dev_data || !dev_data->permissive))
109 return 0;
110
111 /* Only allow the guest to control certain bits. */
112 err = pci_read_config_word(dev, offset, &val);
113 if (err || val == value)
114 return err;
115
116 value &= PCI_COMMAND_GUEST;
117 value |= val & ~PCI_COMMAND_GUEST;
118
86 return pci_write_config_word(dev, offset, value); 119 return pci_write_config_word(dev, offset, value);
87} 120}
88 121
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
282 { 315 {
283 .offset = PCI_COMMAND, 316 .offset = PCI_COMMAND,
284 .size = 2, 317 .size = 2,
318 .init = command_init,
319 .release = bar_release,
285 .u.w.read = command_read, 320 .u.w.read = command_read,
286 .u.w.write = command_write, 321 .u.w.write = command_write,
287 }, 322 },
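
Taken together, command_init()/command_read()/command_write() make the guest always read back the command value it last wrote, while only the PCI_COMMAND_GUEST bits are merged with real hardware state, and the final config-space write only happens in permissive mode. The bit merging on both paths reduces to two small helpers (illustrative only, not driver code):

    /* what the guest should see: hardware state for the guest-owned bits,
     * the last guest write for everything else */
    static u16 guest_command_view(u16 hw_val, u16 saved_guest_val)
    {
            return (hw_val & PCI_COMMAND_GUEST) |
                   (saved_guest_val & ~PCI_COMMAND_GUEST);
    }

    /* what to write to hardware in permissive mode: guest-owned bits from
     * the guest, everything else as the device currently has it */
    static u16 guest_command_write_val(u16 hw_val, u16 guest_value)
    {
            return (guest_value & PCI_COMMAND_GUEST) |
                   (hw_val & ~PCI_COMMAND_GUEST);
    }
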
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 9faca6a60bb0..42bd55a6c237 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg,
1659 name); 1659 name);
1660 goto out; 1660 goto out;
1661 } 1661 }
1662 /* 1662 /* Now register the TCM pvscsi virtual I_T Nexus as active. */
1663 * Now register the TCM pvscsi virtual I_T Nexus as active with the 1663 transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1664 * call to __transport_register_session()
1665 */
1666 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1667 tv_nexus->tvn_se_sess, tv_nexus); 1664 tv_nexus->tvn_se_sess, tv_nexus);
1668 tpg->tpg_nexus = tv_nexus; 1665 tpg->tpg_nexus = tv_nexus;
1669 1666