Diffstat (limited to 'drivers')
 drivers/acpi/thermal.c | 2
 drivers/ata/libata-core.c | 8
 drivers/block/virtio_blk.c | 4
 drivers/clk/clk-divider.c | 2
 drivers/clk/st/clkgen-pll.c | 4
 drivers/clk/tegra/clk-pll.c | 64
 drivers/clocksource/tcb_clksrc.c | 8
 drivers/clocksource/timer-marco.c | 2
 drivers/cpufreq/cpufreq-cpu0.c | 16
 drivers/cpufreq/cpufreq_governor.c | 6
 drivers/dma/dmaengine.c | 2
 drivers/dma/dw/core.c | 11
 drivers/dma/mv_xor.c | 8
 drivers/dma/sa11x0-dma.c | 4
 drivers/firewire/core.h | 4
 drivers/firewire/ohci.c | 2
 drivers/gpu/drm/i915/i915_dma.c | 1
 drivers/gpu/drm/i915/i915_drv.h | 30
 drivers/gpu/drm/i915/i915_gem.c | 365
 drivers/gpu/drm/i915/i915_gem_evict.c | 9
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
 drivers/gpu/drm/i915/i915_gem_gtt.c | 4
 drivers/gpu/drm/i915/intel_display.c | 11
 drivers/gpu/drm/i915/intel_overlay.c | 12
 drivers/gpu/drm/radeon/radeon_cs.c | 21
 drivers/gpu/drm/radeon/radeon_device.c | 11
 drivers/gpu/drm/radeon/radeon_display.c | 2
 drivers/gpu/drm/radeon/radeon_vm.c | 6
 drivers/hwmon/Kconfig | 2
 drivers/hwmon/ntc_thermistor.c | 15
 drivers/input/keyboard/Kconfig | 2
 drivers/input/keyboard/pxa27x_keypad.c | 7
 drivers/input/mouse/Kconfig | 2
 drivers/input/mouse/synaptics.c | 166
 drivers/input/serio/ambakmi.c | 3
 drivers/input/touchscreen/Kconfig | 2
 drivers/md/dm-cache-target.c | 2
 drivers/md/dm-mpath.c | 14
 drivers/md/dm-thin.c | 12
 drivers/md/md.c | 5
 drivers/net/can/led.c | 3
 drivers/net/ethernet/Kconfig | 1
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 12
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
 drivers/net/ethernet/broadcom/cnic.c | 29
 drivers/net/ethernet/ibm/emac/mal.c | 5
 drivers/net/ethernet/ibm/emac/mal.h | 20
 drivers/net/ethernet/ibm/emac/rgmii.c | 3
 drivers/net/ethernet/mellanox/mlx4/main.c | 1
 drivers/net/ethernet/mellanox/mlx4/mlx4.h | 20
 drivers/net/ethernet/mellanox/mlx4/port.c | 117
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1
 drivers/net/ethernet/renesas/sh_eth.c | 32
 drivers/net/ethernet/renesas/sh_eth.h | 2
 drivers/net/ethernet/smsc/smsc911x.c | 2
 drivers/net/ethernet/ti/davinci_emac.c | 1
 drivers/net/team/team.c | 7
 drivers/net/usb/ipheth.c | 10
 drivers/net/usb/qmi_wwan.c | 6
 drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
 drivers/scsi/scsi_transport_sas.c | 3
 drivers/staging/comedi/drivers/ni_daq_700.c | 2
 drivers/staging/rtl8192e/rtllib_tx.c | 2
 drivers/staging/speakup/main.c | 1
 drivers/staging/speakup/selection.c | 52
 drivers/staging/speakup/speakup.h | 1
 drivers/staging/speakup/speakup_acntsa.c | 8
 drivers/tty/tty_buffer.c | 2
 drivers/usb/core/driver.c | 9
 drivers/usb/core/hub.c | 15
 drivers/usb/host/pci-quirks.c | 7
 drivers/usb/host/xhci-mem.c | 20
 drivers/usb/serial/ftdi_sio.c | 2
 drivers/usb/serial/ftdi_sio_ids.h | 5
 drivers/usb/serial/io_ti.c | 2
 drivers/usb/serial/io_usbvend.h | 2
 drivers/usb/serial/option.c | 2
 78 files changed, 816 insertions(+), 602 deletions(-)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
 
 static void __exit acpi_thermal_exit(void)
 {
-	destroy_workqueue(acpi_thermal_pm_queue);
 	acpi_bus_unregister_driver(&acpi_thermal_driver);
+	destroy_workqueue(acpi_thermal_pm_queue);
 
 	return;
 }
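The reordering matters because the driver's notify path can keep queueing work on acpi_thermal_pm_queue until the driver is unregistered, so the workqueue must outlive the registration. A minimal sketch of the rule (hypothetical module, names assumed, not from this patch):

	/* Teardown mirrors init: silence the work source (unregister)
	 * before destroying what the work runs on (the workqueue). */
	static struct workqueue_struct *wq;

	static int __init example_init(void)
	{
		wq = alloc_workqueue("example_pm", WQ_FREEZABLE, 0);
		if (!wq)
			return -ENOMEM;
		return example_bus_register();	/* may start queueing onto wq */
	}

	static void __exit example_exit(void)
	{
		example_bus_unregister();	/* no new work after this */
		destroy_workqueue(wq);		/* flushes remaining work, then frees */
	}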
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ea83828bfea9..18d97d5c7d90 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4225 4225
4226 /* devices that don't properly handle queued TRIM commands */ 4226 /* devices that don't properly handle queued TRIM commands */
4227 { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4227 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4228 { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4231 4231
4232 /* 4232 /*
4233 * Some WD SATA-I drives spin up and down erratically when the link 4233 * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vq);
-		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
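Both hunks keep the hardware-queue stop/start under vq_lock. The hang this prevents, as an assumed interleaving:

	/*
	 * submit path (old code)          completion path (virtblk_done)
	 * ----------------------          ------------------------------
	 * __virtblk_add_req() fails
	 * unlock vq_lock
	 *                                 lock vq_lock, reap, req_done
	 *                                 start stopped hw queues (no-op:
	 *                                 queue not stopped yet), unlock
	 * blk_mq_stop_hw_queue()
	 * ... no completion left to restart the queue -> stall
	 */

With the unlock moved after the stop/start, the two paths are serialized under the lock and the wakeup cannot be lost.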
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 4637697c139f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
 static int _round_up_table(const struct clk_div_table *table, int div)
 {
 	const struct clk_div_table *clkt;
-	int up = _get_table_maxdiv(table);
+	int up = INT_MAX;
 
 	for (clkt = table; clkt->div; clkt++) {
 		if (clkt->div == div)
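Seeding with INT_MAX makes the helper actually round up: any table divider >= div beats the sentinel, and a request larger than every table entry no longer returns a too-small divider. A sketch of the whole search, assuming the rest of the loop keeps the shape shown above:

	static int round_up_table(const struct clk_div_table *table, int div)
	{
		const struct clk_div_table *clkt;
		int up = INT_MAX;	/* "no candidate yet" */

		for (clkt = table; clkt->div; clkt++) {
			if (clkt->div == div)
				return clkt->div;	/* exact hit */
			if (clkt->div < div)
				continue;		/* would round down */
			if (clkt->div - div < up - div)
				up = clkt->div;		/* closest from above */
		}
		return up;
	}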
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
 	gate->lock = odf_lock;
 
 	div = kzalloc(sizeof(*div), GFP_KERNEL);
-	if (!div)
+	if (!div) {
+		kfree(gate);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
 	div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index e1769addf435..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
 #define PLLDU_LFCON_SET_DIVN 600
 
 #define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
 #define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
 #define PLLE_BASE_DIVN_SHIFT 8
 #define PLLE_BASE_DIVN_WIDTH 8
 #define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
 #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
 		      mask(p->params->div_nmp->divp_width))
 
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
 #define divm_max(p) (divm_mask(p))
 #define divn_max(p) (divn_mask(p))
 #define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
 	} else {
 		val = pll_readl_base(pll);
 
-		val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
-			 (divn_mask(pll) << div_nmp->divn_shift) |
-			 (divp_mask(pll) << div_nmp->divp_shift));
+		val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divp_mask_shifted(pll));
 
-		val |= ((cfg->m << div_nmp->divm_shift) |
-			(cfg->n << div_nmp->divn_shift) |
-			(cfg->p << div_nmp->divp_shift));
+		val |= (cfg->m << divm_shift(pll)) |
+		       (cfg->n << divn_shift(pll)) |
+		       (cfg->p << divp_shift(pll));
 
 		pll_writel_base(val, pll);
 	}
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
 	if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
 		/* configure dividers */
 		val = pll_readl_base(pll);
-		val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-		val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-		val |= sel.m << pll->params->div_nmp->divm_shift;
-		val |= sel.n << pll->params->div_nmp->divn_shift;
-		val |= sel.p << pll->params->div_nmp->divp_shift;
+		val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divm_mask_shifted(pll));
+		val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+		val |= sel.m << divm_shift(pll);
+		val |= sel.n << divn_shift(pll);
+		val |= sel.p << divp_shift(pll);
 		val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 		pll_writel_base(val, pll);
 	}
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
 	pll_writel_misc(val, pll);
 
 	val = readl(pll->clk_base + PLLE_SS_CTRL);
+	val &= ~PLLE_SS_COEFFICIENTS_MASK;
 	val |= PLLE_SS_DISABLE;
 	writel(val, pll->clk_base + PLLE_SS_CTRL);
 
-	val |= pll_readl_base(pll);
+	val = pll_readl_base(pll);
 	val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
 	pll_writel_base(val, pll);
 
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 	pll_writel(val, PLLE_SS_CTRL, pll);
 
 	val = pll_readl_base(pll);
-	val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-	val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-	val |= sel.m << pll->params->div_nmp->divm_shift;
-	val |= sel.n << pll->params->div_nmp->divn_shift;
+	val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+		 divm_mask_shifted(pll));
+	val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+	val |= sel.m << divm_shift(pll);
+	val |= sel.n << divn_shift(pll);
 	val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 	pll_writel_base(val, pll);
 	udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
 	return clk;
 }
 
+static struct div_nmp pll_e_nmp = {
+	.divn_shift = PLLE_BASE_DIVN_SHIFT,
+	.divn_width = PLLE_BASE_DIVN_WIDTH,
+	.divm_shift = PLLE_BASE_DIVM_SHIFT,
+	.divm_width = PLLE_BASE_DIVM_WIDTH,
+	.divp_shift = PLLE_BASE_DIVP_SHIFT,
+	.divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
 struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 		void __iomem *clk_base, void __iomem *pmc,
 		unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 
 	pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
 	pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+	if (!pll_params->div_nmp)
+		pll_params->div_nmp = &pll_e_nmp;
+
 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
 	if (IS_ERR(pll))
 		return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
 		int m;
 
 		m = _pll_fixed_mdiv(pll_params, parent_rate);
-		val = m << PLL_BASE_DIVM_SHIFT;
-		val |= (pll_params->vco_min / parent_rate)
-				<< PLL_BASE_DIVN_SHIFT;
+		val = m << divm_shift(pll);
+		val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
 		pll_writel_base(val, pll);
 	}
 
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 	    || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
 		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
 		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-		clk_disable_unprepare(tcd->clk);
+		clk_disable(tcd->clk);
 	}
 
 	switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 	 * of oneshot, we get lower overhead and improved accuracy.
 	 */
 	case CLOCK_EVT_MODE_PERIODIC:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);
 
 		/* slow clock, count up to RC, then irq and restart */
 		__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);
 
 		/* slow clock, count up to RC, then irq and stop */
 		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	ret = clk_prepare_enable(t2_clk);
 	if (ret)
 		return ret;
-	clk_disable_unprepare(t2_clk);
+	clk_disable(t2_clk);
 
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
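The driver now keeps the clock prepared from setup onwards and only gates it at runtime, because the mode callback runs in atomic context where clk_prepare() (which may sleep) is forbidden. The split in the common clk API, sketched:

	/* clk_prepare() may sleep: do it once from process context. */
	static int timer_clk_setup(struct clk *clk)
	{
		int ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		clk_disable(clk);	/* stays prepared, just gated */
		return 0;
	}

	/* clk_enable()/clk_disable() never sleep: safe in a mode switch. */
	static void timer_clk_gate(struct clk *clk, bool on)
	{
		if (on)
			clk_enable(clk);
		else
			clk_disable(clk);
	}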
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 
 	action->dev_id = ce;
 	BUG_ON(setup_irq(ce->irq, action));
-	irq_set_affinity(action->irq, cpumask_of(cpu));
+	irq_force_affinity(action->irq, cpumask_of(cpu));
 
 	clockevents_register_device(ce);
 	return 0;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		return -ENOENT;
 	}
 
-	cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
+	cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
 	if (IS_ERR(cpu_reg)) {
 		/*
 		 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 			PTR_ERR(cpu_reg));
 	}
 
-	cpu_clk = devm_clk_get(cpu_dev, NULL);
+	cpu_clk = clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
 		ret = PTR_ERR(cpu_clk);
 		pr_err("failed to get cpu0 clock: %d\n", ret);
-		goto out_put_node;
+		goto out_put_reg;
 	}
 
 	ret = of_init_opp_table(cpu_dev);
 	if (ret) {
 		pr_err("failed to init OPP table: %d\n", ret);
-		goto out_put_node;
+		goto out_put_clk;
 	}
 
 	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
 		pr_err("failed to init cpufreq table: %d\n", ret);
-		goto out_put_node;
+		goto out_put_clk;
 	}
 
 	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
 out_free_table:
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_clk:
+	if (!IS_ERR(cpu_clk))
+		clk_put(cpu_clk);
+out_put_reg:
+	if (!IS_ERR(cpu_reg))
+		regulator_put(cpu_reg);
 out_put_node:
 	of_node_put(np);
 	return ret;
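Dropping the devm_* helpers means every later failure path must release what probe already holds, in reverse order of acquisition, which is what the new out_put_clk/out_put_reg labels do. The ladder, as a generic sketch with hypothetical helpers:

	static int probe_sketch(struct device *dev)
	{
		struct regulator *reg;
		struct clk *clk;
		int ret;

		reg = regulator_get_optional(dev, "cpu0");	/* failure tolerated: supply is optional */

		clk = clk_get(dev, NULL);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			goto out_put_reg;
		}

		ret = start_hw(dev);	/* hypothetical later step */
		if (ret)
			goto out_put_clk;
		return 0;

	out_put_clk:
		clk_put(clk);
	out_put_reg:
		if (!IS_ERR(reg))
			regulator_put(reg);
		return ret;
	}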
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		mutex_lock(&dbs_data->mutex);
+		if (!cpu_cdbs->cur_policy) {
+			mutex_unlock(&dbs_data->mutex);
+			break;
+		}
 		mutex_lock(&cpu_cdbs->timer_mutex);
 		if (policy->max < cpu_cdbs->cur_policy->cur)
 			__cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					policy->min, CPUFREQ_RELATION_L);
 		dbs_check_cpu(dbs_data, cpu);
 		mutex_unlock(&cpu_cdbs->timer_mutex);
+		mutex_unlock(&dbs_data->mutex);
 		break;
 	}
 	return 0;
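The new dbs_data->mutex critical section closes a window against a concurrent governor stop, roughly (assumed interleaving):

	/*
	 * CPU A: GOV_STOP                 CPU B: GOV_LIMITS (old code)
	 * ---------------                 ----------------------------
	 * cpu_cdbs->cur_policy = NULL
	 *                                 cpu_cdbs->cur_policy->cur  <- NULL deref
	 */

Holding the mutex and re-checking cur_policy makes GOV_LIMITS observe either the fully started or fully stopped governor, never the half-torn-down state.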
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
 			       DMA_BIDIRECTIONAL);
 	}
+	cnt = unmap->map_cnt;
 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 	memset(unmap, 0, sizeof(*unmap));
 	kref_init(&unmap->kref);
 	unmap->dev = dev;
+	unmap->map_cnt = nr;
 
 	return unmap;
 }
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-	err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
-			       IRQF_SHARED, "dw_dmac", dw);
-	if (err)
-		return err;
-
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
 					 sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
+	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+			  "dw_dmac", dw);
+	if (err)
+		return err;
+
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
 	dw_dma_off(dw);
 	dma_async_device_unregister(&dw->dma);
 
+	free_irq(chip->irq, dw);
 	tasklet_kill(&dw->tasklet);
 
 	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
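Taken together, the three hunks fix IRQ lifetime ordering: the handler schedules dw->tasklet, so it must not be installed before tasklet_init(), and it must be gone (free_irq()) before tasklet_kill(), or a late interrupt can re-schedule a dead tasklet. Sketch with hypothetical names:

	struct sketch_dev {
		struct tasklet_struct tasklet;
	};

	static int sketch_probe(int irq, struct sketch_dev *d)
	{
		/* ISR must find a live tasklet, so init it first */
		tasklet_init(&d->tasklet, sketch_tasklet_fn, (unsigned long)d);
		return request_irq(irq, sketch_isr, IRQF_SHARED, "sketch", d);
	}

	static void sketch_remove(int irq, struct sketch_dev *d)
	{
		free_irq(irq, d);		/* no ISR -> no new schedules */
		tasklet_kill(&d->tasklet);	/* now safe to wait it out */
	}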
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
-
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = readl_relaxed(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+	/* writel ensures all descriptors are flushed before activation */
+	writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
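The switch from writel_relaxed() to writel() is the whole fix: on ARM, writel() issues a write barrier before the MMIO store, so descriptors written to coherent memory are observable by the XOR engine before the doorbell lands; writel_relaxed() gives no such ordering guarantee. In outline:

	/* old: the MMIO store may overtake the descriptor writes */
	desc->byte_count = len;				/* normal memory */
	writel_relaxed(BIT(0), XOR_ACTIVATION(chan));	/* may be reordered */

	/* new: barrier, then MMIO - descriptors are flushed first */
	desc->byte_count = len;
	writel(BIT(0), XOR_ACTIVATION(chan));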
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
 	struct sa11x0_dma_desc	*txd_load;
 	unsigned		sg_done;
 	struct sa11x0_dma_desc	*txd_done;
-#ifdef CONFIG_PM_SLEEP
 	u32			dbs[2];
 	u32			dbt[2];
 	u32			dcsr;
-#endif
 };
 
 struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int sa11x0_dma_suspend(struct device *dev)
 {
 	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
 
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
 	.suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
 
 #define LOCAL_BUS 0xffc0
 
-/* arbitrarily chosen maximum range for physical DMA: 128 TB */
-#define FW_MAX_PHYSICAL_RANGE		(128ULL << 40)
+/* OHCI-1394's default upper bound for physical DMA: 4 GB */
+#define FW_MAX_PHYSICAL_RANGE		(1ULL << 32)
 
 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
 		  version >> 16, version & 0xff, ohci->card.index,
 		  ohci->n_ir, ohci->n_it, ohci->quirks,
 		  reg_read(ohci, OHCI1394_PhyUpperBound) ?
-			", >4 GB phys DMA" : "");
+			", physUB" : "");
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
-				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
 					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
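PIN_OFFSET_BIAS packs a page-aligned minimum offset into the same 64-bit flags word as the flag bits: bits 0-11 stay free for flags because the bias is a multiple of 4096, and PIN_OFFSET_MASK (~4095) recovers it. That is also why the flags parameter widens from unsigned to uint64_t. A sketch of the encoding:

	uint64_t flags = PIN_GLOBAL | PIN_OFFSET_BIAS | (256 * 1024);
	uint64_t bias  = flags & PIN_OFFSET_MASK;	/* 0x40000: minimum GTT offset */
	bool mappable  = flags & PIN_MAPPABLE;		/* low flag bits unaffected */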
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
 		WARN(vma->pin_count,
 		     "bo is already pinned with incorrect alignment:"
 		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 		     " obj->map_and_fenceable=%d\n",
 		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
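The new i915_gem_phys_pwrite() uses the standard fast/slow user-copy pattern: attempt the non-faulting copy with struct_mutex held, and only on failure drop the lock, take the page faults, and retake it; this is safe here only because phys_handle is fixed for the object's lifetime. In outline:

	if (__copy_from_user_inatomic_nocache(dst, src, n)) {	/* fast: no faults allowed */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(dst, src, n);	/* slow: may fault and sleep */
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}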
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;
 
 	flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 	/* Unbind any ill-fitting objects or pin. */
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-		bool need_fence, need_mappable;
-
-		obj = vma->obj;
-
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		need_fence =
-			has_fenced_gpu_access &&
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(vma);
-
-		WARN_ON((need_mappable || need_fence) &&
-		       !i915_is_ggtt(vma->vm));
-
-		if ((entry->alignment &&
-		     vma->node.start & (entry->alignment - 1)) ||
-		    (need_mappable && !obj->map_and_fenceable))
+		if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 			ret = i915_vma_unbind(vma);
 		else
 			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1153 goto err; 1191 goto err;
1154 1192
1155 /* take note of the batch buffer before we might reorder the lists */ 1193 /* take note of the batch buffer before we might reorder the lists */
1156 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; 1194 batch_obj = eb_get_batch(eb);
1157 1195
1158 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1196 /* Move the objects en-masse into the GTT, evicting if necessary. */
1159 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1197 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
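
eb_get_batch() above does more than fetch the last VMA: tagging it with __EXEC_OBJECT_NEEDS_BIAS makes the reservation code (see eb_vma_misplaced() earlier) keep the batch at or above BATCH_OFFSET_BIAS. A sketch of the failure it prevents, with illustrative values:

    /* A negative relocation delta applied to a batch placed at the
     * very bottom of the GTT wraps instead of staying positive. */
    u32 batch_start = 0x1000;               /* batch near the GTT base  */
    s32 delta       = -0x2000;              /* SNA-style negative delta */
    u32 target      = batch_start + delta;  /* wraps to 0xfffff000      */

Keeping batch_start at or above BATCH_OFFSET_BIAS ensures batch_start + delta cannot wrap for the delta range userspace generates.
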
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1355 1393
1356 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1394 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1357 if (!ret) { 1395 if (!ret) {
1396 struct drm_i915_gem_exec_object __user *user_exec_list =
1397 to_user_ptr(args->buffers_ptr);
1398
1358 /* Copy the new buffer offsets back to the user's exec list. */ 1399 /* Copy the new buffer offsets back to the user's exec list. */
1359 for (i = 0; i < args->buffer_count; i++) 1400 for (i = 0; i < args->buffer_count; i++) {
1360 exec_list[i].offset = exec2_list[i].offset; 1401 ret = __copy_to_user(&user_exec_list[i].offset,
1361 /* ... and back out to userspace */ 1402 &exec2_list[i].offset,
1362 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1403 sizeof(user_exec_list[i].offset));
1363 exec_list, 1404 if (ret) {
1364 sizeof(*exec_list) * args->buffer_count); 1405 ret = -EFAULT;
1365 if (ret) { 1406 DRM_DEBUG("failed to copy %d exec entries "
1366 ret = -EFAULT; 1407 "back to user (%d)\n",
1367 DRM_DEBUG("failed to copy %d exec entries " 1408 args->buffer_count, ret);
1368 "back to user (%d)\n", 1409 break;
1369 args->buffer_count, ret); 1410 }
1370 } 1411 }
1371 } 1412 }
1372 1413
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1412 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1453 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1413 if (!ret) { 1454 if (!ret) {
1414 /* Copy the new buffer offsets back to the user's exec list. */ 1455 /* Copy the new buffer offsets back to the user's exec list. */
1415 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1456 struct drm_i915_gem_exec_object2 *user_exec_list =
1416 exec2_list, 1457 to_user_ptr(args->buffers_ptr);
1417 sizeof(*exec2_list) * args->buffer_count); 1458 int i;
1418 if (ret) { 1459
1419 ret = -EFAULT; 1460 for (i = 0; i < args->buffer_count; i++) {
1420 DRM_DEBUG("failed to copy %d exec entries " 1461 ret = __copy_to_user(&user_exec_list[i].offset,
1421 "back to user (%d)\n", 1462 &exec2_list[i].offset,
1422 args->buffer_count, ret); 1463 sizeof(user_exec_list[i].offset));
1464 if (ret) {
1465 ret = -EFAULT;
1466 DRM_DEBUG("failed to copy %d exec entries "
1467 "back to user\n",
1468 args->buffer_count);
1469 break;
1470 }
1423 } 1471 }
1424 } 1472 }
1425 1473
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
1089 if (ret == -ENOSPC && !retried) { 1089 if (ret == -ENOSPC && !retried) {
1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1091 GEN6_PD_SIZE, GEN6_PD_ALIGN, 1091 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1092 I915_CACHE_NONE, 0); 1092 I915_CACHE_NONE,
1093 0, dev_priv->gtt.base.total,
1094 0);
1093 if (ret) 1095 if (ret)
1094 return ret; 1096 return ret;
1095 1097
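
The two extra arguments threaded into i915_gem_evict_something() here read as a search window: callers with no placement restriction scan the whole address space. A hedged sketch of the widened call, with the parameter meaning inferred from the values passed above:

    ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                   GEN6_PD_SIZE, GEN6_PD_ALIGN,
                                   I915_CACHE_NONE,
                                   0,                        /* range start */
                                   dev_priv->gtt.base.total, /* range end   */
                                   0);                       /* flags       */
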
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7825 addr = i915_gem_obj_ggtt_offset(obj); 7825 addr = i915_gem_obj_ggtt_offset(obj);
7826 } else { 7826 } else {
7827 int align = IS_I830(dev) ? 16 * 1024 : 256; 7827 int align = IS_I830(dev) ? 16 * 1024 : 256;
7828 ret = i915_gem_attach_phys_object(dev, obj, 7828 ret = i915_gem_object_attach_phys(obj, align);
7829 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7830 align);
7831 if (ret) { 7829 if (ret) {
7832 DRM_DEBUG_KMS("failed to attach phys object\n"); 7830 DRM_DEBUG_KMS("failed to attach phys object\n");
7833 goto fail_locked; 7831 goto fail_locked;
7834 } 7832 }
7835 addr = obj->phys_obj->handle->busaddr; 7833 addr = obj->phys_handle->busaddr;
7836 } 7834 }
7837 7835
7838 if (IS_GEN2(dev)) 7836 if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7840 7838
7841 finish: 7839 finish:
7842 if (intel_crtc->cursor_bo) { 7840 if (intel_crtc->cursor_bo) {
7843 if (INTEL_INFO(dev)->cursor_needs_physical) { 7841 if (!INTEL_INFO(dev)->cursor_needs_physical)
7844 if (intel_crtc->cursor_bo != obj)
7845 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7846 } else
7847 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 7842 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7848 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 7843 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7849 } 7844 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
1340 overlay->reg_bo = reg_bo; 1340 overlay->reg_bo = reg_bo;
1341 1341
1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1343 ret = i915_gem_attach_phys_object(dev, reg_bo, 1343 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1344 I915_GEM_PHYS_OVERLAY_REGS,
1345 PAGE_SIZE);
1346 if (ret) { 1344 if (ret) {
1347 DRM_ERROR("failed to attach phys overlay regs\n"); 1345 DRM_ERROR("failed to attach phys overlay regs\n");
1348 goto out_free_bo; 1346 goto out_free_bo;
1349 } 1347 }
1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1348 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1351 } else { 1349 } else {
1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); 1350 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
1353 if (ret) { 1351 if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1428 /* Cast to make sparse happy, but it's wc memory anyway, so 1426 /* Cast to make sparse happy, but it's wc memory anyway, so
1429 * equivalent to the wc io mapping on X86. */ 1427 * equivalent to the wc io mapping on X86. */
1430 regs = (struct overlay_registers __iomem *) 1428 regs = (struct overlay_registers __iomem *)
1431 overlay->reg_bo->phys_obj->handle->vaddr; 1429 overlay->reg_bo->phys_handle->vaddr;
1432 else 1430 else
1433 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1431 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1434 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1432 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1462 error->dovsta = I915_READ(DOVSTA); 1460 error->dovsta = I915_READ(DOVSTA);
1463 error->isr = I915_READ(ISR); 1461 error->isr = I915_READ(ISR);
1464 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1462 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1465 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1463 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1466 else 1464 else
1467 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); 1465 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1468 1466
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
152 uint32_t domain = r->write_domain ? 152 uint32_t domain = r->write_domain ?
153 r->write_domain : r->read_domains; 153 r->write_domain : r->read_domains;
154 154
155 if (domain & RADEON_GEM_DOMAIN_CPU) {
156 DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
157 "for command submission\n");
158 return -EINVAL;
159 }
160
155 p->relocs[i].domain = domain; 161 p->relocs[i].domain = domain;
156 if (domain == RADEON_GEM_DOMAIN_VRAM) 162 if (domain == RADEON_GEM_DOMAIN_VRAM)
157 domain |= RADEON_GEM_DOMAIN_GTT; 163 domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
342 return -EINVAL; 348 return -EINVAL;
343 349
344 /* we only support VM on some SI+ rings */ 350 /* we only support VM on some SI+ rings */
345 if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && 351 if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
346 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { 352 if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
347 DRM_ERROR("Ring %d requires VM!\n", p->ring); 353 DRM_ERROR("Ring %d requires VM!\n", p->ring);
348 return -EINVAL; 354 return -EINVAL;
355 }
356 } else {
357 if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
358 DRM_ERROR("VM not supported on ring %d!\n",
359 p->ring);
360 return -EINVAL;
361 }
349 } 362 }
350 } 363 }
351 364
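
The restructured check above now validates both directions: a non-VM submission needs a CPU command parser (cs_parse), while a VM submission needs an IB parser (ib_parse). The same predicate as a standalone helper (name hypothetical, ring table layout as used above):

    static bool radeon_ring_supports_submission(struct radeon_device *rdev,
                                                int ring, bool use_vm)
    {
            if (use_vm)
                    return rdev->asic->ring[ring]->ib_parse != NULL;
            return rdev->asic->ring[ring]->cs_parse != NULL;
    }
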
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1533 1533
1534 radeon_restore_bios_scratch_regs(rdev); 1534 radeon_restore_bios_scratch_regs(rdev);
1535 1535
1536 if (fbcon) {
1537 radeon_fbdev_set_suspend(rdev, 0);
1538 console_unlock();
1539 }
1540
1541 /* init dig PHYs, disp eng pll */ 1536 /* init dig PHYs, disp eng pll */
1542 if (rdev->is_atom_bios) { 1537 if (rdev->is_atom_bios) {
1543 radeon_atom_encoder_init(rdev); 1538 radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1562 } 1557 }
1563 1558
1564 drm_kms_helper_poll_enable(dev); 1559 drm_kms_helper_poll_enable(dev);
1560
1561 if (fbcon) {
1562 radeon_fbdev_set_suspend(rdev, 0);
1563 console_unlock();
1564 }
1565
1565 return 0; 1566 return 0;
1566} 1567}
1567 1568
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f00dbbf4d806..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
862 unsigned *fb_div, unsigned *ref_div) 862 unsigned *fb_div, unsigned *ref_div)
863{ 863{
864 /* limit reference * post divider to a maximum */ 864 /* limit reference * post divider to a maximum */
865 ref_div_max = min(128 / post_div, ref_div_max); 865 ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
866 866
867 /* get matching reference and feedback divider */ 867 /* get matching reference and feedback divider */
868 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); 868 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
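
The new clamp both tightens the reference * post-divider budget (128 -> 100) and adds a floor of 1, so a large post divider can no longer zero out ref_div_max and collapse the min/max chain on the following line. Worked example using the kernel's min()/max() macros:

    /* post_div = 200: the old code gave min(128 / 200, 128) = 0,
     * which forced *ref_div to 0; the new form keeps a floor of 1. */
    unsigned int post_div = 200, ref_div_max = 128;
    ref_div_max = max(min(100 / post_div, ref_div_max), 1u);  /* == 1 */
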
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d9ab99f47612..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
130 struct list_head *head) 130 struct list_head *head)
131{ 131{
132 struct radeon_cs_reloc *list; 132 struct radeon_cs_reloc *list;
133 unsigned i, idx, size; 133 unsigned i, idx;
134 134
135 size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); 135 list = kmalloc_array(vm->max_pde_used + 1,
136 list = kmalloc(size, GFP_KERNEL); 136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
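
Two fixes in one here: the list is sized from max_pde_used (only the page directories actually in use) rather than the worst case, and kmalloc_array() replaces the open-coded multiplication, failing cleanly if n * size would overflow. The idiom:

    /* kmalloc_array() returns NULL on n * size overflow instead of
     * silently allocating a too-small buffer. */
    list = kmalloc_array(n, sizeof(*list), GFP_KERNEL);
    if (!list)
            return NULL;
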
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support"
1056 depends on (!OF && !IIO) || (OF && IIO) 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistor sensor reading and its 1058 This driver supports NTC thermistor sensor reading and its
1059 interpretation. The driver can also monitor the temperature and 1059 interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
44 unsigned int ohm; 44 unsigned int ohm;
45}; 45};
46 46
47/* Order matters, ntc_match references the entries by index */
47static const struct platform_device_id ntc_thermistor_id[] = { 48static const struct platform_device_id ntc_thermistor_id[] = {
48 { "ncp15wb473", TYPE_NCPXXWB473 }, 49 { "ncp15wb473", TYPE_NCPXXWB473 },
49 { "ncp18wb473", TYPE_NCPXXWB473 }, 50 { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
141 char name[PLATFORM_NAME_SIZE]; 142 char name[PLATFORM_NAME_SIZE];
142}; 143};
143 144
144#ifdef CONFIG_OF 145#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) 146static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
146{ 147{
147 struct iio_channel *channel = pdata->chan; 148 struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163 164
164static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
165 { .compatible = "ntc,ncp15wb473", 166 { .compatible = "ntc,ncp15wb473",
166 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 167 .data = &ntc_thermistor_id[0] },
167 { .compatible = "ntc,ncp18wb473", 168 { .compatible = "ntc,ncp18wb473",
168 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 169 .data = &ntc_thermistor_id[1] },
169 { .compatible = "ntc,ncp21wb473", 170 { .compatible = "ntc,ncp21wb473",
170 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 171 .data = &ntc_thermistor_id[2] },
171 { .compatible = "ntc,ncp03wb473", 172 { .compatible = "ntc,ncp03wb473",
172 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 173 .data = &ntc_thermistor_id[3] },
173 { .compatible = "ntc,ncp15wl333", 174 { .compatible = "ntc,ncp15wl333",
174 .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, 175 .data = &ntc_thermistor_id[4] },
175 { }, 176 { },
176}; 177};
177MODULE_DEVICE_TABLE(of, ntc_match); 178MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
223 return NULL; 224 return NULL;
224} 225}
225 226
227#define ntc_match NULL
228
226static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) 229static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
227{ } 230{ }
228#endif 231#endif
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
71 default y 71 default y
72 select SERIO 72 select SERIO
73 select SERIO_LIBPS2 73 select SERIO_LIBPS2
74 select SERIO_I8042 if X86 74 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
75 select SERIO_GSCPS2 if GSC 75 select SERIO_GSCPS2 if GSC
76 help 76 help
77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually 77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
111 unsigned short keycodes[MAX_KEYPAD_KEYS]; 111 unsigned short keycodes[MAX_KEYPAD_KEYS];
112 int rotary_rel_code[2]; 112 int rotary_rel_code[2];
113 113
114 unsigned int row_shift;
115
114 /* state row bits of each column scan */ 116 /* state row bits of each column scan */
115 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; 117 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
116 uint32_t direct_key_state; 118 uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
467 if ((bits_changed & (1 << row)) == 0) 469 if ((bits_changed & (1 << row)) == 0)
468 continue; 470 continue;
469 471
470 code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); 472 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
473
471 input_event(input_dev, EV_MSC, MSC_SCAN, code); 474 input_event(input_dev, EV_MSC, MSC_SCAN, code);
472 input_report_key(input_dev, keypad->keycodes[code], 475 input_report_key(input_dev, keypad->keycodes[code],
473 new_state[col] & (1 << row)); 476 new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
802 goto failed_put_clk; 805 goto failed_put_clk;
803 } 806 }
804 807
808 keypad->row_shift = get_count_order(pdata->matrix_key_cols);
809
805 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || 810 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
806 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { 811 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
807 input_dev->evbit[0] |= BIT_MASK(EV_REL); 812 input_dev->evbit[0] |= BIT_MASK(EV_REL);
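
MATRIX_SCAN_CODE() packs (row, col) into a keymap index by shifting the row left by enough bits to hold a full column index; deriving row_shift from the board's actual matrix_key_cols with get_count_order() keeps those indices aligned with the keymap built at probe time, where the old fixed MATRIX_ROW_SHIFT only matched one matrix size. The packing, spelled out:

    #include <linux/input/matrix_keypad.h>

    /* For an 8-column pad, get_count_order(8) == 3, so
     * code == (row << 3) + col -- the same layout the keymap uses. */
    unsigned int row_shift = get_count_order(pdata->matrix_key_cols);
    unsigned int code = MATRIX_SCAN_CODE(row, col, row_shift);
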
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
17 default y 17 default y
18 select SERIO 18 select SERIO
19 select SERIO_LIBPS2 19 select SERIO_LIBPS2
20 select SERIO_I8042 if X86 20 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
21 select SERIO_GSCPS2 if GSC 21 select SERIO_GSCPS2 if GSC
22 help 22 help
23 Say Y here if you have a PS/2 mouse connected to your system. This 23 Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d68d33fb5ac2..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
117} 117}
118 118
119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS 119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
120struct min_max_quirk {
121 const char * const *pnp_ids;
122 int x_min, x_max, y_min, y_max;
123};
124
125static const struct min_max_quirk min_max_pnpid_table[] = {
126 {
127 (const char * const []){"LEN0033", NULL},
128 1024, 5052, 2258, 4832
129 },
130 {
131 (const char * const []){"LEN0035", "LEN0042", NULL},
132 1232, 5710, 1156, 4696
133 },
134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
136 1024, 5112, 2024, 4832
137 },
138 {
139 (const char * const []){"LEN2001", NULL},
140 1024, 5022, 2508, 4832
141 },
142 { }
143};
144
120/* This list has been kindly provided by Synaptics. */ 145/* This list has been kindly provided by Synaptics. */
121static const char * const topbuttonpad_pnp_ids[] = { 146static const char * const topbuttonpad_pnp_ids[] = {
122 "LEN0017", 147 "LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
129 "LEN002D", 154 "LEN002D",
130 "LEN002E", 155 "LEN002E",
131 "LEN0033", /* Helix */ 156 "LEN0033", /* Helix */
132 "LEN0034", /* T431s, T540, X1 Carbon 2nd */ 157 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
133 "LEN0035", /* X240 */ 158 "LEN0035", /* X240 */
134 "LEN0036", /* T440 */ 159 "LEN0036", /* T440 */
135 "LEN0037", 160 "LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
142 "LEN0048", 167 "LEN0048",
143 "LEN0049", 168 "LEN0049",
144 "LEN2000", 169 "LEN2000",
145 "LEN2001", 170 "LEN2001", /* Edge E431 */
146 "LEN2002", 171 "LEN2002",
147 "LEN2003", 172 "LEN2003",
148 "LEN2004", /* L440 */ 173 "LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
156 NULL 181 NULL
157}; 182};
158 183
184static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
185{
186 int i;
187
188 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
189 for (i = 0; ids[i]; i++)
190 if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
191 return true;
192
193 return false;
194}
195
159/***************************************************************************** 196/*****************************************************************************
160 * Synaptics communications functions 197 * Synaptics communications functions
161 ****************************************************************************/ 198 ****************************************************************************/
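
matches_pnp_id() gives both consumers (the min/max quirk table above and the top-button-pad list) a single firmware-id scan over a NULL-terminated array. A hypothetical lookup built on it:

    static const struct min_max_quirk *
    find_min_max_quirk(struct psmouse *psmouse)
    {
            int i;

            for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
                    if (matches_pnp_id(psmouse,
                                       min_max_pnpid_table[i].pnp_ids))
                            return &min_max_pnpid_table[i];
            return NULL;
    }

which is exactly the shape synaptics_resolution() takes in the next hunk.
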
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
304 * Resolution is left zero if touchpad does not support the query 341 * Resolution is left zero if touchpad does not support the query
305 */ 342 */
306 343
307static const int *quirk_min_max;
308
309static int synaptics_resolution(struct psmouse *psmouse) 344static int synaptics_resolution(struct psmouse *psmouse)
310{ 345{
311 struct synaptics_data *priv = psmouse->private; 346 struct synaptics_data *priv = psmouse->private;
312 unsigned char resp[3]; 347 unsigned char resp[3];
348 int i;
313 349
314 if (quirk_min_max) { 350 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
315 priv->x_min = quirk_min_max[0]; 351 if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
316 priv->x_max = quirk_min_max[1]; 352 priv->x_min = min_max_pnpid_table[i].x_min;
317 priv->y_min = quirk_min_max[2]; 353 priv->x_max = min_max_pnpid_table[i].x_max;
318 priv->y_max = quirk_min_max[3]; 354 priv->y_min = min_max_pnpid_table[i].y_min;
319 return 0; 355 priv->y_max = min_max_pnpid_table[i].y_max;
320 } 356 return 0;
357 }
321 358
322 if (SYN_ID_MAJOR(priv->identity) < 4) 359 if (SYN_ID_MAJOR(priv->identity) < 4)
323 return 0; 360 return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
1365 1402
1366 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1403 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1367 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1404 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1368 /* See if this buttonpad has a top button area */ 1405 if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
1369 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { 1406 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1370 for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
1371 if (strstr(psmouse->ps2dev.serio->firmware_id,
1372 topbuttonpad_pnp_ids[i])) {
1373 __set_bit(INPUT_PROP_TOPBUTTONPAD,
1374 dev->propbit);
1375 break;
1376 }
1377 }
1378 }
1379 /* Clickpads report only left button */ 1407 /* Clickpads report only left button */
1380 __clear_bit(BTN_RIGHT, dev->keybit); 1408 __clear_bit(BTN_RIGHT, dev->keybit);
1381 __clear_bit(BTN_MIDDLE, dev->keybit); 1409 __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
1547 { } 1575 { }
1548}; 1576};
1549 1577
1550static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1551#if defined(CONFIG_DMI)
1552 {
1553 /* Lenovo ThinkPad Helix */
1554 .matches = {
1555 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1556 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
1557 },
1558 .driver_data = (int []){1024, 5052, 2258, 4832},
1559 },
1560 {
1561 /* Lenovo ThinkPad X240 */
1562 .matches = {
1563 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1564 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
1565 },
1566 .driver_data = (int []){1232, 5710, 1156, 4696},
1567 },
1568 {
1569 /* Lenovo ThinkPad Edge E431 */
1570 .matches = {
1571 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1572 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
1573 },
1574 .driver_data = (int []){1024, 5022, 2508, 4832},
1575 },
1576 {
1577 /* Lenovo ThinkPad T431s */
1578 .matches = {
1579 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1580 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
1581 },
1582 .driver_data = (int []){1024, 5112, 2024, 4832},
1583 },
1584 {
1585 /* Lenovo ThinkPad T440s */
1586 .matches = {
1587 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1588 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
1589 },
1590 .driver_data = (int []){1024, 5112, 2024, 4832},
1591 },
1592 {
1593 /* Lenovo ThinkPad L440 */
1594 .matches = {
1595 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1596 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
1597 },
1598 .driver_data = (int []){1024, 5112, 2024, 4832},
1599 },
1600 {
1601 /* Lenovo ThinkPad T540p */
1602 .matches = {
1603 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1604 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1605 },
1606 .driver_data = (int []){1024, 5056, 2058, 4832},
1607 },
1608 {
1609 /* Lenovo ThinkPad L540 */
1610 .matches = {
1611 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1612 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
1613 },
1614 .driver_data = (int []){1024, 5112, 2024, 4832},
1615 },
1616 {
1617 /* Lenovo Yoga S1 */
1618 .matches = {
1619 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1620 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
1621 "ThinkPad S1 Yoga"),
1622 },
1623 .driver_data = (int []){1232, 5710, 1156, 4696},
1624 },
1625 {
1626 /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
1627 .matches = {
1628 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1629 DMI_MATCH(DMI_PRODUCT_VERSION,
1630 "ThinkPad X1 Carbon 2nd"),
1631 },
1632 .driver_data = (int []){1024, 5112, 2024, 4832},
1633 },
1634#endif
1635 { }
1636};
1637
1638void __init synaptics_module_init(void) 1578void __init synaptics_module_init(void)
1639{ 1579{
1640 const struct dmi_system_id *min_max_dmi;
1641
1642 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); 1580 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
1643 broken_olpc_ec = dmi_check_system(olpc_dmi_table); 1581 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
1644
1645 min_max_dmi = dmi_first_match(min_max_dmi_table);
1646 if (min_max_dmi)
1647 quirk_min_max = min_max_dmi->driver_data;
1648} 1582}
1649 1583
1650static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) 1584static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
79 writeb(divisor, KMICLKDIV); 79 writeb(divisor, KMICLKDIV);
80 writeb(KMICR_EN, KMICR); 80 writeb(KMICR_EN, KMICR);
81 81
82 ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); 82 ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
83 kmi);
83 if (ret) { 84 if (ret) {
84 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); 85 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
85 writeb(0, KMICR); 86 writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
640 640
641config TOUCHSCREEN_WM97XX_ATMEL 641config TOUCHSCREEN_WM97XX_ATMEL
642 tristate "WM97xx Atmel accelerated touch" 642 tristate "WM97xx Atmel accelerated touch"
643 depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) 643 depends on TOUCHSCREEN_WM97XX && AVR32
644 help 644 help
645 Say Y here for support for streaming mode with WM97xx touchscreens 645 Say Y here for support for streaming mode with WM97xx touchscreens
646 on Atmel AT91 or AVR32 systems with an AC97C module. 646 on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2178 ti->num_discard_bios = 1; 2178 ti->num_discard_bios = 1;
2179 ti->discards_supported = true; 2179 ti->discards_supported = true;
2180 ti->discard_zeroes_data_unsupported = true; 2180 ti->discard_zeroes_data_unsupported = true;
2181 /* Discard bios must be split on a block boundary */
2182 ti->split_discard_bios = true;
2181 2183
2182 cache->features = ca->features; 2184 cache->features = ca->features;
2183 ti->per_bio_data_size = get_per_bio_data_size(cache); 2185 ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a41..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
445 else 445 else
446 m->saved_queue_if_no_path = queue_if_no_path; 446 m->saved_queue_if_no_path = queue_if_no_path;
447 m->queue_if_no_path = queue_if_no_path; 447 m->queue_if_no_path = queue_if_no_path;
448 if (!m->queue_if_no_path)
449 dm_table_run_md_queue_async(m->ti->table);
450
451 spin_unlock_irqrestore(&m->lock, flags); 448 spin_unlock_irqrestore(&m->lock, flags);
452 449
450 if (!queue_if_no_path)
451 dm_table_run_md_queue_async(m->ti->table);
452
453 return 0; 453 return 0;
454} 454}
455 455
@@ -954,7 +954,7 @@ out:
954 */ 954 */
955static int reinstate_path(struct pgpath *pgpath) 955static int reinstate_path(struct pgpath *pgpath)
956{ 956{
957 int r = 0; 957 int r = 0, run_queue = 0;
958 unsigned long flags; 958 unsigned long flags;
959 struct multipath *m = pgpath->pg->m; 959 struct multipath *m = pgpath->pg->m;
960 960
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
978 978
979 if (!m->nr_valid_paths++) { 979 if (!m->nr_valid_paths++) {
980 m->current_pgpath = NULL; 980 m->current_pgpath = NULL;
981 dm_table_run_md_queue_async(m->ti->table); 981 run_queue = 1;
982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
984 m->pg_init_in_progress++; 984 m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
991 991
992out: 992out:
993 spin_unlock_irqrestore(&m->lock, flags); 993 spin_unlock_irqrestore(&m->lock, flags);
994 if (run_queue)
995 dm_table_run_md_queue_async(m->ti->table);
994 996
995 return r; 997 return r;
996} 998}
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1566 } 1568 }
1567 if (m->pg_init_required) 1569 if (m->pg_init_required)
1568 __pg_init_all_paths(m); 1570 __pg_init_all_paths(m);
1569 dm_table_run_md_queue_async(m->ti->table);
1570 spin_unlock_irqrestore(&m->lock, flags); 1571 spin_unlock_irqrestore(&m->lock, flags);
1572 dm_table_run_md_queue_async(m->ti->table);
1571 } 1573 }
1572 1574
1573 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); 1575 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
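
All three dm-mpath hunks apply the same transformation: record the decision while m->lock is held, then call dm_table_run_md_queue_async() only after the spinlock is dropped, since kicking the request queue can take further locks of its own and should not nest inside m->lock. The shape of the pattern:

    /* Decide under the lock, act after it. */
    int run_queue = 0;

    spin_lock_irqsave(&m->lock, flags);
    if (!m->nr_valid_paths++)            /* decision needs the lock */
            run_queue = 1;
    spin_unlock_irqrestore(&m->lock, flags);

    if (run_queue)
            dm_table_run_md_queue_async(m->ti->table);  /* lock dropped */
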
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e0048..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
27#define MAPPING_POOL_SIZE 1024 27#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024 28#define PRISON_CELLS 1024
29#define COMMIT_PERIOD HZ 29#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT (HZ * 60) 30#define NO_SPACE_TIMEOUT_SECS 60
31
32static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
31 33
32DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 34DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
33 "A percentage of time allocated for copy on write"); 35 "A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1670 struct pool_c *pt = pool->ti->private; 1672 struct pool_c *pt = pool->ti->private;
1671 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); 1673 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1672 enum pool_mode old_mode = get_pool_mode(pool); 1674 enum pool_mode old_mode = get_pool_mode(pool);
1675 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
1673 1676
1674 /* 1677 /*
1675 * Never allow the pool to transition to PM_WRITE mode if user 1678 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1732 pool->process_prepared_mapping = process_prepared_mapping; 1735 pool->process_prepared_mapping = process_prepared_mapping;
1733 pool->process_prepared_discard = process_prepared_discard_passdown; 1736 pool->process_prepared_discard = process_prepared_discard_passdown;
1734 1737
1735 if (!pool->pf.error_if_no_space) 1738 if (!pool->pf.error_if_no_space && no_space_timeout)
1736 queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT); 1739 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
1737 break; 1740 break;
1738 1741
1739 case PM_WRITE: 1742 case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
3508module_init(dm_thin_init); 3511module_init(dm_thin_init);
3509module_exit(dm_thin_exit); 3512module_exit(dm_thin_exit);
3510 3513
3514module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
3515MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
3516
3511MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); 3517MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3512MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3518MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3513MODULE_LICENSE("GPL"); 3519MODULE_LICENSE("GPL");
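
NO_SPACE_TIMEOUT becomes a writable module parameter; ACCESS_ONCE() snapshots it once per mode transition so a concurrent sysfs write cannot be re-read mid-decision, and a zero value now disables the timeout entirely. Read-side idiom:

    /* Snapshot the tunable once, then use only the local copy.
     * (READ_ONCE() is the modern spelling of ACCESS_ONCE().) */
    unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;

    if (!pool->pf.error_if_no_space && no_space_timeout)
            queue_delayed_work(pool->wq, &pool->no_space_timeout,
                               no_space_timeout);

At runtime the value should be adjustable through /sys/module/dm_thin_pool/parameters/no_space_timeout (module name assumed from the dm-thin-pool target).
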
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 237b7e0ddc7a..2382cfc9bb3f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7381,8 +7381,10 @@ void md_do_sync(struct md_thread *thread)
7381 /* just in case thread restarts... */ 7381 /* just in case thread restarts... */
7382 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7382 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7383 return; 7383 return;
7384 if (mddev->ro) /* never try to sync a read-only array */ 7384 if (mddev->ro) {/* never try to sync a read-only array */
7385 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7385 return; 7386 return;
7387 }
7386 7388
7387 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7389 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7388 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 7390 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
@@ -7824,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev)
7824 /* There is no thread, but we need to call 7826 /* There is no thread, but we need to call
7825 * ->spare_active and clear saved_raid_disk 7827 * ->spare_active and clear saved_raid_disk
7826 */ 7828 */
7829 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7827 md_reap_sync_thread(mddev); 7830 md_reap_sync_thread(mddev);
7828 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7831 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7829 goto unlock; 7832 goto unlock;
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index a3d99a8fd2d1..ab7f1b01be49 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -97,6 +97,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
97 if (!priv) 97 if (!priv)
98 return NOTIFY_DONE; 98 return NOTIFY_DONE;
99 99
100 if (!priv->tx_led_trig || !priv->rx_led_trig)
101 return NOTIFY_DONE;
102
100 if (msg == NETDEV_CHANGENAME) { 103 if (msg == NETDEV_CHANGENAME) {
101 snprintf(name, sizeof(name), "%s-tx", netdev->name); 104 snprintf(name, sizeof(name), "%s-tx", netdev->name);
102 led_trigger_rename_static(name, priv->tx_led_trig); 105 led_trigger_rename_static(name, priv->tx_led_trig);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f0e2a4d4f621..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -39,6 +39,7 @@ source "drivers/net/ethernet/cisco/Kconfig"
39config CX_ECAT 39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support" 40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI 41 depends on PCI
42 depends on X86 || COMPILE_TEST
42 ---help--- 43 ---help---
43 Driver for EtherCAT master module located on CCAT FPGA 44 Driver for EtherCAT master module located on CCAT FPGA
44 that can be found on Beckhoff CX5020, and possibly other of CX 45 that can be found on Beckhoff CX5020, and possibly other of CX
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d18441ebe944..23da47925fa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
906 bd_prod = RX_BD(bd_prod); 906 bd_prod = RX_BD(bd_prod);
907 bd_cons = RX_BD(bd_cons); 907 bd_cons = RX_BD(bd_cons);
908 908
909 /* A rmb() is required to ensure that the CQE is not read
910 * before it is written by the adapter DMA. PCI ordering
911 * rules will make sure the other fields are written before
912 * the marker at the end of struct eth_fast_path_rx_cqe
913 * but without rmb() a weakly ordered processor can process
914 * stale data. Without the barrier the TPA state machine might
915 * enter an inconsistent state and the kernel stack might be
916 * provided with an incorrect packet description - these lead
917 * to various kernel crashes.
918 */
919 rmb();
920
909 cqe_fp_flags = cqe_fp->type_error_flags; 921 cqe_fp_flags = cqe_fp->type_error_flags;
910 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 922 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
911 923
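
The comment block added above describes a classic DMA producer/consumer ordering bug; the consumer-side idiom is: read the completion marker, issue rmb(), and only then read the payload. Sketch with hypothetical helpers:

    /* Hypothetical CQE consumer: the readiness test and payload
     * handler are illustrative, not bnx2x functions. */
    if (!cqe_marker_set(cqe))        /* 1. read the marker            */
            return;
    rmb();                           /* 2. fence marker vs. payload   */
    handle_cqe_payload(cqe);         /* 3. payload reads stay after   */
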
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ff2bdd80f0aa..cf14218697e4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13283,8 +13283,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13283 netdev_reset_tc(bp->dev); 13283 netdev_reset_tc(bp->dev);
13284 13284
13285 del_timer_sync(&bp->timer); 13285 del_timer_sync(&bp->timer);
13286 cancel_delayed_work(&bp->sp_task); 13286 cancel_delayed_work_sync(&bp->sp_task);
13287 cancel_delayed_work(&bp->period_task); 13287 cancel_delayed_work_sync(&bp->period_task);
13288 13288
13289 spin_lock_bh(&bp->stats_lock); 13289 spin_lock_bh(&bp->stats_lock);
13290 bp->stats_state = STATS_STATE_DISABLED; 13290 bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9c..a4b25bc7113a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
608 pr_err("%s: Bad type %d\n", __func__, ulp_type); 608 pr_err("%s: Bad type %d\n", __func__, ulp_type);
609 return -EINVAL; 609 return -EINVAL;
610 } 610 }
611
612 if (ulp_type == CNIC_ULP_ISCSI)
613 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
614
611 mutex_lock(&cnic_lock); 615 mutex_lock(&cnic_lock);
612 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 616 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
613 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 617 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
620 } 624 }
621 mutex_unlock(&cnic_lock); 625 mutex_unlock(&cnic_lock);
622 626
623 if (ulp_type == CNIC_ULP_ISCSI) 627 if (ulp_type == CNIC_ULP_FCOE)
624 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
625 else if (ulp_type == CNIC_ULP_FCOE)
626 dev->fcoe_cap = NULL; 628 dev->fcoe_cap = NULL;
627 629
628 synchronize_rcu(); 630 synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1039 struct cnic_local *cp = dev->cnic_priv; 1041 struct cnic_local *cp = dev->cnic_priv;
1040 struct cnic_uio_dev *udev; 1042 struct cnic_uio_dev *udev;
1041 1043
1042 read_lock(&cnic_dev_lock);
1043 list_for_each_entry(udev, &cnic_udev_list, list) { 1044 list_for_each_entry(udev, &cnic_udev_list, list) {
1044 if (udev->pdev == dev->pcidev) { 1045 if (udev->pdev == dev->pcidev) {
1045 udev->dev = dev; 1046 udev->dev = dev;
1046 if (__cnic_alloc_uio_rings(udev, pages)) { 1047 if (__cnic_alloc_uio_rings(udev, pages)) {
1047 udev->dev = NULL; 1048 udev->dev = NULL;
1048 read_unlock(&cnic_dev_lock);
1049 return -ENOMEM; 1049 return -ENOMEM;
1050 } 1050 }
1051 cp->udev = udev; 1051 cp->udev = udev;
1052 read_unlock(&cnic_dev_lock);
1053 return 0; 1052 return 0;
1054 } 1053 }
1055 } 1054 }
1056 read_unlock(&cnic_dev_lock);
1057 1055
1058 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); 1056 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1059 if (!udev) 1057 if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
1067 if (__cnic_alloc_uio_rings(udev, pages)) 1065 if (__cnic_alloc_uio_rings(udev, pages))
1068 goto err_udev; 1066 goto err_udev;
1069 1067
1070 write_lock(&cnic_dev_lock);
1071 list_add(&udev->list, &cnic_udev_list); 1068 list_add(&udev->list, &cnic_udev_list);
1072 write_unlock(&cnic_dev_lock);
1073 1069
1074 pci_dev_get(udev->pdev); 1070 pci_dev_get(udev->pdev);
1075 1071
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5624{ 5620{
5625 int if_type; 5621 int if_type;
5626 5622
5627 rcu_read_lock();
5628 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5629 struct cnic_ulp_ops *ulp_ops; 5624 struct cnic_ulp_ops *ulp_ops;
5630 void *ctx; 5625 void *ctx;
5631 5626
5632 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 5627 mutex_lock(&cnic_lock);
5633 if (!ulp_ops || !ulp_ops->indicate_netevent) 5628 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
5629 lockdep_is_held(&cnic_lock));
5630 if (!ulp_ops || !ulp_ops->indicate_netevent) {
5631 mutex_unlock(&cnic_lock);
5634 continue; 5632 continue;
5633 }
5635 5634
5636 ctx = cp->ulp_handle[if_type]; 5635 ctx = cp->ulp_handle[if_type];
5637 5636
5637 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5638 mutex_unlock(&cnic_lock);
5639
5638 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5641
5642 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
5639 } 5643 }
5640 rcu_read_unlock();
5641} 5644}
5642 5645
5643/* netdev event handler */ 5646/* netdev event handler */
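
Replacing rcu_read_lock() with cnic_lock plus ULP_F_CALL_PENDING lets indicate_netevent() sleep: the ops pointer is sampled under the mutex, the pending bit pins the registration across the unlocked callback, and (per the existing unregister path) the teardown side waits for the bit to clear. Condensed:

    mutex_lock(&cnic_lock);
    ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
                                        lockdep_is_held(&cnic_lock));
    if (!ulp_ops || !ulp_ops->indicate_netevent) {
            mutex_unlock(&cnic_lock);
            return;
    }
    set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
    mutex_unlock(&cnic_lock);

    ulp_ops->indicate_netevent(ctx, event, vlan_id);   /* may now sleep */
    clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
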
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 9d75fef6396f..63eb959a28aa 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
682 goto fail6; 682 goto fail6;
683 683
684 /* Enable all MAL SERR interrupt sources */ 684 /* Enable all MAL SERR interrupt sources */
685 if (mal->version == 2) 685 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
686 set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
687 else
688 set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
689 686
690 /* Enable EOB interrupt */ 687 /* Enable EOB interrupt */
691 mal_enable_eob_irq(mal); 688 mal_enable_eob_irq(mal);
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index e431a32e3d69..eeade2ea8334 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
95 95
96 96
97#define MAL_IER 0x02 97#define MAL_IER 0x02
98/* MAL IER bits */
98#define MAL_IER_DE 0x00000010 99#define MAL_IER_DE 0x00000010
99#define MAL_IER_OTE 0x00000004 100#define MAL_IER_OTE 0x00000004
100#define MAL_IER_OE 0x00000002 101#define MAL_IER_OE 0x00000002
101#define MAL_IER_PE 0x00000001 102#define MAL_IER_PE 0x00000001
102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 103
108/* MAL V2 IER bits */ 104/* PLB read/write/timeout errors */
109#define MAL2_IER_PT 0x00000080 105#define MAL_IER_PTE 0x00000080
110#define MAL2_IER_PRE 0x00000040 106#define MAL_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 107#define MAL_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 108
109#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
110#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
111 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
116 112
117#define MAL_TXCASR 0x04 113#define MAL_TXCASR 0x04
118#define MAL_TXCARR 0x05 114#define MAL_TXCARR 0x05
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fb2f96da23b..a01182cce965 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
45 45
46/* RGMIIx_SSR */ 46/* RGMIIx_SSR */
47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) 47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
48#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8))
48#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) 49#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
49#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) 50#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
50 51
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
139 ssr |= RGMII_SSR_1000(input); 140 ssr |= RGMII_SSR_1000(input);
140 else if (speed == SPEED_100) 141 else if (speed == SPEED_100)
141 ssr |= RGMII_SSR_100(input); 142 ssr |= RGMII_SSR_100(input);
143 else if (speed == SPEED_10)
144 ssr |= RGMII_SSR_10(input);
142 145
143 out_be32(&p->ssr, ssr); 146 out_be32(&p->ssr, ssr);
144 147
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 38e9a4c9099c..19606a44672b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2012,6 +2012,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2012 if (!mlx4_is_slave(dev)) { 2012 if (!mlx4_is_slave(dev)) {
2013 mlx4_init_mac_table(dev, &info->mac_table); 2013 mlx4_init_mac_table(dev, &info->mac_table);
2014 mlx4_init_vlan_table(dev, &info->vlan_table); 2014 mlx4_init_vlan_table(dev, &info->vlan_table);
2015 mlx4_init_roce_gid_table(dev, &info->gid_table);
2015 info->base_qpn = mlx4_get_base_qpn(dev, port); 2016 info->base_qpn = mlx4_get_base_qpn(dev, port);
2016 } 2017 }
2017 2018
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 9dd1b30ea757..4b416edb4c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -696,6 +696,17 @@ struct mlx4_mac_table {
696 int max; 696 int max;
697}; 697};
698 698
699#define MLX4_ROCE_GID_ENTRY_SIZE 16
700
701struct mlx4_roce_gid_entry {
702 u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
703};
704
705struct mlx4_roce_gid_table {
706 struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
707 struct mutex mutex;
708};
709
699#define MLX4_MAX_VLAN_NUM 128 710#define MLX4_MAX_VLAN_NUM 128
700#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) 711#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
701 712
@@ -759,6 +770,7 @@ struct mlx4_port_info {
759 struct device_attribute port_mtu_attr; 770 struct device_attribute port_mtu_attr;
760 struct mlx4_mac_table mac_table; 771 struct mlx4_mac_table mac_table;
761 struct mlx4_vlan_table vlan_table; 772 struct mlx4_vlan_table vlan_table;
773 struct mlx4_roce_gid_table gid_table;
762 int base_qpn; 774 int base_qpn;
763}; 775};
764 776
@@ -789,10 +801,6 @@ enum {
789 MLX4_USE_RR = 1, 801 MLX4_USE_RR = 1,
790}; 802};
791 803
792struct mlx4_roce_gid_entry {
793 u8 raw[16];
794};
795
796struct mlx4_priv { 804struct mlx4_priv {
797 struct mlx4_dev dev; 805 struct mlx4_dev dev;
798 806
@@ -840,7 +848,6 @@ struct mlx4_priv {
840 int fs_hash_mode; 848 int fs_hash_mode;
841 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 849 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
842 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 850 __be64 slave_node_guids[MLX4_MFUNC_MAX];
843 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
844 851
845 atomic_t opreq_count; 852 atomic_t opreq_count;
846 struct work_struct opreq_task; 853 struct work_struct opreq_task;
@@ -1141,6 +1148,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1141 1148
1142void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1149void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1143void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1150void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1151void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
1152 struct mlx4_roce_gid_table *table);
1144void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1153void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1145int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1154int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1146 1155
@@ -1150,6 +1159,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1150 enum mlx4_resource resource_type, 1159 enum mlx4_resource resource_type,
1151 u64 resource_id, int *slave); 1160 u64 resource_id, int *slave);
1152void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1161void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1162void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
1153int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1163int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1154 1164
1155void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1165void mlx4_free_resource_tracker(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 376f2f1d445e..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 75 table->total = 0;
76} 76}
77 77
78void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
79 struct mlx4_roce_gid_table *table)
80{
81 int i;
82
83 mutex_init(&table->mutex);
84 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
85 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
86}
87
78static int validate_index(struct mlx4_dev *dev, 88static int validate_index(struct mlx4_dev *dev,
79 struct mlx4_mac_table *table, int index) 89 struct mlx4_mac_table *table, int index)
80{ 90{
@@ -583,6 +593,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
583} 593}
584EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 594EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
585 595
596static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
597 int port, struct mlx4_cmd_mailbox *mailbox)
598{
599 struct mlx4_roce_gid_entry *gid_entry_mbox;
600 struct mlx4_priv *priv = mlx4_priv(dev);
601 int num_gids, base, offset;
602 int i, err;
603
604 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
605 base = mlx4_get_base_gid_ix(dev, slave, port);
606
607 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
608
609 mutex_lock(&(priv->port[port].gid_table.mutex));
610 /* Zero-out gids belonging to that slave in the port GID table */
611 for (i = 0, offset = base; i < num_gids; offset++, i++)
612 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
613 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
614
615 /* Now, copy roce port gids table to mailbox for passing to FW */
616 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
617 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
618 memcpy(gid_entry_mbox->raw,
619 priv->port[port].gid_table.roce_gids[i].raw,
620 MLX4_ROCE_GID_ENTRY_SIZE);
621
622 err = mlx4_cmd(dev, mailbox->dma,
623 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
624 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
625 MLX4_CMD_NATIVE);
626 mutex_unlock(&(priv->port[port].gid_table.mutex));
627 return err;
628}
629
630
631void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
632{
633 struct mlx4_active_ports actv_ports;
634 struct mlx4_cmd_mailbox *mailbox;
635 int num_eth_ports, err;
636 int i;
637
638 if (slave < 0 || slave > dev->num_vfs)
639 return;
640
641 actv_ports = mlx4_get_active_ports(dev, slave);
642
643 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
644 if (test_bit(i, actv_ports.ports)) {
645 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
646 continue;
647 num_eth_ports++;
648 }
649 }
650
651 if (!num_eth_ports)
652 return;
653
654 /* we have ETH ports; alloc a mailbox for the SET_PORT command */
655 mailbox = mlx4_alloc_cmd_mailbox(dev);
656 if (IS_ERR(mailbox))
657 return;
658
659 for (i = 0; i < dev->caps.num_ports; i++) {
660 if (test_bit(i, actv_ports.ports)) {
661 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
662 continue;
663 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
664 if (err)
665 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
666 slave, i + 1, err);
667 }
668 }
669
670 mlx4_free_cmd_mailbox(dev, mailbox);
671 return;
672}
673
586static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 674static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
587 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 675 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
588{ 676{
@@ -691,10 +779,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
691 /* 2. Check that we do not have duplicates in OTHER 779 /* 2. Check that we do not have duplicates in OTHER
692 * entries in the port GID table 780 * entries in the port GID table
693 */ 781 */
782
783 mutex_lock(&(priv->port[port].gid_table.mutex));
694 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 784 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
695 if (i >= base && i < base + num_gids) 785 if (i >= base && i < base + num_gids)
696 continue; /* don't compare to slave's current gids */ 786 continue; /* don't compare to slave's current gids */
697 gid_entry_tbl = &priv->roce_gids[port - 1][i]; 787 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
698 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 788 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
699 continue; 789 continue;
700 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 790 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -707,6 +797,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 			/* found duplicate */
 			mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
 				  slave, i);
+			mutex_unlock(&(priv->port[port].gid_table.mutex));
 			return -EINVAL;
 		}
 	}
@@ -715,16 +806,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		/* insert slave GIDs with memcpy, starting at slave's base index */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
-			memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+			memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
+			       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
 
 		/* Now, copy roce port gids table to current mailbox for passing to FW */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
-			memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
-
-		break;
+			memcpy(gid_entry_mbox->raw,
+			       priv->port[port].gid_table.roce_gids[i].raw,
+			       MLX4_ROCE_GID_ENTRY_SIZE);
+
+		err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
+			       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_NATIVE);
+		mutex_unlock(&(priv->port[port].gid_table.mutex));
+		return err;
 	}
-	return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+
+	return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
 			MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 			MLX4_CMD_NATIVE);
 }
@@ -1097,7 +1196,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
 	num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
 
 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
-		if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
+			    MLX4_ROCE_GID_ENTRY_SIZE)) {
 			found_ix = i;
 			break;
 		}
@@ -1185,7 +1285,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
 	if (!mlx4_is_master(dev))
 		return -EINVAL;
 
-	memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
+	       MLX4_ROCE_GID_ENTRY_SIZE);
 	return 0;
 }
 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
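An aside on the pattern behind the mlx4 changes above: every writer of a port's RoCE GID table now takes the per-port gid_table.mutex, updates only the slave's slice, and then pushes the whole table to firmware in one SET_PORT command, so the firmware never sees a half-updated table. A minimal userspace sketch of that pattern, with pthreads standing in for the kernel mutex and all names hypothetical:

	#include <pthread.h>
	#include <string.h>

	#define MAX_GIDS	128
	#define GID_ENTRY_SIZE	16

	struct gid_table {
		pthread_mutex_t mutex;
		unsigned char gids[MAX_GIDS][GID_ENTRY_SIZE];
	};

	void reset_slave_gids(struct gid_table *t, int base, int num_gids,
			      unsigned char mbox[MAX_GIDS][GID_ENTRY_SIZE])
	{
		pthread_mutex_lock(&t->mutex);
		/* zero only the slave's slice of the table ... */
		memset(t->gids[base], 0, (size_t)num_gids * GID_ENTRY_SIZE);
		/* ... but hand the "firmware" the whole table in one shot */
		memcpy(mbox, t->gids, sizeof(t->gids));
		pthread_mutex_unlock(&t->mutex);
	}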
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index dd821b363686..b6cddef24391 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 	}
 	/* free master's vlans */
 	i = dev->caps.function;
+	mlx4_reset_roce_gids(dev, i);
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 	rem_slave_vlans(dev, i);
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -4672,7 +4673,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-
+	mlx4_reset_roce_gids(dev, slave);
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 	rem_slave_vlans(dev, slave);
 	rem_slave_macs(dev, slave);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a51fe18f09a8..561cb11ca58c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
 	struct qlcnic_dcb_cee *peer;
 	int i;
 
+	memset(info, 0, sizeof(*info));
 	*app_count = 0;
 
 	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a94ede699b4..b19a323c8647 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 };
 
 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+	[EDMR]		= 0x0000,
+	[EDTRR]		= 0x0004,
+	[EDRRR]		= 0x0008,
+	[TDLAR]		= 0x000c,
+	[RDLAR]		= 0x0010,
+	[EESR]		= 0x0014,
+	[EESIPR]	= 0x0018,
+	[TRSCER]	= 0x001c,
+	[RMFCR]		= 0x0020,
+	[TFTR]		= 0x0024,
+	[FDR]		= 0x0028,
+	[RMCR]		= 0x002c,
+	[EDOCR]		= 0x0030,
+	[FCFTR]		= 0x0034,
+	[RPADIR]	= 0x0038,
+	[TRIMD]		= 0x003c,
+	[RBWAR]		= 0x0040,
+	[RDFAR]		= 0x0044,
+	[TBRAR]		= 0x004c,
+	[TDFAR]		= 0x0050,
+
 	[ECMR]		= 0x0160,
 	[ECSR]		= 0x0164,
 	[ECSIPR]	= 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
 	.register_type	= SH_ETH_REG_FAST_SH4,
 
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
-	.rmcr_value	= RMCR_RNC,
 
 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
 	.fdr_value	= 0x0000072f,
-	.rmcr_value	= RMCR_RNC,
 
 	.irq_flags	= IRQF_SHARED,
 	.apr		= 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
 	.fdr_value	= 0x0000070f,
-	.rmcr_value	= RMCR_RNC,
 
 	.apr		= 1,
 	.mpr		= 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
 			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 			  EESR_TDE | EESR_ECI,
 	.fdr_value	= 0x0000070f,
-	.rmcr_value	= RMCR_RNC,
 
 	.no_psr		= 1,
 	.apr		= 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 	if (!cd->fdr_value)
 		cd->fdr_value = DEFAULT_FDR_INIT;
 
-	if (!cd->rmcr_value)
-		cd->rmcr_value = DEFAULT_RMCR_VALUE;
-
 	if (!cd->tx_check)
 		cd->tx_check = DEFAULT_TX_CHECK;
 
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
 	sh_eth_write(ndev, 0, TFTR);
 
-	/* Frame recv control */
-	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+	/* Frame recv control (enable multiple-packets per rx irq) */
+	sh_eth_write(ndev, RMCR_RNC, RMCR);
 
 	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
 enum RMCR_BIT {
 	RMCR_RNC = 0x00000001,
 };
-#define DEFAULT_RMCR_VALUE	0x00000000
 
 /* ECMR */
 enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
 	unsigned long fdr_value;
 	unsigned long fcftr_value;
 	unsigned long rpadir_value;
-	unsigned long rmcr_value;
 
 	/* interrupt checking mask */
 	unsigned long tx_check;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 		goto out_disable_resources;
 	}
 
+	netif_carrier_off(dev);
+
 	retval = register_netdev(dev);
 	if (retval) {
 		SMSC_WARN(pdata, probe, "Error %i registering device", retval);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f32d730f55cc..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
 	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
 					    res_num))) {
 		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
-			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
 			if (request_irq(irq_num, emac_irq, 0, ndev->name,
 					ndev)) {
 				dev_err(emac_dev,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 9a9ce8debefa..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1724,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 	 * to traverse list in reverse under rcu_read_lock
 	 */
 	mutex_lock(&team->lock);
+	team->port_mtu_change_allowed = true;
 	list_for_each_entry(port, &team->port_list, list) {
 		err = dev_set_mtu(port->dev, new_mtu);
 		if (err) {
@@ -1732,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 			goto unwind;
 		}
 	}
+	team->port_mtu_change_allowed = false;
 	mutex_unlock(&team->lock);
 
 	dev->mtu = new_mtu;
@@ -1741,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 unwind:
 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
 		dev_set_mtu(port->dev, dev->mtu);
+	team->port_mtu_change_allowed = false;
 	mutex_unlock(&team->lock);
 
 	return err;
@@ -2851,7 +2854,9 @@ static int team_device_event(struct notifier_block *unused,
 		break;
 	case NETDEV_PRECHANGEMTU:
 		/* Forbid to change mtu of underlaying device */
-		return NOTIFY_BAD;
+		if (!port->team->port_mtu_change_allowed)
+			return NOTIFY_BAD;
+		break;
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid to change type of underlaying device */
 		return NOTIFY_BAD;
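An aside on the team.c change above: it is a self-recursion flag. The driver's own notifier vetoes MTU changes on member ports, so team_change_mtu() raises port_mtu_change_allowed, under the lock it already holds, around the dev_set_mtu() calls it issues itself, and the notifier lets those through. A minimal userspace sketch of the idea, with pthreads in place of the kernel mutex and hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool port_mtu_change_allowed;

	int notifier_pre_change_mtu(void)
	{
		/* veto unless the team itself initiated the change */
		return port_mtu_change_allowed ? 0 : -1;
	}

	void team_change_mtu(int new_mtu, int nports, int *port_mtu)
	{
		pthread_mutex_lock(&team_lock);
		port_mtu_change_allowed = true;
		for (int i = 0; i < nports; i++)
			port_mtu[i] = new_mtu;	/* would fire the notifier */
		port_mtu_change_allowed = false;
		pthread_mutex_unlock(&team_lock);
	}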
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index f72570708edb..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,8 @@
 #define USB_PRODUCT_IPHONE_3GS	0x1294
 #define USB_PRODUCT_IPHONE_4	0x1297
 #define USB_PRODUCT_IPAD	0x129a
+#define USB_PRODUCT_IPAD_2	0x12a2
+#define USB_PRODUCT_IPAD_3	0x12a6
 #define USB_PRODUCT_IPAD_MINI	0x12ab
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S	0x12a0
@@ -107,6 +109,14 @@ static struct usb_device_id ipheth_table[] = {
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
 		USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 83208d4fdc59..dc4bf06948c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -748,11 +748,15 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 11)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
+	{QMI_FIXED_INTF(0x1199, 0x9053, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9054, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9055, 8)},	/* Netgear AirCard 341U */
+	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 	return GPIOF_DIR_IN;
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
-static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-				     int value)
-{
-	return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
 static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
 {
 	struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
 		wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
+static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+				     int value)
+{
+	wmt_gpio_set_value(chip, offset, value);
+	return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
 static struct gpio_chip wmt_gpio_chip = {
 	.label = "gpio-wmt",
 	.owner = THIS_MODULE,
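The functional part of the pinctrl-wmt change above is the added wmt_gpio_set_value() call: latching the requested output level before switching the pin's direction keeps the pin from briefly driving a stale level. A sketch of that ordering, with hypothetical memory-mapped registers:

	#include <stdint.h>

	static volatile uint32_t gpio_data_out;	/* stand-ins for MMIO registers */
	static volatile uint32_t gpio_dir_out;

	void gpio_direction_output(unsigned bit, int value)
	{
		if (value)				/* 1. latch the level first */
			gpio_data_out |= (1u << bit);
		else
			gpio_data_out &= ~(1u << bit);
		gpio_dir_out |= (1u << bit);		/* 2. then enable the driver */
	}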
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
 	list_del(&rphy->list);
 	mutex_unlock(&sas_host->lock);
 
-	sas_bsg_remove(shost, rphy);
-
 	transport_destroy_device(dev);
 
 	put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}
 
 	sas_rphy_unlink(rphy);
+	sas_bsg_remove(NULL, rphy);
 	transport_remove_device(dev);
 	device_del(dev);
 }
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 171a71d20c88..728bf7f14f7b 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -139,6 +139,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
 	/* write channel to multiplexer */
 	/* set mask scan bit high to disable scanning */
 	outb(chan | 0x80, dev->iobase + CMD_R1);
+	/* mux needs 2us to really settle [Fred Brooks]. */
+	udelay(2);
 
 	/* convert n samples */
 	for (n = 0; n < insn->n; n++) {
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 11d0a9d8ee59..b7dd1539bbc4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
 	snap->oui[1] = oui[1];
 	snap->oui[2] = oui[2];
 
-	*(u16 *)(data + SNAP_SIZE) = h_proto;
+	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
 
 	return SNAP_SIZE + sizeof(u16);
 }
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3b6e5358c723..7de79d59a4cd 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2218,6 +2218,7 @@ static void __exit speakup_exit(void)
 	unregister_keyboard_notifier(&keyboard_notifier_block);
 	unregister_vt_notifier(&vt_notifier_block);
 	speakup_unregister_devsynth();
+	speakup_cancel_paste();
 	del_timer(&cursor_timer);
 	kthread_stop(speakup_task);
 	speakup_task = NULL;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index f0fb00392d6b..ca04d3669acc 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -4,6 +4,10 @@
 #include <linux/sched.h>
 #include <linux/device.h> /* for dev_warn */
 #include <linux/selection.h>
+#include <linux/workqueue.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <asm/cmpxchg.h>
 
 #include "speakup.h"
 
@@ -121,31 +125,61 @@ int speakup_set_selection(struct tty_struct *tty)
 	return 0;
 }
 
-/* TODO: move to some helper thread, probably. That'd fix having to check for
- * in_atomic(). */
-int speakup_paste_selection(struct tty_struct *tty)
+struct speakup_paste_work {
+	struct work_struct work;
+	struct tty_struct *tty;
+};
+
+static void __speakup_paste_selection(struct work_struct *work)
 {
+	struct speakup_paste_work *spw =
+		container_of(work, struct speakup_paste_work, work);
+	struct tty_struct *tty = xchg(&spw->tty, NULL);
 	struct vc_data *vc = (struct vc_data *) tty->driver_data;
 	int pasted = 0, count;
+	struct tty_ldisc *ld;
 	DECLARE_WAITQUEUE(wait, current);
+
+	ld = tty_ldisc_ref_wait(tty);
+	tty_buffer_lock_exclusive(&vc->port);
+
 	add_wait_queue(&vc->paste_wait, &wait);
 	while (sel_buffer && sel_buffer_lth > pasted) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (test_bit(TTY_THROTTLED, &tty->flags)) {
-			if (in_atomic())
-				/* if we are in an interrupt handler, abort */
-				break;
 			schedule();
 			continue;
 		}
 		count = sel_buffer_lth - pasted;
-		count = min_t(int, count, tty->receive_room);
-		tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
-					     NULL, count);
+		count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
+					      count);
 		pasted += count;
 	}
 	remove_wait_queue(&vc->paste_wait, &wait);
 	current->state = TASK_RUNNING;
+
+	tty_buffer_unlock_exclusive(&vc->port);
+	tty_ldisc_deref(ld);
+	tty_kref_put(tty);
+}
+
+static struct speakup_paste_work speakup_paste_work = {
+	.work = __WORK_INITIALIZER(speakup_paste_work.work,
+				   __speakup_paste_selection)
+};
+
+int speakup_paste_selection(struct tty_struct *tty)
+{
+	if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
+		return -EBUSY;
+
+	tty_kref_get(tty);
+	schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
 	return 0;
 }
 
+void speakup_cancel_paste(void)
+{
+	cancel_work_sync(&speakup_paste_work.work);
+	tty_kref_put(speakup_paste_work.tty);
+}
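An aside on the speakup rework above: it removes the in_atomic() hack by deferring the paste to a workqueue, and it serializes callers with a single-slot hand-off. Callers claim the one static work item with cmpxchg() (a concurrent paste fails with -EBUSY), and the worker takes ownership of the tty pointer with xchg() before pasting under the buffer lock. A minimal userspace sketch of that single-slot hand-off, using GCC atomic builtins in place of the kernel's cmpxchg()/xchg(); names here are hypothetical:

	#include <stddef.h>

	struct tty;			/* stand-in for struct tty_struct */
	static struct tty *paste_tty;	/* the single work slot */

	int queue_paste(struct tty *tty)
	{
		struct tty *expected = NULL;

		/* claim the slot; fail with "busy" if a paste is already queued */
		if (!__atomic_compare_exchange_n(&paste_tty, &expected, tty, 0,
						 __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
			return -1;
		/* a real implementation would schedule the worker here */
		return 0;
	}

	struct tty *worker_take(void)
	{
		/* the worker empties the slot, freeing it for the next paste */
		return __atomic_exchange_n(&paste_tty, (struct tty *)NULL,
					   __ATOMIC_ACQ_REL);
	}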
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7bcceec436a..898dce5e1243 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -75,6 +75,7 @@ extern void synth_buffer_clear(void);
 extern void speakup_clear_selection(void);
 extern int speakup_set_selection(struct tty_struct *tty);
 extern int speakup_paste_selection(struct tty_struct *tty);
+extern void speakup_cancel_paste(void);
 extern void speakup_register_devsynth(void);
 extern void speakup_unregister_devsynth(void);
 extern void synth_write(const char *buf, size_t count);
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index c7f014ed9628..5079dbd5d7ad 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -60,15 +60,15 @@ static struct kobj_attribute vol_attribute =
 	__ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
 
 static struct kobj_attribute delay_time_attribute =
-	__ATTR(delay_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
 	__ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-	__ATTR(full_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-	__ATTR(jiffy_delta, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-	__ATTR(trigger_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cf78d1985cd8..143deb62467d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -60,6 +60,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
 	atomic_inc(&buf->priority);
 	mutex_lock(&buf->lock);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
 
 void tty_buffer_unlock_exclusive(struct tty_port *port)
 {
@@ -73,6 +74,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
 	if (restart)
 		queue_work(system_unbound_wq, &buf->work);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
 
 /**
  * tty_buffer_space_avail - return unused buffer space
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 888881e5f292..4aeb10034de7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1822,10 +1822,13 @@ int usb_runtime_suspend(struct device *dev)
 	if (status == -EAGAIN || status == -EBUSY)
 		usb_mark_last_busy(udev);
 
-	/* The PM core reacts badly unless the return code is 0,
-	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
+	/*
+	 * The PM core reacts badly unless the return code is 0,
+	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
+	 * (except for root hubs, because they don't suspend through
+	 * an upstream port like other USB devices).
 	 */
-	if (status != 0)
+	if (status != 0 && udev->parent)
 		return -EBUSY;
 	return status;
 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 090469ebfcff..229a73f64304 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1691,8 +1691,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	 */
 	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
 
-	/* Hubs have proper suspend/resume support. */
-	usb_enable_autosuspend(hdev);
+	/*
+	 * Hubs have proper suspend/resume support, except for root hubs
+	 * where the controller driver doesn't have bus_suspend and
+	 * bus_resume methods.
+	 */
+	if (hdev->parent) {		/* normal device */
+		usb_enable_autosuspend(hdev);
+	} else {			/* root hub */
+		const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
+
+		if (drv->bus_suspend && drv->bus_resume)
+			usb_enable_autosuspend(hdev);
+	}
 
 	if (hdev->level == MAX_TOPO_LEVEL) {
 		dev_err(&intf->dev,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 00661d305143..4a6d3dd68572 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -847,6 +847,13 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 	bool ehci_found = false;
 	struct pci_dev *companion = NULL;
 
+	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
+	 * switching ports from EHCI to xHCI
+	 */
+	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
+	    xhci_pdev->subsystem_device == 0x90a8)
+		return;
+
 	/* make sure an intel EHCI controller exists */
 	for_each_pci_dev(companion) {
 		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c089668308ad..b1a8a5f4bbb8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1822,6 +1822,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		kfree(cur_cd);
 	}
 
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
+		}
+	}
+
 	for (i = 1; i < MAX_HC_SLOTS; ++i)
 		xhci_free_virt_device(xhci, i);
 
@@ -1857,16 +1867,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (!xhci->rh_bw)
 		goto no_bw;
 
-	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-	for (i = 0; i < num_ports; i++) {
-		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
-		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
-			struct list_head *ep = &bwt->interval_bw[j].endpoints;
-			while (!list_empty(ep))
-				list_del_init(ep->next);
-		}
-	}
-
 	for (i = 0; i < num_ports; i++) {
 		struct xhci_tt_bw_info *tt, *n;
 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c6e1dedeb06..edf3b124583c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -580,6 +580,8 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	/*
 	 * ELV devices:
 	 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 993c93df6874..500474c48f4b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -538,6 +538,11 @@
  */
 #define FTDI_TIAO_UMPA_PID	0x8a98	/* TIAO/DIYGADGET USB Multi-Protocol Adapter */
 
+/*
+ * NovaTech product ids (FTDI_VID)
+ */
+#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+
 
 /********************************/
 /** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index df90dae53eb9..c0a42e9e6777 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -821,7 +821,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
 	firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
 
 	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
-	i2c_header->Size	= (__u16)buffer_size;
+	i2c_header->Size	= cpu_to_le16(buffer_size);
 	i2c_header->CheckSum	= cs;
 	firmware_rec->Ver_Major	= OperationalMajorVersion;
 	firmware_rec->Ver_Minor	= OperationalMinorVersion;
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 51f83fbb73bb..6f6a856bc37c 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
 
 struct ti_i2c_desc {
 	__u8	Type;			// Type of descriptor
-	__u16	Size;			// Size of data only not including header
+	__le16	Size;			// Size of data only not including header
 	__u8	CheckSum;		// Checksum (8 bit sum of data only)
 	__u8	Data[0];		// Data starts here
 } __attribute__((packed));
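The io_ti/io_usbvend change above is a plain endianness fix: ti_i2c_desc.Size lives in a device-defined little-endian layout, so it must be declared __le16 and written via cpu_to_le16(), or the stored value is wrong on big-endian hosts. A userspace sketch of the same rule, using glibc's htole16()/le16toh() as stand-ins for the kernel helpers (the struct here is a simplified, hypothetical analogue):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	struct i2c_desc {		/* device-defined layout */
		uint8_t  type;
		uint16_t size_le;	/* always little-endian on the wire */
		uint8_t  checksum;
	} __attribute__((packed));

	int main(void)
	{
		struct i2c_desc d = { .type = 0, .checksum = 0 };

		d.size_le = htole16(512);		/* store in wire order ... */
		printf("%u\n", (unsigned)le16toh(d.size_le)); /* ... convert on read */
		return 0;
	}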
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f213ee978516..948a19f0cdf7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+#define NOVATELWIRELESS_PRODUCT_E371		0x9011
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
 	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },