Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 3
-rw-r--r--  drivers/acpi/nfit.c | 5
-rw-r--r--  drivers/ata/Kconfig | 8
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci_platform.c | 3
-rw-r--r--  drivers/ata/ahci_seattle.c | 210
-rw-r--r--  drivers/ata/libahci.c | 1
-rw-r--r--  drivers/base/power/opp/core.c | 3
-rw-r--r--  drivers/base/property.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 26
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 26
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c | 4
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 11
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 26
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 4
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 7
-rw-r--r--  drivers/hv/ring_buffer.c | 26
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c | 2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 30
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c | 3
-rw-r--r--  drivers/iio/magnetometer/ak8975.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 14
-rw-r--r--  drivers/input/misc/twl6040-vibra.c | 16
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 28
-rw-r--r--  drivers/input/touchscreen/zforce_ts.c | 4
-rw-r--r--  drivers/media/media-device.c | 8
-rw-r--r--  drivers/media/platform/exynos4-is/media-dev.c | 13
-rw-r--r--  drivers/media/platform/s3c-camif/camif-core.c | 12
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 5
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 8
-rw-r--r--  drivers/net/geneve.c | 5
-rw-r--r--  drivers/net/macsec.c | 23
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/vxlan.c | 5
-rw-r--r--  drivers/nvdimm/pmem.c | 13
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 4
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 115
-rw-r--r--  drivers/usb/core/port.c | 6
-rw-r--r--  drivers/usb/core/usb.c | 8
-rw-r--r--  drivers/usb/musb/jz4740.c | 4
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 6
-rw-r--r--  drivers/usb/musb/musb_host.c | 2
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/virtio/virtio_ring.c | 2
-rw-r--r--  drivers/xen/balloon.c | 16
-rw-r--r--  drivers/xen/evtchn.c | 20
71 files changed, 703 insertions(+), 247 deletions(-)
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 1982310e6d83..da198b864107 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 				obj_desc->method.mutex->mutex.
 				    original_sync_level =
 				    obj_desc->method.mutex->mutex.sync_level;
+
+				obj_desc->method.mutex->mutex.thread_id =
+				    acpi_os_get_thread_id();
 			}
 		}
 
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index d0f35e63640b..63cc9dbe4f3b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
 				offset);
 			rc = -ENXIO;
 		}
-	} else
+	} else {
 		rc = 0;
+		if (cmd_rc)
+			*cmd_rc = xlat_status(buf, cmd);
+	}
 
  out:
 	ACPI_FREE(out_obj);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5083f85efea7..cfa936a32513 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,14 @@ config SATA_FSL
 
 	  If unsure, say N.
 
+config SATA_AHCI_SEATTLE
+	tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+	depends on ARCH_SEATTLE
+	help
+	  This option enables support for AMD Seattle SATA host controller.
+
+	  If unsure, say N
+
 config SATA_INIC162X
 	tristate "Initio 162x SATA support (Very Experimental)"
 	depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 18579521464e..0b2afb7e5f35 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA) += libata.o
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)	+= ahci_seattle.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 40442332bfa7..62a04c8fb5c9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
 	if (rc)
 		return rc;
 
+	of_property_read_u32(dev->of_node,
+			     "ports-implemented", &hpriv->force_port_map);
+
 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 000000000000..6e702ab57220
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit		Type	Description
+ * 31		RW	OD7.2 (activity)
+ * 30		RW	OD7.1 (locate)
+ * 29		RW	OD7.0 (fault)
+ * 28...8	RW	OD6.2...OD0.0 (3bits per port, 1 bit per LED)
+ * 7		RO	SGPIO feature flag
+ * 6:4		RO	Reserved
+ * 3:0		RO	Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x)	(8 + (3 * x))
+#define LOCATE_BIT_POS(x)	(ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)	(LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK		0x00010000
+#define LOCATE_MASK		0x00080000
+#define FAULT_MASK		0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size);
+
+struct seattle_plat_data {
+	void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+	.inherits		= &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+	.inherits		= &ahci_ops,
+	.transmit_led_message	= seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+	.link_flags	= ATA_LFLAG_SW_ACTIVITY,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct seattle_plat_data *plat_data = hpriv->plat_data;
+	unsigned long flags;
+	int pmp;
+	struct ahci_em_priv *emp;
+	u32 val;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp >= EM_MAX_SLOTS)
+		return -EINVAL;
+	emp = &pp->em_priv[pmp];
+
+	val = ioread32(plat_data->sgpio_ctrl);
+	if (state & ACTIVITY_MASK)
+		val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+	if (state & LOCATE_MASK)
+		val |= 1 << LOCATE_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+	if (state & FAULT_MASK)
+		val |= 1 << FAULT_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+	iowrite32(val, plat_data->sgpio_ctrl);
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* save off new led state for port/slot */
+	emp->led_state = state;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+		struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+	struct device *dev = &pdev->dev;
+	struct seattle_plat_data *plat_data;
+	u32 val;
+
+	plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+	if (IS_ERR(plat_data))
+		return &ahci_port_info;
+
+	plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+			platform_get_resource(pdev, IORESOURCE_MEM, 1));
+	if (IS_ERR(plat_data->sgpio_ctrl))
+		return &ahci_port_info;
+
+	val = ioread32(plat_data->sgpio_ctrl);
+
+	if (!(val & 0xf))
+		return &ahci_port_info;
+
+	hpriv->em_loc = 0;
+	hpriv->em_buf_sz = 4;
+	hpriv->em_msg_type = EM_MSG_TYPE_LED;
+	hpriv->plat_data = plat_data;
+
+	dev_info(dev, "SGPIO LED control is enabled.\n");
+	return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ahci_host_priv *hpriv;
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_platform_init_host(pdev, hpriv,
+				     ahci_seattle_get_port_info(pdev, hpriv),
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+	{ "AMDI0600", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+	.probe = ahci_seattle_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.acpi_match_table = ahci_acpi_match,
+		.pm = &ahci_pm_ops,
+	},
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3982054060b8..a5d7c1c2a05e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
 			 port_map, hpriv->force_port_map);
 		port_map = hpriv->force_port_map;
+		hpriv->saved_port_map = port_map;
 	}
 
 	if (hpriv->mask_port_map) {
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b60092972..d8f4cc22856c 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	reg = opp_table->regulator;
 	if (IS_ERR(reg)) {
 		/* Regulator may not be required for device */
-		if (reg)
-			dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-				PTR_ERR(reg));
 		rcu_read_unlock();
 		return 0;
 	}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 9b1a65debd49..7f692accdc90 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
 
 static inline bool is_pset_node(struct fwnode_handle *fwnode)
 {
-	return fwnode && fwnode->type == FWNODE_PDATA;
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
 }
 
 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e93405f0eac4..c4acfc5273b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1557,21 +1557,25 @@ void cpufreq_suspend(void)
 	if (!cpufreq_driver)
 		return;
 
-	if (!has_target())
+	if (!has_target() && !cpufreq_driver->suspend)
 		goto suspend;
 
 	pr_debug("%s: Suspending Governors\n", __func__);
 
 	for_each_active_policy(policy) {
-		down_write(&policy->rwsem);
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		up_write(&policy->rwsem);
+		if (has_target()) {
+			down_write(&policy->rwsem);
+			ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+			up_write(&policy->rwsem);
 
-		if (ret)
+			if (ret) {
 			pr_err("%s: Failed to stop governor for policy: %p\n",
 				__func__, policy);
-		else if (cpufreq_driver->suspend
-		    && cpufreq_driver->suspend(policy))
+				continue;
+			}
+		}
+
+		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
 				policy);
 	}
@@ -1596,7 +1600,7 @@ void cpufreq_resume(void)
 
 	cpufreq_suspended = false;
 
-	if (!has_target())
+	if (!has_target() && !cpufreq_driver->resume)
 		return;
 
 	pr_debug("%s: Resuming Governors\n", __func__);
@@ -1605,7 +1609,7 @@ void cpufreq_resume(void)
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
-		} else {
+		} else if (has_target()) {
 			down_write(&policy->rwsem);
 			ret = cpufreq_start_governor(policy);
 			up_write(&policy->rwsem);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f502d5b90c25..b230ebaae66c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
 	}
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+	if (hwp_active)
+		intel_pstate_hwp_set(policy->cpus);
+
+	return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
 	get_online_cpus();
@@ -1062,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-		cpu->pstate.scaling, cpu->sample.mperf);
+	return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+			       int_tofp(cpu->pstate.max_pstate_physical *
+						cpu->pstate.scaling / 100)));
 }
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1106,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
-	intel_pstate_calc_busy(cpu);
-
 	/*
 	 * core_busy is the ratio of actual performance to max
 	 * max_pstate is the max non turbo pstate available
@@ -1191,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
 		bool sample_taken = intel_pstate_sample(cpu, time);
 
-		if (sample_taken && !hwp_active)
-			intel_pstate_adjust_busy_pstate(cpu);
+		if (sample_taken) {
+			intel_pstate_calc_busy(cpu);
+			if (!hwp_active)
+				intel_pstate_adjust_busy_pstate(cpu);
+		}
 	}
 }
 
@@ -1346,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  out:
 	intel_pstate_set_update_util_hook(policy->cpu);
 
-	if (hwp_active)
-		intel_pstate_hwp_set(policy->cpus);
+	intel_pstate_hwp_set_policy(policy);
 
 	return 0;
 }
@@ -1411,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
+	.resume		= intel_pstate_hwp_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
 	.stop_cpu	= intel_pstate_stop_cpu,
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a9c659f58974..04042038ec4b 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
 {
 	int ret;
 
+	if ((!of_machine_is_compatible("st,stih407")) &&
+		(!of_machine_is_compatible("st,stih410")))
+		return -ENODEV;
+
 	ddata.cpu = get_cpu_device(0);
 	if (!ddata.cpu) {
 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d5fdfb..e342565e8715 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
 		 * call the CPU ops suspend protocol with idle index as a
 		 * parameter.
 		 */
-		arm_cpuidle_suspend(idx);
+		ret = arm_cpuidle_suspend(idx);
 
 		cpu_pm_exit();
 	}
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0e82ce3c383e..976b01e58afb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 5c897e6e7994..3c3f948290ca 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
 	if (adf_init_aer())
 		goto err_aer;
 
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
 	if (qat_crypto_register())
 		goto err_crypto_register;
 
 	return 0;
 
 err_crypto_register:
+	adf_exit_pf_wq();
+err_pf_wq:
 	adf_exit_aer();
 err_aer:
 	adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
 	adf_chr_drv_destroy();
 	adf_exit_aer();
+	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 1117a8b58280..38a0415e767d 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 	int i;
 	u32 reg;
 
-	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-	if (!pf2vf_resp_wq)
-		return -ENOMEM;
-
 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
 	     i++, vf_info++) {
 		/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 
 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
-
-	if (pf2vf_resp_wq) {
-		destroy_workqueue(pf2vf_resp_wq);
-		pf2vf_resp_wq = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 815c4a5cae54..1b95475b6aef 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
 static inline void fw_cfg_read_blob(u16 key,
 				    void *buf, loff_t pos, size_t count)
 {
-	u32 glk;
+	u32 glk = -1U;
 	acpi_status status;
 
 	/* If we have ACPI, ensure mutual exclusion against any potential
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e557fc1f17c8..7ecea83ce453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -541,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 	if (!metadata_size) {
 		if (bo->metadata_size) {
 			kfree(bo->metadata);
+			bo->metadata = NULL;
 			bo->metadata_size = 0;
 		}
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..1cd6de575305 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* vertical FP must be at least 1 */
+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+		adjusted_mode->crtc_vsync_start++;
+
 	/* get the native mode for scaling */
 	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
 		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 30798cbc6fc0..6d2fb3f4ac62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
+
+	/*
+	 * Note that we need to set the power state explicitly, since we
+	 * powered off the device during freeze and the PCI core won't power
+	 * it back up for us during thaw. Powering off the device during
+	 * freeze is not a hard requirement though, and during the
+	 * suspend/resume phases the PCI core makes sure we get here with the
+	 * device powered on. So in case we change our freeze logic and keep
+	 * the device powered we can also remove the following set power state
+	 * call.
+	 */
+	ret = pci_set_power_state(dev->pdev, PCI_D0);
+	if (ret) {
+		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+		goto out;
+	}
+
+	/*
+	 * Note that pci_enable_device() first enables any parent bridge
+	 * device and only then sets the power state for this device. The
+	 * bridge enabling is a nop though, since bridge devices are resumed
+	 * first. The order of enabling power and enabling the device is
+	 * imposed by the PCI core as described above, so here we preserve the
+	 * same order for the freeze/thaw phases.
+	 *
+	 * TODO: eventually we should remove pci_disable_device() /
+	 * pci_enable_enable_device() from suspend/resume. Due to how they
+	 * depend on the device enable refcount we can't anyway depend on them
+	 * disabling/enabling the device.
+	 */
 	if (pci_enable_device(dev->pdev)) {
 		ret = -EIO;
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f76cbf3e5d1e..fffdac801d3b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP	_MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)	(((us) * 3) >> 2)
 #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 62de9f4bce09..3b57bf06abe8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
-		ddi_translations_edp = bdw_ddi_translations_edp;
+
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations_edp = bdw_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = bdw_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		}
+
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_default_entry = 7;
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-	/* HDMI has nothing special to destroy, so we can go with this. */
-	intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config)
 {
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-	.destroy = intel_ddi_destroy,
+	.reset = intel_dp_encoder_reset,
+	.destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_encoder->post_disable = intel_ddi_post_disable;
 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 	intel_encoder->get_config = intel_ddi_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
 
 	intel_dig_port->port = port;
 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6e0d8283daa6..182f84937345 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (state->legacy_cursor_update)
+			continue;
+
 		ret = intel_crtc_wait_for_pending_flips(crtc);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f069a82deb57..412a34c39522 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4c027d69fac9..7d3af3a72abe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a0d8daed2470..1ab6f687f640 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 						hdmi_to_dig_port(intel_hdmi));
 	}
 
-	if (!live_status)
-		DRM_DEBUG_KMS("Live status not up!");
+	if (!live_status) {
+		DRM_DEBUG_KMS("HDMI live status down\n");
+		/*
+		 * Live status register is not reliable on all intel platforms.
+		 * So consider live_status only for certain platforms, for
+		 * others, read EDID to determine presence of sink.
+		 */
+		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+			live_status = true;
+	}
 
 	intel_hdmi_unset_edid(connector);
 
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index edd05cdb0cd8..587cae4e73c9 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* vertical FP must be at least 1 */
+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+		adjusted_mode->crtc_vsync_start++;
+
 	/* get the native mode for scaling */
 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
 		radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e00db3f510dd..abb98c77bad2 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 			goto err_register;
 		}
 
-		pdev->dev.of_node = of_node;
 		pdev->dev.parent = dev;
 
 		ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 			platform_device_put(pdev);
 			goto err_register;
 		}
+
+		/*
+		 * Set of_node only after calling platform_device_add. Otherwise
+		 * the platform:imx-ipuv3-crtc modalias won't be used.
+		 */
+		pdev->dev.of_node = of_node;
 	}
 
 	return 0;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5613e2b5cff7..a40a73a7b71d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  * there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-				      struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
 	u32 cur_write_sz;
 	u32 r_size;
-	u32 write_loc = rbi->ring_buffer->write_index;
+	u32 write_loc;
 	u32 read_loc = rbi->ring_buffer->read_index;
-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+	u32 pending_sz;
 
+	/*
+	 * Issue a full memory barrier before making the signaling decision.
+	 * Here is the reason for having this barrier:
+	 * If the reading of the pend_sz (in this function)
+	 * were to be reordered and read before we commit the new read
+	 * index (in the calling function) we could
+	 * have a problem. If the host were to set the pending_sz after we
+	 * have sampled pending_sz and go to sleep before we commit the
+	 * read index, we could miss sending the interrupt. Issue a full
+	 * memory barrier to address this.
+	 */
+	mb();
+
+	pending_sz = rbi->ring_buffer->pending_send_sz;
+	write_loc = rbi->ring_buffer->write_index;
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
 		return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
 			read_loc - write_loc;
 
-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+	if (cur_write_sz >= pending_sz)
 		return true;
 
 	return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
 
-	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+	*signal = hv_need_to_signal_on_read(inring_info);
 
 	return ret;
 }
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index dbee13ad33a3..2e154cb51685 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
 	if (ret)
 		goto vref_disable;
 
+	platform_set_drvdata(pdev, indio_dev);
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0)
 		goto per_clk_disable_unprepare;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index f581256d9d4c..5ee4e0dc093e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
 	return 0;
 }
 
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return NULL;
+
+	*chip_id = (int)id->driver_data;
+
+	return dev_name(dev);
+}
+
 /**
  * inv_mpu_probe() - probe function.
  * @client: i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	struct inv_mpu6050_state *st;
-	int result;
-	const char *name = id ? id->name : NULL;
+	int result, chip_type;
 	struct regmap *regmap;
+	const char *name;
 
 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_I2C_BLOCK))
 		return -EOPNOTSUPP;
 
+	if (id) {
+		chip_type = (int)id->driver_data;
+		name = id->name;
+	} else if (ACPI_HANDLE(&client->dev)) {
+		name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+		if (!name)
+			return -ENODEV;
+	} else {
+		return -ENOSYS;
+	}
+
 	regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
 	if (IS_ERR(regmap)) {
 		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
 	}
 
 	result = inv_mpu_core_probe(regmap, client->irq, name,
-				    NULL, id->driver_data);
+				    NULL, chip_type);
 	if (result < 0)
 		return result;
 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index dea6c4361de0..7bcb8d839f05 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
 	struct regmap *regmap;
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	const char *name = id ? id->name : NULL;
+	const int chip_type = id ? id->driver_data : 0;
 
 	regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
 	if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
 	}
 
 	return inv_mpu_core_probe(regmap, spi->irq, name,
-				  inv_mpu_i2c_disable, id->driver_data);
+				  inv_mpu_i2c_disable, chip_type);
 }
 
 static int inv_mpu_remove(struct spi_device *spi)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 9c5c9ef3f1da..0e931a9a1669 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
 	int rc;
 	int irq;
 
+	init_waitqueue_head(&data->data_ready_queue);
+	clear_bit(0, &data->flags);
 	if (client->irq)
 		irq = client->irq;
 	else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
 		return rc;
 	}
 
-	init_waitqueue_head(&data->data_ready_queue);
-	clear_bit(0, &data->flags);
 	data->eoc_irq = irq;
 
 	return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
 	int eoc_gpio;
 	int err;
 	const char *name = NULL;
-	enum asahi_compass_chipset chipset;
+	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
 
 	/* Grab and set up the supplied GPIO. */
 	if (client->dev.platform_data)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 80b6bedc172f..64b3d11dcf1e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	struct Scsi_Host *shost;
 	struct iser_conn *iser_conn = NULL;
 	struct ib_conn *ib_conn;
+	u32 max_fr_sectors;
 	u16 max_cmds;
 
 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 		iser_conn = ep->dd_data;
 		max_cmds = iser_conn->max_cmds;
 		shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
-		shost->max_sectors = iser_conn->scsi_max_sectors;
 
 		mutex_lock(&iser_conn->state_mutex);
 		if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 		 */
 		shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
 			ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-		shost->max_sectors = min_t(unsigned int,
-			1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
 
 		if (iscsi_host_add(shost,
 				   ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 		goto free_host;
 	}
 
+	/*
+	 * FRs or FMRs can only map up to a (device) page per entry, but if the
+	 * first entry is misaligned we'll end up using using two entries
+	 * (head and tail) for a single page worth data, so we have to drop
+	 * one segment from the calculation.
+	 */
+	max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+	shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
 	if (cmds_max > max_cmds) {
 		iser_info("cmds_max changed from %u to %u\n",
 			  cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.queuecommand		= iscsi_queuecommand,
 	.change_queue_depth	= scsi_change_queue_depth,
 	.sg_tablesize		= ISCSI_ISER_DEF_SG_TABLESIZE,
-	.max_sectors		= ISER_DEF_MAX_SECTORS,
 	.cmd_per_lun		= ISER_DEF_CMD_PER_LUN,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 53e33fab3f7a..df3581f60628 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -181,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
 {
 	struct vibra_info *info = container_of(work,
 				struct vibra_info, play_work);
+	int ret;
+
+	/* Do not allow effect, while the routing is set to use audio */
+	ret = twl6040_get_vibralr_status(info->twl6040);
+	if (ret & TWL6040_VIBSEL) {
+		dev_info(info->dev, "Vibra is configured for audio\n");
+		return;
+	}
 
 	mutex_lock(&info->mutex);
 
@@ -199,14 +207,6 @@ static int vibra_play(struct input_dev *input, void *data,
 			      struct ff_effect *effect)
 {
 	struct vibra_info *info = input_get_drvdata(input);
-	int ret;
-
-	/* Do not allow effect, while the routing is set to use audio */
-	ret = twl6040_get_vibralr_status(info->twl6040);
-	if (ret & TWL6040_VIBSEL) {
-		dev_info(&input->dev, "Vibra is configured for audio\n");
-		return -EBUSY;
-	}
 
 	info->weak_speed = effect->u.rumble.weak_magnitude;
 	info->strong_speed = effect->u.rumble.strong_magnitude;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2160512e861a..5af7907d0af4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
 	return 0;
 }
 
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+	int error;
+
+	enable_irq(data->irq);
+
+	error = mxt_process_messages_until_invalid(data);
+	if (error)
+		return error;
+
+	return 0;
+}
+
 static int mxt_soft_reset(struct mxt_data *data)
 {
 	struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
 	/* Ignore CHG line for 100ms after reset */
 	msleep(100);
 
-	enable_irq(data->irq);
+	mxt_acquire_irq(data);
 
 	ret = mxt_wait_for_completion(data, &data->reset_completion,
 				      MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
 	return ret;
 }
 
-static int mxt_acquire_irq(struct mxt_data *data)
-{
-	int error;
-
-	enable_irq(data->irq);
-
-	error = mxt_process_messages_until_invalid(data);
-	if (error)
-		return error;
-
-	return 0;
-}
-
 static int mxt_get_info(struct mxt_data *data)
 {
 	struct i2c_client *client = data->client;
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaaf6bc3..7b3845aa5983 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 			point.coord_x = point.coord_y = 0;
 		}
 
-		point.state = payload[9 * i + 5] & 0x03;
-		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+		point.state = payload[9 * i + 5] & 0x0f;
+		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
 
 		/* determine touch major, minor and orientation */
 		point.area_major = max(payload[9 * i + 6],
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6e43c95629ea..3cfd7af8c5ca 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
846} 846}
847EXPORT_SYMBOL_GPL(media_device_find_devres); 847EXPORT_SYMBOL_GPL(media_device_find_devres);
848 848
849#if IS_ENABLED(CONFIG_PCI)
849void media_device_pci_init(struct media_device *mdev, 850void media_device_pci_init(struct media_device *mdev,
850 struct pci_dev *pci_dev, 851 struct pci_dev *pci_dev,
851 const char *name) 852 const char *name)
852{ 853{
853#ifdef CONFIG_PCI
854 mdev->dev = &pci_dev->dev; 854 mdev->dev = &pci_dev->dev;
855 855
856 if (name) 856 if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
866 mdev->driver_version = LINUX_VERSION_CODE; 866 mdev->driver_version = LINUX_VERSION_CODE;
867 867
868 media_device_init(mdev); 868 media_device_init(mdev);
869#endif
870} 869}
871EXPORT_SYMBOL_GPL(media_device_pci_init); 870EXPORT_SYMBOL_GPL(media_device_pci_init);
871#endif
872 872
873#if IS_ENABLED(CONFIG_USB)
873void __media_device_usb_init(struct media_device *mdev, 874void __media_device_usb_init(struct media_device *mdev,
874 struct usb_device *udev, 875 struct usb_device *udev,
875 const char *board_name, 876 const char *board_name,
876 const char *driver_name) 877 const char *driver_name)
877{ 878{
878#ifdef CONFIG_USB
879 mdev->dev = &udev->dev; 879 mdev->dev = &udev->dev;
880 880
881 if (driver_name) 881 if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
895 mdev->driver_version = LINUX_VERSION_CODE; 895 mdev->driver_version = LINUX_VERSION_CODE;
896 896
897 media_device_init(mdev); 897 media_device_init(mdev);
898#endif
899} 898}
900EXPORT_SYMBOL_GPL(__media_device_usb_init); 899EXPORT_SYMBOL_GPL(__media_device_usb_init);
900#endif
901 901
902 902
903#endif /* CONFIG_MEDIA_CONTROLLER */ 903#endif /* CONFIG_MEDIA_CONTROLLER */
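The media-device change swaps an #ifdef inside the function body for an #if IS_ENABLED() around the whole definition and its EXPORT_SYMBOL_GPL, so the symbol disappears entirely when the dependency is off, and IS_ENABLED() is also true for =m (relevant to the USB variant, since #ifdef CONFIG_USB only fires for =y). The shape of the pattern, with a hypothetical helper standing in for the real one:

#if IS_ENABLED(CONFIG_PCI)
void foo_device_pci_init(struct foo_device *fdev, struct pci_dev *pci_dev)
{
        fdev->dev = &pci_dev->dev;
        /* ... fill in bus info, then run the common init ... */
        foo_device_init(fdev);
}
EXPORT_SYMBOL_GPL(foo_device_pci_init);
#endif
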
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index feb521f28e14..4f494acd8150 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
1446 1446
1447 platform_set_drvdata(pdev, fmd); 1447 platform_set_drvdata(pdev, fmd);
1448 1448
1449 /* Protect the media graph while we're registering entities */
1450 mutex_lock(&fmd->media_dev.graph_mutex);
1451
1452 ret = fimc_md_register_platform_entities(fmd, dev->of_node); 1449 ret = fimc_md_register_platform_entities(fmd, dev->of_node);
1453 if (ret) { 1450 if (ret)
1454 mutex_unlock(&fmd->media_dev.graph_mutex);
1455 goto err_clk; 1451 goto err_clk;
1456 }
1457 1452
1458 ret = fimc_md_register_sensor_entities(fmd); 1453 ret = fimc_md_register_sensor_entities(fmd);
1459 if (ret) { 1454 if (ret)
1460 mutex_unlock(&fmd->media_dev.graph_mutex);
1461 goto err_m_ent; 1455 goto err_m_ent;
1462 }
1463
1464 mutex_unlock(&fmd->media_dev.graph_mutex);
1465 1456
1466 ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode); 1457 ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
1467 if (ret) 1458 if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 0b44b9accf50..af237af204e2 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
493 if (ret < 0) 493 if (ret < 0)
494 goto err_sens; 494 goto err_sens;
495 495
496 mutex_lock(&camif->media_dev.graph_mutex);
497
498 ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); 496 ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
499 if (ret < 0) 497 if (ret < 0)
500 goto err_unlock; 498 goto err_sens;
501 499
502 ret = camif_register_video_nodes(camif); 500 ret = camif_register_video_nodes(camif);
503 if (ret < 0) 501 if (ret < 0)
504 goto err_unlock; 502 goto err_sens;
505 503
506 ret = camif_create_media_links(camif); 504 ret = camif_create_media_links(camif);
507 if (ret < 0) 505 if (ret < 0)
508 goto err_unlock; 506 goto err_sens;
509
510 mutex_unlock(&camif->media_dev.graph_mutex);
511 507
512 ret = media_device_register(&camif->media_dev); 508 ret = media_device_register(&camif->media_dev);
513 if (ret < 0) 509 if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
516 pm_runtime_put(dev); 512 pm_runtime_put(dev);
517 return 0; 513 return 0;
518 514
519err_unlock:
520 mutex_unlock(&camif->media_dev.graph_mutex);
521err_sens: 515err_sens:
522 v4l2_device_unregister(&camif->v4l2_dev); 516 v4l2_device_unregister(&camif->v4l2_dev);
523 media_device_unregister(&camif->media_dev); 517 media_device_unregister(&camif->media_dev);
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index e94c7fb6712a..88e45234d527 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
945 ret = -EFAULT; 945 ret = -EFAULT;
946 goto free_ret; 946 goto free_ret;
947 } 947 }
948 /* Ensure desc has not changed between the two reads */
949 if (memcmp(&dd, dd_config, sizeof(dd))) {
950 ret = -EINVAL;
951 goto free_ret;
952 }
948 mutex_lock(&vdev->vdev_mutex); 953 mutex_lock(&vdev->vdev_mutex);
949 mutex_lock(&vi->vop_mutex); 954 mutex_lock(&vi->vop_mutex);
950 ret = vop_virtio_add_device(vdev, dd_config); 955 ret = vop_virtio_add_device(vdev, dd_config);
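The added memcmp() closes a double-fetch window: the ioctl reads the descriptor header once to size the copy and then copies the full buffer, so a concurrent writer in user space could change the header between the two reads. A hedged sketch of the overall pattern, with hypothetical names:

static int copy_desc_from_user(void __user *argp, struct desc_hdr **out)
{
        struct desc_hdr hdr, *full;
        size_t len;

        /* First fetch: just the header, to learn the total size. */
        if (copy_from_user(&hdr, argp, sizeof(hdr)))
                return -EFAULT;

        len = desc_total_size(&hdr);    /* hypothetical size helper */
        full = kzalloc(len, GFP_KERNEL);
        if (!full)
                return -ENOMEM;

        /* Second fetch: the whole descriptor. */
        if (copy_from_user(full, argp, len)) {
                kfree(full);
                return -EFAULT;
        }

        /* Reject the request if the header changed between the reads. */
        if (memcmp(&hdr, full, sizeof(hdr))) {
                kfree(full);
                return -EINVAL;
        }

        *out = full;
        return 0;
}
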
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 409152b21191..aa87049c353d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1608,21 +1608,22 @@ static int xgene_enet_probe(struct platform_device *pdev)
1608 1608
1609 ret = xgene_enet_init_hw(pdata); 1609 ret = xgene_enet_init_hw(pdata);
1610 if (ret) 1610 if (ret)
1611 goto err; 1611 goto err_netdev;
1612 1612
1613 mac_ops = pdata->mac_ops; 1613 mac_ops = pdata->mac_ops;
1614 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { 1614 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
1615 ret = xgene_enet_mdio_config(pdata); 1615 ret = xgene_enet_mdio_config(pdata);
1616 if (ret) 1616 if (ret)
1617 goto err; 1617 goto err_netdev;
1618 } else { 1618 } else {
1619 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); 1619 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
1620 } 1620 }
1621 1621
1622 xgene_enet_napi_add(pdata); 1622 xgene_enet_napi_add(pdata);
1623 return 0; 1623 return 0;
1624err: 1624err_netdev:
1625 unregister_netdev(ndev); 1625 unregister_netdev(ndev);
1626err:
1626 free_netdev(ndev); 1627 free_netdev(ndev);
1627 return ret; 1628 return ret;
1628} 1629}
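The relabelled exits in xgene_enet_probe() follow the usual unwind rule: each failure jumps to a label that undoes exactly the steps completed so far, and the labels run the teardowns in reverse order of setup, so later failure points get an extra label placed above the earlier cleanup. In generic form (names hypothetical):

        ret = setup_a(dev);
        if (ret)
                goto err;          /* nothing else to undo yet */

        ret = setup_b(dev);
        if (ret)
                goto err_a;        /* must also undo setup_a */

        /* ... remaining setup ... */
        return 0;

err_a:
        teardown_a(dev);
err:
        free_dev(dev);
        return ret;
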
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fd85b6dd4a6e..6a5a71710fa9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,6 +1439,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1439 if (!TX_CMP_VALID(txcmp, raw_cons)) 1439 if (!TX_CMP_VALID(txcmp, raw_cons))
1440 break; 1440 break;
1441 1441
1442 /* The valid test of the entry must be done first before
1443 * reading any further.
1444 */
1445 rmb();
1442 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1446 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1443 tx_pkts++; 1447 tx_pkts++;
1444 /* return full budget so NAPI will complete. */ 1448 /* return full budget so NAPI will complete. */
@@ -4096,9 +4100,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4096} 4100}
4097 4101
4098static int bnxt_cfg_rx_mode(struct bnxt *); 4102static int bnxt_cfg_rx_mode(struct bnxt *);
4103static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
4099 4104
4100static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 4105static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4101{ 4106{
4107 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4102 int rc = 0; 4108 int rc = 0;
4103 4109
4104 if (irq_re_init) { 4110 if (irq_re_init) {
@@ -4154,13 +4160,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4154 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 4160 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4155 goto err_out; 4161 goto err_out;
4156 } 4162 }
4157 bp->vnic_info[0].uc_filter_count = 1; 4163 vnic->uc_filter_count = 1;
4158 4164
4159 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4165 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4160 4166
4161 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) 4167 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4162 bp->vnic_info[0].rx_mask |= 4168 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4163 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4169
4170 if (bp->dev->flags & IFF_ALLMULTI) {
4171 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4172 vnic->mc_list_count = 0;
4173 } else {
4174 u32 mask = 0;
4175
4176 bnxt_mc_list_updated(bp, &mask);
4177 vnic->rx_mask |= mask;
4178 }
4164 4179
4165 rc = bnxt_cfg_rx_mode(bp); 4180 rc = bnxt_cfg_rx_mode(bp);
4166 if (rc) 4181 if (rc)
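The new rmb() in bnxt_poll_work() enforces the comment above it: on a DMA completion ring the valid bit must be observed before any other field of the entry is read, and a read barrier has to separate the two loads so the CPU cannot reorder them. The same pattern in generic form (names hypothetical):

        struct cmpl_entry *entry = &ring[cons];

        if (!entry_valid(entry, raw_cons))
                break;              /* producer has not written this slot yet */

        /* Order the valid-bit load before the rest of the descriptor. */
        rmb();

        process_entry(bp, entry);   /* now safe to read the other fields */
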
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bfa10c3da35f..c9f77c324535 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
1521 struct fec_enet_private *fep = netdev_priv(ndev); 1521 struct fec_enet_private *fep = netdev_priv(ndev);
1522 1522
1523 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1523 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1524 clear_bit(queue_id, &fep->work_rx); 1524 int ret;
1525 pkt_received += fec_enet_rx_queue(ndev, 1525
1526 ret = fec_enet_rx_queue(ndev,
1526 budget - pkt_received, queue_id); 1527 budget - pkt_received, queue_id);
1528
1529 if (ret < budget - pkt_received)
1530 clear_bit(queue_id, &fep->work_rx);
1531
1532 pkt_received += ret;
1527 } 1533 }
1528 return pkt_received; 1534 return pkt_received;
1529} 1535}
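The reworked FEC loop only clears a queue's work_rx bit when fec_enet_rx_queue() returned fewer packets than the budget left for it, i.e. when the queue was actually drained; a queue that used its whole share keeps its bit so the next NAPI poll revisits it. The resulting loop, restated without the diff markers:

        for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
                int ret;

                ret = fec_enet_rx_queue(ndev, budget - pkt_received, queue_id);

                /* Only mark the queue done if it stayed below its budget. */
                if (ret < budget - pkt_received)
                        clear_bit(queue_id, &fep->work_rx);

                pkt_received += ret;
        }
        return pkt_received;
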
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8ef6875b6cf9..c1b3a9c8cf3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -698,7 +698,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
698 698
699 if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) 699 if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
700 return -1; 700 return -1;
701 hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); 701 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
702 702
703 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 703 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
704 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 704 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
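The pseudo-header word covering the IPv6 next-header field is the 16-bit big-endian value 0x00<nexthdr>. Building it with htons() is correct on either endianness, while the old (nexthdr << 8) only happened to produce the right byte layout on little-endian hosts:

        /* wrong on big-endian: puts nexthdr in the high byte of the word */
        hw_checksum = csum_add(hw_checksum,
                               (__force __wsum)(ipv6h->nexthdr << 8));

        /* endian-safe: htons() yields the 0x00<nexthdr> word in network order */
        hw_checksum = csum_add(hw_checksum,
                               (__force __wsum)htons(ipv6h->nexthdr));
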
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 559d11a443bc..f5c3b9465d8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,7 +14,6 @@ config MLX5_CORE_EN
14 bool "Mellanox Technologies ConnectX-4 Ethernet support" 14 bool "Mellanox Technologies ConnectX-4 Ethernet support"
15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
16 select PTP_1588_CLOCK 16 select PTP_1588_CLOCK
17 select VXLAN if MLX5_CORE=y
18 default n 17 default n
19 ---help--- 18 ---help---
20 Ethernet support in Mellanox Technologies ConnectX-4 NIC. 19 Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -32,3 +31,10 @@ config MLX5_CORE_EN_DCB
32 This flag is depended on the kernel's DCB support. 31 This flag is depended on the kernel's DCB support.
33 32
34 If unsure, set to Y 33 If unsure, set to Y
34
35config MLX5_CORE_EN_VXLAN
36 bool "VXLAN offloads Support"
37 default y
38 depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
39 ---help---
40 Say Y here if you want to use VXLAN offloads in the driver.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b531d4f3c00b..e4a5b37b90ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -6,6 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
6 6
7mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ 7mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
8 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ 8 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
9 en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o 9 en_txrx.o en_clock.o en_tc.o en_arfs.o
10 10
11mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
11mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o 12mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bfa5daaaf5aa..7aea32e085b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -522,7 +522,12 @@ struct mlx5e_priv {
522 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; 522 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
523 523
524 struct mlx5e_flow_steering fs; 524 struct mlx5e_flow_steering fs;
525 struct mlx5e_flow_tables fts;
526 struct mlx5e_eth_addr_db eth_addr;
527 struct mlx5e_vlan_db vlan;
528#ifdef CONFIG_MLX5_CORE_EN_VXLAN
525 struct mlx5e_vxlan_db vxlan; 529 struct mlx5e_vxlan_db vxlan;
530#endif
526 531
527 struct mlx5e_params params; 532 struct mlx5e_params params;
528 struct workqueue_struct *wq; 533 struct workqueue_struct *wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1c70e518b5c5..b60a1bc6f457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2509,6 +2509,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2509 vf_stats); 2509 vf_stats);
2510} 2510}
2511 2511
2512#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
2512static void mlx5e_add_vxlan_port(struct net_device *netdev, 2513static void mlx5e_add_vxlan_port(struct net_device *netdev,
2513 sa_family_t sa_family, __be16 port) 2514 sa_family_t sa_family, __be16 port)
2514{ 2515{
@@ -2580,6 +2581,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2580 2581
2581 return features; 2582 return features;
2582} 2583}
2584#endif
2583 2585
2584static const struct net_device_ops mlx5e_netdev_ops_basic = { 2586static const struct net_device_ops mlx5e_netdev_ops_basic = {
2585 .ndo_open = mlx5e_open, 2587 .ndo_open = mlx5e_open,
@@ -2614,6 +2616,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2614 .ndo_set_features = mlx5e_set_features, 2616 .ndo_set_features = mlx5e_set_features,
2615 .ndo_change_mtu = mlx5e_change_mtu, 2617 .ndo_change_mtu = mlx5e_change_mtu,
2616 .ndo_do_ioctl = mlx5e_ioctl, 2618 .ndo_do_ioctl = mlx5e_ioctl,
2619#ifdef CONFIG_MLX5_CORE_EN_VXLAN
2617 .ndo_add_vxlan_port = mlx5e_add_vxlan_port, 2620 .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
2618 .ndo_del_vxlan_port = mlx5e_del_vxlan_port, 2621 .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
2619 .ndo_features_check = mlx5e_features_check, 2622 .ndo_features_check = mlx5e_features_check,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 129f3527aa14..217ac530a514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -48,14 +48,21 @@ struct mlx5e_vxlan_work {
48 48
49static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) 49static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
50{ 50{
51 return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && 51 return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
52 (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
52 mlx5_core_is_pf(mdev)); 53 mlx5_core_is_pf(mdev));
53} 54}
54 55
56#ifdef CONFIG_MLX5_CORE_EN_VXLAN
55void mlx5e_vxlan_init(struct mlx5e_priv *priv); 57void mlx5e_vxlan_init(struct mlx5e_priv *priv);
58void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
59#else
60static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
61static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
62#endif
63
56void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, 64void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
57 u16 port, int add); 65 u16 port, int add);
58struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); 66struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
59void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
60 67
61#endif /* __MLX5_VXLAN_H__ */ 68#endif /* __MLX5_VXLAN_H__ */
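Two complementary idioms in vxlan.h keep callers free of #ifdefs when the new option is off: IS_ENABLED() folds to a compile-time 0 inside mlx5e_vxlan_allowed(), so the capability checks are eliminated as dead code, and the init/cleanup entry points get empty static inline stubs instead of external declarations. The resulting header shape:

static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
        return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
               MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
               mlx5_core_is_pf(mdev);
}

#ifdef CONFIG_MLX5_CORE_EN_VXLAN
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
#else
static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
#endif
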
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 79cdd81d55ab..4a7273771028 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2843,11 +2843,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2843 lag->ref_count++; 2843 lag->ref_count++;
2844 return 0; 2844 return 0;
2845 2845
2846err_col_port_enable:
2847 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2846err_col_port_add: 2848err_col_port_add:
2847 if (!lag->ref_count) 2849 if (!lag->ref_count)
2848 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2850 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2849err_col_port_enable:
2850 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2851 return err; 2851 return err;
2852} 2852}
2853 2853
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index fb9efb84f13b..3710f19ed6bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
214 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, 214 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
215 table_type, range, local_port, set); 215 table_type, range, local_port, set);
216 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 216 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
217 if (err)
218 goto err_flood_bm_set;
219 else
220 goto buffer_out;
217 221
222err_flood_bm_set:
223 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
224 table_type, range, local_port, !set);
225 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
218buffer_out: 226buffer_out:
219 kfree(sftr_pl); 227 kfree(sftr_pl);
220 return err; 228 return err;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index db80eb1c6d4f..2b10f1bcd151 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
1015{ 1015{
1016 int i, v, addr; 1016 int i, v, addr;
1017 __le32 *ptr32; 1017 __le32 *ptr32;
1018 int ret;
1018 1019
1019 addr = base; 1020 addr = base;
1020 ptr32 = buf; 1021 ptr32 = buf;
1021 for (i = 0; i < size / sizeof(u32); i++) { 1022 for (i = 0; i < size / sizeof(u32); i++) {
1022 if (netxen_rom_fast_read(adapter, addr, &v) == -1) 1023 ret = netxen_rom_fast_read(adapter, addr, &v);
1023 return -1; 1024 if (ret)
1025 return ret;
1026
1024 *ptr32 = cpu_to_le32(v); 1027 *ptr32 = cpu_to_le32(v);
1025 ptr32++; 1028 ptr32++;
1026 addr += sizeof(u32); 1029 addr += sizeof(u32);
1027 } 1030 }
1028 if ((char *)buf + size > (char *)ptr32) { 1031 if ((char *)buf + size > (char *)ptr32) {
1029 __le32 local; 1032 __le32 local;
1030 if (netxen_rom_fast_read(adapter, addr, &v) == -1) 1033 ret = netxen_rom_fast_read(adapter, addr, &v);
1031 return -1; 1034 if (ret)
1035 return ret;
1032 local = cpu_to_le32(v); 1036 local = cpu_to_le32(v);
1033 memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); 1037 memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
1034 } 1038 }
@@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
1940 if (adapter->phy_read && 1944 if (adapter->phy_read &&
1941 adapter->phy_read(adapter, 1945 adapter->phy_read(adapter,
1942 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 1946 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
1943 &autoneg) != 0) 1947 &autoneg) == 0)
1944 adapter->link_autoneg = autoneg; 1948 adapter->link_autoneg = autoneg;
1945 } else 1949 } else
1946 goto link_down; 1950 goto link_down;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index cad37af1517d..7a0281a36c28 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter)
852 ptr32 = (__le32 *)&serial_num; 852 ptr32 = (__le32 *)&serial_num;
853 offset = NX_FW_SERIAL_NUM_OFFSET; 853 offset = NX_FW_SERIAL_NUM_OFFSET;
854 for (i = 0; i < 8; i++) { 854 for (i = 0; i < 8; i++) {
855 if (netxen_rom_fast_read(adapter, offset, &val) == -1) { 855 err = netxen_rom_fast_read(adapter, offset, &val);
856 if (err) {
856 dev_err(&pdev->dev, "error reading board info\n"); 857 dev_err(&pdev->dev, "error reading board info\n");
857 adapter->driver_mismatch = 1; 858 adapter->driver_mismatch = 1;
858 return; 859 return;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 82d85ccc9ed1..075faa52eb48 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -429,7 +429,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
429 u8 xmit_type; 429 u8 xmit_type;
430 u16 idx; 430 u16 idx;
431 u16 hlen; 431 u16 hlen;
432 bool data_split; 432 bool data_split = false;
433 433
434 /* Get tx-queue context and netdev index */ 434 /* Get tx-queue context and netdev index */
435 txq_index = skb_get_queue_mapping(skb); 435 txq_index = skb_get_queue_mapping(skb);
@@ -2094,8 +2094,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
2094 edev->q_num_rx_buffers = NUM_RX_BDS_DEF; 2094 edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
2095 edev->q_num_tx_buffers = NUM_TX_BDS_DEF; 2095 edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
2096 2096
2097 DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
2098
2099 SET_NETDEV_DEV(ndev, &pdev->dev); 2097 SET_NETDEV_DEV(ndev, &pdev->dev);
2100 2098
2101 memset(&edev->stats, 0, sizeof(edev->stats)); 2099 memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2274,9 +2272,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
2274{ 2272{
2275 struct qed_pf_params pf_params; 2273 struct qed_pf_params pf_params;
2276 2274
2277 /* 16 rx + 16 tx */ 2275 /* 64 rx + 64 tx */
2278 memset(&pf_params, 0, sizeof(struct qed_pf_params)); 2276 memset(&pf_params, 0, sizeof(struct qed_pf_params));
2279 pf_params.eth_pf_params.num_cons = 32; 2277 pf_params.eth_pf_params.num_cons = 128;
2280 qed_ops->common->update_pf_params(cdev, &pf_params); 2278 qed_ops->common->update_pf_params(cdev, &pf_params);
2281} 2279}
2282 2280
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 9c40b88fabd5..a6dc11ce497f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -495,8 +495,6 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
495 int gh_len; 495 int gh_len;
496 int err = -ENOSYS; 496 int err = -ENOSYS;
497 497
498 udp_tunnel_gro_complete(skb, nhoff);
499
500 gh = (struct genevehdr *)(skb->data + nhoff); 498 gh = (struct genevehdr *)(skb->data + nhoff);
501 gh_len = geneve_hlen(gh); 499 gh_len = geneve_hlen(gh);
502 type = gh->proto_type; 500 type = gh->proto_type;
@@ -507,6 +505,9 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
507 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); 505 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
508 506
509 rcu_read_unlock(); 507 rcu_read_unlock();
508
509 skb_set_inner_mac_header(skb, nhoff + gh_len);
510
510 return err; 511 return err;
511} 512}
512 513
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3add2c4aac21..460740ccc238 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -85,7 +85,7 @@ struct gcm_iv {
85 * @tfm: crypto struct, key storage 85 * @tfm: crypto struct, key storage
86 */ 86 */
87struct macsec_key { 87struct macsec_key {
88 u64 id; 88 u8 id[MACSEC_KEYID_LEN];
89 struct crypto_aead *tfm; 89 struct crypto_aead *tfm;
90}; 90};
91 91
@@ -1530,7 +1530,8 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
1530 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1530 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
1531 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1531 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
1532 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, 1532 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
1533 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 }, 1533 [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
1534 .len = MACSEC_KEYID_LEN, },
1534 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1535 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
1535 .len = MACSEC_MAX_KEY_LEN, }, 1536 .len = MACSEC_MAX_KEY_LEN, },
1536}; 1537};
@@ -1577,6 +1578,9 @@ static bool validate_add_rxsa(struct nlattr **attrs)
1577 return false; 1578 return false;
1578 } 1579 }
1579 1580
1581 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1582 return false;
1583
1580 return true; 1584 return true;
1581} 1585}
1582 1586
@@ -1642,7 +1646,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1642 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1646 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
1643 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1647 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
1644 1648
1645 rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); 1649 nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
1646 rx_sa->sc = rx_sc; 1650 rx_sa->sc = rx_sc;
1647 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1651 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
1648 1652
@@ -1723,6 +1727,9 @@ static bool validate_add_txsa(struct nlattr **attrs)
1723 return false; 1727 return false;
1724 } 1728 }
1725 1729
1730 if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
1731 return false;
1732
1726 return true; 1733 return true;
1727} 1734}
1728 1735
@@ -1778,7 +1785,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1778 return -ENOMEM; 1785 return -ENOMEM;
1779 } 1786 }
1780 1787
1781 tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); 1788 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
1782 1789
1783 spin_lock_bh(&tx_sa->lock); 1790 spin_lock_bh(&tx_sa->lock);
1784 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); 1791 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
@@ -2365,9 +2372,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2365 2372
2366 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2373 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2367 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2374 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
2368 nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, 2375 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
2369 tx_sa->key.id,
2370 MACSEC_SA_ATTR_PAD) ||
2371 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2376 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
2372 nla_nest_cancel(skb, txsa_nest); 2377 nla_nest_cancel(skb, txsa_nest);
2373 nla_nest_cancel(skb, txsa_list); 2378 nla_nest_cancel(skb, txsa_list);
@@ -2469,9 +2474,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2469 2474
2470 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2475 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2471 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2476 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
2472 nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, 2477 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
2473 rx_sa->key.id,
2474 MACSEC_SA_ATTR_PAD) ||
2475 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2478 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
2476 nla_nest_cancel(skb, rxsa_nest); 2479 nla_nest_cancel(skb, rxsa_nest);
2477 nla_nest_cancel(skb, rxsc_nest); 2480 nla_nest_cancel(skb, rxsc_nest);
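With the MACsec key identifier now a fixed-length byte array, the netlink plumbing changes in three places: the policy declares the attribute NLA_BINARY with MACSEC_KEYID_LEN as an upper bound, the validators check the exact length (NLA_BINARY only caps it), and the SA setup copies it with nla_memcpy() while dump_secy() emits it with nla_put(). A condensed sketch of that pattern, with hypothetical names:

static const struct nla_policy foo_sa_policy[NUM_FOO_SA_ATTR] = {
        [FOO_SA_ATTR_KEYID] = { .type = NLA_BINARY, .len = FOO_KEYID_LEN },
};

static int foo_set_keyid(u8 keyid[FOO_KEYID_LEN], struct nlattr *attr)
{
        /* NLA_BINARY only enforces a maximum; require an exact match. */
        if (nla_len(attr) != FOO_KEYID_LEN)
                return -EINVAL;

        nla_memcpy(keyid, attr, FOO_KEYID_LEN);
        return 0;
}
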
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 22b85b097cbc..bd6720962b1f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -384,7 +384,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
384 goto wake_up; 384 goto wake_up;
385 } 385 }
386 386
387 kfree_skb(skb); 387 consume_skb(skb);
388 while (segs) { 388 while (segs) {
389 struct sk_buff *nskb = segs->next; 389 struct sk_buff *nskb = segs->next;
390 390
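kfree_skb() is the error/drop path and is counted by drop monitoring (dropwatch, the kfree_skb tracepoint); consume_skb() is for an skb that was processed successfully and is simply finished with. After GSO segmentation the original macvtap skb falls in the second category, hence the swap above:

        if (unlikely(error))
                kfree_skb(skb);     /* counted as a packet drop */
        else
                consume_skb(skb);   /* normal end of life, not a drop */
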
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2668e528dee4..2f29d20aa08f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -613,8 +613,9 @@ out:
613 613
614static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 614static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
615{ 615{
616 udp_tunnel_gro_complete(skb, nhoff); 616 /* Sets 'skb->inner_mac_header' since we are always called with
617 617 * 'skb->encapsulation' set.
618 */
618 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 619 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
619} 620}
620 621
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f798899338ed..5101f3ab4f29 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -397,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
397 */ 397 */
398 start += start_pad; 398 start += start_pad;
399 npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; 399 npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
400 if (nd_pfn->mode == PFN_MODE_PMEM) 400 if (nd_pfn->mode == PFN_MODE_PMEM) {
401 offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align) 401 unsigned long memmap_size;
402
403 /*
404 * vmemmap_populate_hugepages() allocates the memmap array in
405 * HPAGE_SIZE chunks.
406 */
407 memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
408 offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
402 - start; 409 - start;
403 else if (nd_pfn->mode == PFN_MODE_RAM) 410 } else if (nd_pfn->mode == PFN_MODE_RAM)
404 offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; 411 offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
405 else 412 else
406 goto err; 413 goto err;
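The pmem reservation math now rounds the struct page array up to huge-page granularity before applying the namespace alignment: each 4K pfn costs 64 bytes of memmap, and vmemmap_populate_hugepages() hands that array out in HPAGE_SIZE chunks, so the in-pmem offset must cover the rounded size. In PFN_MODE_PMEM the calculation becomes:

                unsigned long memmap_size;

                /* 64 bytes of struct page per 4K pfn, allocated in
                 * HPAGE_SIZE chunks by vmemmap_populate_hugepages().
                 */
                memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
                offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
                        - start;
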
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8ba19bba3156..2bb3c5799ac4 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
94 if (ret) 94 if (ret)
95 goto close_banks; 95 goto close_banks;
96 96
97 while (val_size) { 97 while (val_size >= reg_size) {
98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { 98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
99 /* fill up non-data register */ 99 /* fill up non-data register */
100 *buf = 0; 100 *buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
103 } 103 }
104 104
105 buf++; 105 buf++;
106 val_size--; 106 val_size -= reg_size;
107 offset += reg_size; 107 offset += reg_size;
108 } 108 }
109 109
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 96168b819044..e165b7ce29d7 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
126 struct list_head node; 126 struct list_head node;
127 struct mport_dev *md; 127 struct mport_dev *md;
128 enum rio_mport_map_dir dir; 128 enum rio_mport_map_dir dir;
129 u32 rioid; 129 u16 rioid;
130 u64 rio_addr; 130 u64 rio_addr;
131 dma_addr_t phys_addr; /* for mmap */ 131 dma_addr_t phys_addr; /* for mmap */
132 void *virt_addr; /* kernel address, for dma_free_coherent */ 132 void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
137 137
138struct rio_mport_dma_map { 138struct rio_mport_dma_map {
139 int valid; 139 int valid;
140 uint64_t length; 140 u64 length;
141 void *vaddr; 141 void *vaddr;
142 dma_addr_t paddr; 142 dma_addr_t paddr;
143}; 143};
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
208 struct kfifo event_fifo; 208 struct kfifo event_fifo;
209 wait_queue_head_t event_rx_wait; 209 wait_queue_head_t event_rx_wait;
210 spinlock_t fifo_lock; 210 spinlock_t fifo_lock;
211 unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 211 u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
212#ifdef CONFIG_RAPIDIO_DMA_ENGINE 212#ifdef CONFIG_RAPIDIO_DMA_ENGINE
213 struct dma_chan *dmach; 213 struct dma_chan *dmach;
214 struct list_head async_list; 214 struct list_head async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
276 return -EFAULT; 276 return -EFAULT;
277 277
278 if ((maint_io.offset % 4) || 278 if ((maint_io.offset % 4) ||
279 (maint_io.length == 0) || (maint_io.length % 4)) 279 (maint_io.length == 0) || (maint_io.length % 4) ||
280 (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
280 return -EINVAL; 281 return -EINVAL;
281 282
282 buffer = vmalloc(maint_io.length); 283 buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
298 offset += 4; 299 offset += 4;
299 } 300 }
300 301
301 if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) 302 if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
303 buffer, maint_io.length)))
302 ret = -EFAULT; 304 ret = -EFAULT;
303out: 305out:
304 vfree(buffer); 306 vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
319 return -EFAULT; 321 return -EFAULT;
320 322
321 if ((maint_io.offset % 4) || 323 if ((maint_io.offset % 4) ||
322 (maint_io.length == 0) || (maint_io.length % 4)) 324 (maint_io.length == 0) || (maint_io.length % 4) ||
325 (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
323 return -EINVAL; 326 return -EINVAL;
324 327
325 buffer = vmalloc(maint_io.length); 328 buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
327 return -ENOMEM; 330 return -ENOMEM;
328 length = maint_io.length; 331 length = maint_io.length;
329 332
330 if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { 333 if (unlikely(copy_from_user(buffer,
334 (void __user *)(uintptr_t)maint_io.buffer, length))) {
331 ret = -EFAULT; 335 ret = -EFAULT;
332 goto out; 336 goto out;
333 } 337 }
@@ -360,7 +364,7 @@ out:
360 */ 364 */
361static int 365static int
362rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, 366rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
363 u32 rioid, u64 raddr, u32 size, 367 u16 rioid, u64 raddr, u32 size,
364 dma_addr_t *paddr) 368 dma_addr_t *paddr)
365{ 369{
366 struct rio_mport *mport = md->mport; 370 struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
369 373
370 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); 374 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
371 375
372 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 376 map = kzalloc(sizeof(*map), GFP_KERNEL);
373 if (map == NULL) 377 if (map == NULL)
374 return -ENOMEM; 378 return -ENOMEM;
375 379
@@ -394,7 +398,7 @@ err_map_outb:
394 398
395static int 399static int
396rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, 400rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
397 u32 rioid, u64 raddr, u32 size, 401 u16 rioid, u64 raddr, u32 size,
398 dma_addr_t *paddr) 402 dma_addr_t *paddr)
399{ 403{
400 struct rio_mport_mapping *map; 404 struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
433 dma_addr_t paddr; 437 dma_addr_t paddr;
434 int ret; 438 int ret;
435 439
436 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 440 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
437 return -EFAULT; 441 return -EFAULT;
438 442
439 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", 443 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
448 452
449 map.handle = paddr; 453 map.handle = paddr;
450 454
451 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) 455 if (unlikely(copy_to_user(arg, &map, sizeof(map))))
452 return -EFAULT; 456 return -EFAULT;
453 return 0; 457 return 0;
454} 458}
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
469 if (!md->mport->ops->unmap_outb) 473 if (!md->mport->ops->unmap_outb)
470 return -EPROTONOSUPPORT; 474 return -EPROTONOSUPPORT;
471 475
472 if (copy_from_user(&handle, arg, sizeof(u64))) 476 if (copy_from_user(&handle, arg, sizeof(handle)))
473 return -EFAULT; 477 return -EFAULT;
474 478
475 rmcd_debug(OBW, "h=0x%llx", handle); 479 rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
498static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) 502static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
499{ 503{
500 struct mport_dev *md = priv->md; 504 struct mport_dev *md = priv->md;
501 uint16_t hdid; 505 u16 hdid;
502 506
503 if (copy_from_user(&hdid, arg, sizeof(uint16_t))) 507 if (copy_from_user(&hdid, arg, sizeof(hdid)))
504 return -EFAULT; 508 return -EFAULT;
505 509
506 md->mport->host_deviceid = hdid; 510 md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
520static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) 524static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
521{ 525{
522 struct mport_dev *md = priv->md; 526 struct mport_dev *md = priv->md;
523 uint32_t comptag; 527 u32 comptag;
524 528
525 if (copy_from_user(&comptag, arg, sizeof(uint32_t))) 529 if (copy_from_user(&comptag, arg, sizeof(comptag)))
526 return -EFAULT; 530 return -EFAULT;
527 531
528 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); 532 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
837 * @xfer: data transfer descriptor structure 841 * @xfer: data transfer descriptor structure
838 */ 842 */
839static int 843static int
840rio_dma_transfer(struct file *filp, uint32_t transfer_mode, 844rio_dma_transfer(struct file *filp, u32 transfer_mode,
841 enum rio_transfer_sync sync, enum dma_data_direction dir, 845 enum rio_transfer_sync sync, enum dma_data_direction dir,
842 struct rio_transfer_io *xfer) 846 struct rio_transfer_io *xfer)
843{ 847{
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
875 unsigned long offset; 879 unsigned long offset;
876 long pinned; 880 long pinned;
877 881
878 offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; 882 offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
879 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; 883 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
880 884
881 page_list = kmalloc_array(nr_pages, 885 page_list = kmalloc_array(nr_pages,
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
1015 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) 1019 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
1016 return -EFAULT; 1020 return -EFAULT;
1017 1021
1018 if (transaction.count != 1) 1022 if (transaction.count != 1) /* only single transfer for now */
1019 return -EINVAL; 1023 return -EINVAL;
1020 1024
1021 if ((transaction.transfer_mode & 1025 if ((transaction.transfer_mode &
1022 priv->md->properties.transfer_mode) == 0) 1026 priv->md->properties.transfer_mode) == 0)
1023 return -ENODEV; 1027 return -ENODEV;
1024 1028
1025 transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); 1029 transfer = vmalloc(transaction.count * sizeof(*transfer));
1026 if (!transfer) 1030 if (!transfer)
1027 return -ENOMEM; 1031 return -ENOMEM;
1028 1032
1029 if (unlikely(copy_from_user(transfer, transaction.block, 1033 if (unlikely(copy_from_user(transfer,
1030 transaction.count * sizeof(struct rio_transfer_io)))) { 1034 (void __user *)(uintptr_t)transaction.block,
1035 transaction.count * sizeof(*transfer)))) {
1031 ret = -EFAULT; 1036 ret = -EFAULT;
1032 goto out_free; 1037 goto out_free;
1033 } 1038 }
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
1038 ret = rio_dma_transfer(filp, transaction.transfer_mode, 1043 ret = rio_dma_transfer(filp, transaction.transfer_mode,
1039 transaction.sync, dir, &transfer[i]); 1044 transaction.sync, dir, &transfer[i]);
1040 1045
1041 if (unlikely(copy_to_user(transaction.block, transfer, 1046 if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
1042 transaction.count * sizeof(struct rio_transfer_io)))) 1047 transfer,
1048 transaction.count * sizeof(*transfer))))
1043 ret = -EFAULT; 1049 ret = -EFAULT;
1044 1050
1045out_free: 1051out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
1129} 1135}
1130 1136
1131static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, 1137static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
1132 uint64_t size, struct rio_mport_mapping **mapping) 1138 u64 size, struct rio_mport_mapping **mapping)
1133{ 1139{
1134 struct rio_mport_mapping *map; 1140 struct rio_mport_mapping *map;
1135 1141
1136 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1142 map = kzalloc(sizeof(*map), GFP_KERNEL);
1137 if (map == NULL) 1143 if (map == NULL)
1138 return -ENOMEM; 1144 return -ENOMEM;
1139 1145
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1165 struct rio_mport_mapping *mapping = NULL; 1171 struct rio_mport_mapping *mapping = NULL;
1166 int ret; 1172 int ret;
1167 1173
1168 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) 1174 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1169 return -EFAULT; 1175 return -EFAULT;
1170 1176
1171 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); 1177 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1174 1180
1175 map.dma_handle = mapping->phys_addr; 1181 map.dma_handle = mapping->phys_addr;
1176 1182
1177 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { 1183 if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1178 mutex_lock(&md->buf_mutex); 1184 mutex_lock(&md->buf_mutex);
1179 kref_put(&mapping->ref, mport_release_mapping); 1185 kref_put(&mapping->ref, mport_release_mapping);
1180 mutex_unlock(&md->buf_mutex); 1186 mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
1192 int ret = -EFAULT; 1198 int ret = -EFAULT;
1193 struct rio_mport_mapping *map, *_map; 1199 struct rio_mport_mapping *map, *_map;
1194 1200
1195 if (copy_from_user(&handle, arg, sizeof(u64))) 1201 if (copy_from_user(&handle, arg, sizeof(handle)))
1196 return -EFAULT; 1202 return -EFAULT;
1197 rmcd_debug(EXIT, "filp=%p", filp); 1203 rmcd_debug(EXIT, "filp=%p", filp);
1198 1204
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
1242 1248
1243static int 1249static int
1244rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, 1250rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
1245 u64 raddr, u32 size, 1251 u64 raddr, u64 size,
1246 struct rio_mport_mapping **mapping) 1252 struct rio_mport_mapping **mapping)
1247{ 1253{
1248 struct rio_mport *mport = md->mport; 1254 struct rio_mport *mport = md->mport;
1249 struct rio_mport_mapping *map; 1255 struct rio_mport_mapping *map;
1250 int ret; 1256 int ret;
1251 1257
1252 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1258 /* rio_map_inb_region() accepts u32 size */
1259 if (size > 0xffffffff)
1260 return -EINVAL;
1261
1262 map = kzalloc(sizeof(*map), GFP_KERNEL);
1253 if (map == NULL) 1263 if (map == NULL)
1254 return -ENOMEM; 1264 return -ENOMEM;
1255 1265
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
1262 1272
1263 if (raddr == RIO_MAP_ANY_ADDR) 1273 if (raddr == RIO_MAP_ANY_ADDR)
1264 raddr = map->phys_addr; 1274 raddr = map->phys_addr;
1265 ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); 1275 ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
1266 if (ret < 0) 1276 if (ret < 0)
1267 goto err_map_inb; 1277 goto err_map_inb;
1268 1278
@@ -1288,7 +1298,7 @@ err_dma_alloc:
1288 1298
1289static int 1299static int
1290rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, 1300rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
1291 u64 raddr, u32 size, 1301 u64 raddr, u64 size,
1292 struct rio_mport_mapping **mapping) 1302 struct rio_mport_mapping **mapping)
1293{ 1303{
1294 struct rio_mport_mapping *map; 1304 struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1331 1341
1332 if (!md->mport->ops->map_inb) 1342 if (!md->mport->ops->map_inb)
1333 return -EPROTONOSUPPORT; 1343 return -EPROTONOSUPPORT;
1334 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 1344 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1335 return -EFAULT; 1345 return -EFAULT;
1336 1346
1337 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); 1347 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1344 map.handle = mapping->phys_addr; 1354 map.handle = mapping->phys_addr;
1345 map.rio_addr = mapping->rio_addr; 1355 map.rio_addr = mapping->rio_addr;
1346 1356
1347 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { 1357 if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1348 /* Delete mapping if it was created by this request */ 1358 /* Delete mapping if it was created by this request */
1349 if (ret == 0 && mapping->filp == filp) { 1359 if (ret == 0 && mapping->filp == filp) {
1350 mutex_lock(&md->buf_mutex); 1360 mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1375 if (!md->mport->ops->unmap_inb) 1385 if (!md->mport->ops->unmap_inb)
1376 return -EPROTONOSUPPORT; 1386 return -EPROTONOSUPPORT;
1377 1387
1378 if (copy_from_user(&handle, arg, sizeof(u64))) 1388 if (copy_from_user(&handle, arg, sizeof(handle)))
1379 return -EFAULT; 1389 return -EFAULT;
1380 1390
1381 mutex_lock(&md->buf_mutex); 1391 mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1401static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) 1411static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
1402{ 1412{
1403 struct mport_dev *md = priv->md; 1413 struct mport_dev *md = priv->md;
1404 uint32_t port_idx = md->mport->index; 1414 u32 port_idx = md->mport->index;
1405 1415
1406 rmcd_debug(MPORT, "port_index=%d", port_idx); 1416 rmcd_debug(MPORT, "port_index=%d", port_idx);
1407 1417
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
1451 handled = 0; 1461 handled = 0;
1452 spin_lock(&data->db_lock); 1462 spin_lock(&data->db_lock);
1453 list_for_each_entry(db_filter, &data->doorbells, data_node) { 1463 list_for_each_entry(db_filter, &data->doorbells, data_node) {
1454 if (((db_filter->filter.rioid == 0xffffffff || 1464 if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
1455 db_filter->filter.rioid == src)) && 1465 db_filter->filter.rioid == src)) &&
1456 info >= db_filter->filter.low && 1466 info >= db_filter->filter.low &&
1457 info <= db_filter->filter.high) { 1467 info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
1525 if (copy_from_user(&filter, arg, sizeof(filter))) 1535 if (copy_from_user(&filter, arg, sizeof(filter)))
1526 return -EFAULT; 1536 return -EFAULT;
1527 1537
1538 if (filter.low > filter.high)
1539 return -EINVAL;
1540
1528 spin_lock_irqsave(&priv->md->db_lock, flags); 1541 spin_lock_irqsave(&priv->md->db_lock, flags);
1529 list_for_each_entry(db_filter, &priv->db_filters, priv_node) { 1542 list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
1530 if (db_filter->filter.rioid == filter.rioid && 1543 if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1737 return -EEXIST; 1750 return -EEXIST;
1738 } 1751 }
1739 1752
1740 size = sizeof(struct rio_dev); 1753 size = sizeof(*rdev);
1741 mport = md->mport; 1754 mport = md->mport;
1742 destid = (u16)dev_info.destid; 1755 destid = dev_info.destid;
1743 hopcount = (u8)dev_info.hopcount; 1756 hopcount = dev_info.hopcount;
1744 1757
1745 if (rio_mport_read_config_32(mport, destid, hopcount, 1758 if (rio_mport_read_config_32(mport, destid, hopcount,
1746 RIO_PEF_CAR, &rval)) 1759 RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1872 do { 1885 do {
1873 rdev = rio_get_comptag(dev_info.comptag, rdev); 1886 rdev = rio_get_comptag(dev_info.comptag, rdev);
1874 if (rdev && rdev->dev.parent == &mport->net->dev && 1887 if (rdev && rdev->dev.parent == &mport->net->dev &&
1875 rdev->destid == (u16)dev_info.destid && 1888 rdev->destid == dev_info.destid &&
1876 rdev->hopcount == (u8)dev_info.hopcount) 1889 rdev->hopcount == dev_info.hopcount)
1877 break; 1890 break;
1878 } while (rdev); 1891 } while (rdev);
1879 } 1892 }
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
2146 return maint_port_idx_get(data, (void __user *)arg); 2159 return maint_port_idx_get(data, (void __user *)arg);
2147 case RIO_MPORT_GET_PROPERTIES: 2160 case RIO_MPORT_GET_PROPERTIES:
2148 md->properties.hdid = md->mport->host_deviceid; 2161 md->properties.hdid = md->mport->host_deviceid;
2149 if (copy_to_user((void __user *)arg, &(data->md->properties), 2162 if (copy_to_user((void __user *)arg, &(md->properties),
2150 sizeof(data->md->properties))) 2163 sizeof(md->properties)))
2151 return -EFAULT; 2164 return -EFAULT;
2152 return 0; 2165 return 0;
2153 case RIO_ENABLE_DOORBELL_RANGE: 2166 case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
2159 case RIO_DISABLE_PORTWRITE_RANGE: 2172 case RIO_DISABLE_PORTWRITE_RANGE:
2160 return rio_mport_remove_pw_filter(data, (void __user *)arg); 2173 return rio_mport_remove_pw_filter(data, (void __user *)arg);
2161 case RIO_SET_EVENT_MASK: 2174 case RIO_SET_EVENT_MASK:
2162 data->event_mask = arg; 2175 data->event_mask = (u32)arg;
2163 return 0; 2176 return 0;
2164 case RIO_GET_EVENT_MASK: 2177 case RIO_GET_EVENT_MASK:
2165 if (copy_to_user((void __user *)arg, &data->event_mask, 2178 if (copy_to_user((void __user *)arg, &data->event_mask,
2166 sizeof(data->event_mask))) 2179 sizeof(u32)))
2167 return -EFAULT; 2180 return -EFAULT;
2168 return 0; 2181 return 0;
2169 case RIO_MAP_OUTBOUND: 2182 case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
2374 return -EINVAL; 2387 return -EINVAL;
2375 2388
2376 ret = rio_mport_send_doorbell(mport, 2389 ret = rio_mport_send_doorbell(mport,
2377 (u16)event.u.doorbell.rioid, 2390 event.u.doorbell.rioid,
2378 event.u.doorbell.payload); 2391 event.u.doorbell.payload);
2379 if (ret < 0) 2392 if (ret < 0)
2380 return ret; 2393 return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2421 struct mport_dev *md; 2434 struct mport_dev *md;
2422 struct rio_mport_attr attr; 2435 struct rio_mport_attr attr;
2423 2436
2424 md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); 2437 md = kzalloc(sizeof(*md), GFP_KERNEL);
2425 if (!md) { 2438 if (!md) {
2426 rmcd_error("Unable allocate a device object"); 2439 rmcd_error("Unable allocate a device object");
2427 return NULL; 2440 return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2470 /* The transfer_mode property will be returned through mport query 2483 /* The transfer_mode property will be returned through mport query
2471 * interface 2484 * interface
2472 */ 2485 */
2473#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ 2486#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2474 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; 2487 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2475#else 2488#else
2476 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; 2489 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
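Most of the rio_mport_cdev cleanup is mechanical: kernel-style u16/u32/u64 instead of uintNN_t, sizeof(variable) or sizeof(*ptr) instead of repeating the type name, bounds checks on user-supplied offsets and lengths, and user pointers carried in u64 ABI fields converted back via uintptr_t. The two conventions worth spelling out, in generic form (map.address is assumed here to carry a user pointer):

        struct rio_mmap map;
        u64 handle;

        /* sizeof(variable): the size can never drift from the type. */
        if (copy_from_user(&map, arg, sizeof(map)))
                return -EFAULT;
        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        /* A user pointer stored in a u64 field must be laundered through
         * uintptr_t before becoming a void __user * again, or 32-bit builds
         * and sparse's address-space checking complain.
         */
        if (copy_to_user((void __user *)(uintptr_t)map.address, buf, len))
                return -EFAULT;
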
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 14718a9ffcfb..460c855be0d0 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
249 249
250 return retval; 250 return retval;
251} 251}
252
253static int usb_port_prepare(struct device *dev)
254{
255 return 1;
256}
257#endif 252#endif
258 253
259static const struct dev_pm_ops usb_port_pm_ops = { 254static const struct dev_pm_ops usb_port_pm_ops = {
260#ifdef CONFIG_PM 255#ifdef CONFIG_PM
261 .runtime_suspend = usb_port_runtime_suspend, 256 .runtime_suspend = usb_port_runtime_suspend,
262 .runtime_resume = usb_port_runtime_resume, 257 .runtime_resume = usb_port_runtime_resume,
263 .prepare = usb_port_prepare,
264#endif 258#endif
265}; 259};
266 260
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dcb85e3cd5a7..479187c32571 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
312 312
313static int usb_dev_prepare(struct device *dev) 313static int usb_dev_prepare(struct device *dev)
314{ 314{
315 struct usb_device *udev = to_usb_device(dev); 315 return 0; /* Implement eventually? */
316
317 /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
318 if (udev->do_remote_wakeup != device_may_wakeup(dev))
319 return 0;
320
321 return 1;
322} 316}
323 317
324static void usb_dev_complete(struct device *dev) 318static void usb_dev_complete(struct device *dev)
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5e5a8fa005f8..bc8889956d17 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
83{ 83{
84 usb_phy_generic_register(); 84 usb_phy_generic_register();
85 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 85 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
86 if (!musb->xceiv) { 86 if (IS_ERR(musb->xceiv)) {
87 pr_err("HS UDC: no transceiver configured\n"); 87 pr_err("HS UDC: no transceiver configured\n");
88 return -ENODEV; 88 return PTR_ERR(musb->xceiv);
89 } 89 }
90 90
91 /* Silicon does not implement ConfigData register. 91 /* Silicon does not implement ConfigData register.
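usb_get_phy() reports failure with an ERR_PTR()-encoded error rather than NULL, so the old !ptr test could never fire; the fix checks IS_ERR() and propagates PTR_ERR() instead of a hard-coded -ENODEV:

        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
        if (IS_ERR(musb->xceiv)) {
                pr_err("HS UDC: no transceiver configured\n");
                return PTR_ERR(musb->xceiv);
        }
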
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 87bd578799a8..152865b36522 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
                musb_writew(epio, MUSB_RXMAXP, 0);
        }

-       musb_ep->desc = NULL;
-       musb_ep->end_point.desc = NULL;
-
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);

+       musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
+
        schedule_work(&musb->irq_work);

        spin_unlock_irqrestore(&(musb->lock), flags);
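
Note: the musb_gadget_disable() hunk only reorders teardown: the endpoint's descriptor pointers are now cleared after nuke() has aborted outstanding DMA and requests rather than before, presumably so the abort/giveback path still sees a configured endpoint while it runs (an inference from the diff, not something it states). A generic sketch of that "cancel pending work before clearing the state it references" ordering, with invented names throughout:

    #include <stdio.h>
    #include <errno.h>

    struct demo_desc { int maxpacket; };

    struct demo_ep {
            const struct demo_desc *desc;   /* NULL means "disabled" */
            int pending;                    /* outstanding requests */
    };

    /* Abort everything still queued on the endpoint; the completion side
     * may still look at ep->desc, so it must not have been cleared yet. */
    static void demo_nuke(struct demo_ep *ep, int status)
    {
            while (ep->pending > 0) {
                    printf("aborting request (status %d, maxpacket %d)\n",
                           status, ep->desc ? ep->desc->maxpacket : -1);
                    ep->pending--;
            }
    }

    static void demo_disable(struct demo_ep *ep)
    {
            demo_nuke(ep, -ESHUTDOWN);
            ep->desc = NULL;        /* only now mark the endpoint unconfigured */
    }

    int main(void)
    {
            struct demo_desc d = { .maxpacket = 512 };
            struct demo_ep ep = { .desc = &d, .pending = 2 };

            demo_disable(&ep);
            return 0;
    }
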
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 58487a473521..2f8ad7f1f482 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
        .description = "musb-hcd",
        .product_desc = "MUSB HDRC host driver",
        .hcd_priv_size = sizeof(struct musb *),
-       .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags = HCD_USB2 | HCD_MEMORY,

        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index dd47823bb014..7c9f25e9c422 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+       { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
        { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+       { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+       { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
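
Note: the cp210x changes are pure device-ID table additions; the usb-serial core binds the driver to any device whose vendor/product pair matches an entry. A simplified, compilable sketch of how such a zero-terminated match table is scanned (demo_usb_id and demo_match are illustrative stand-ins, not the real usbcore matcher):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for struct usb_device_id: VID/PID matching only. */
    struct demo_usb_id {
            uint16_t vendor;
            uint16_t product;
    };

    static const struct demo_usb_id demo_table[] = {
            { 0x10C4, 0x82F4 },     /* Starizona MicroTouch */
            { 0x10C4, 0x84B6 },     /* Starizona Hyperion */
            { 0x12B8, 0xEC60 },     /* Link G4 ECU */
            { 0x12B8, 0xEC62 },     /* Link G4+ ECU */
            { 0, 0 }                /* terminating entry */
    };

    static const struct demo_usb_id *demo_match(uint16_t vid, uint16_t pid)
    {
            const struct demo_usb_id *id;

            for (id = demo_table; id->vendor || id->product; id++)
                    if (id->vendor == vid && id->product == pid)
                            return id;
            return NULL;
    }

    int main(void)
    {
            printf("0x12B8:0xEC60 %s\n",
                   demo_match(0x12B8, 0xEC60) ? "matches" : "no match");
            return 0;
    }
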
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c802d47892c..ca6bfddaacad 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
        const char *name)
 {
        struct virtqueue *vq;
-       void *queue;
+       void *queue = NULL;
        dma_addr_t dma_addr;
        size_t queue_size_in_bytes;
        struct vring vring;
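
Note: seeding queue with NULL gives the local a defined value on every path through vring_create_virtqueue(); the allocation loop that would otherwise assign it can be skipped entirely depending on the requested size, and the explicit initializer also keeps "may be used uninitialized" warnings quiet before the fallback allocation runs (the warning motivation is an assumption, not stated in the hunk). A small sketch of that allocate-with-fallback shape, with invented helpers and sizes:

    #include <stdlib.h>
    #include <stdio.h>

    /* Illustrative "preferred" allocator that can fail, standing in for a
     * contiguous or DMA-coherent allocation in the real driver. */
    static void *alloc_preferred(size_t size)
    {
            (void)size;
            return NULL;            /* pretend the fast path failed */
    }

    static void *alloc_queue(size_t size)
    {
            void *queue = NULL;     /* defined even if no branch assigns it */

            for (size_t try = size; try >= 64; try /= 2) {
                    queue = alloc_preferred(try);
                    if (queue)
                            break;
            }

            if (!queue)             /* well-defined check thanks to = NULL */
                    queue = malloc(size);

            return queue;
    }

    int main(void)
    {
            void *q = alloc_queue(4096);

            printf("queue %sallocated\n", q ? "" : "not ");
            free(q);
            return 0;
    }
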
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 9781e0dd59d6..d46839f51e73 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

+static void release_memory_resource(struct resource *resource);
+
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                return NULL;
        }

+#ifdef CONFIG_SPARSEMEM
+       {
+               unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+               unsigned long pfn = res->start >> PAGE_SHIFT;
+
+               if (pfn > limit) {
+                       pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+                              pfn, limit);
+                       release_memory_resource(res);
+                       return NULL;
+               }
+       }
+#endif
+
        return res;
 }

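
Note: the new balloon check rejects hotplug resources whose page frame number lies beyond what SPARSEMEM can represent; the limit is simply 2^(MAX_PHYSMEM_BITS - PAGE_SHIFT) pages. A standalone sketch of the arithmetic with example values (46 physical-address bits, 4 KiB pages, and the resource address are illustrative assumptions):

    #include <stdio.h>

    int main(void)
    {
            /* Example values only: x86-64 SPARSEMEM commonly uses 46 bits,
             * and PAGE_SHIFT is 12 for 4 KiB pages. */
            const unsigned int max_physmem_bits = 46;
            const unsigned int page_shift = 12;

            unsigned long long limit = 1ULL << (max_physmem_bits - page_shift);
            unsigned long long res_start = 0x100000000000ULL;  /* 16 TiB, made up */
            unsigned long long pfn = res_start >> page_shift;

            if (pfn > limit)
                    printf("resource outside addressable RAM (%llu > %llu)\n",
                           pfn, limit);
            else
                    printf("pfn %llu fits below limit %llu\n", pfn, limit);
            return 0;
    }
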
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 38272ad24551..f4edd6df3df2 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
 {
        unsigned int new_size;
        evtchn_port_t *new_ring, *old_ring;
-       unsigned int p, c;

        /*
         * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
        /*
         * Copy the old ring contents to the new ring.
         *
-        * If the ring contents crosses the end of the current ring,
-        * it needs to be copied in two chunks.
+        * To take care of wrapping, a full ring, and the new index
+        * pointing into the second half, simply copy the old contents
+        * twice.
         *
         * +---------+    +------------------+
-        * |34567  12| -> |       1234567    |
-        * +-----p-c-+    +------------------+
+        * |34567  12| -> |34567  1234567  12|
+        * +-----p-c-+    +-------c------p---+
         */
-       p = evtchn_ring_offset(u, u->ring_prod);
-       c = evtchn_ring_offset(u, u->ring_cons);
-       if (p < c) {
-               memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
-               memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
-       } else
-               memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+       memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+       memcpy(new_ring + u->ring_size, old_ring,
+              u->ring_size * sizeof(*u->ring));

        u->ring = new_ring;
        u->ring_size = new_size;
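
Note: the rewritten resize copy relies on the ring size being a power of two and on ring_cons/ring_prod being free-running indices masked by the size: an entry at idx & (old_size - 1) must show up at idx & (2*old_size - 1) in the doubled ring, which is either the same offset or that offset plus old_size, so duplicating the old contents into both halves covers every case without computing p and c. A compilable sketch of that argument with toy values (helpers and layout are simplified and unrelated to the real evtchn structures):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Offset of a free-running index in a power-of-two sized ring. */
    static unsigned int ring_offset(unsigned int idx, unsigned int size)
    {
            return idx & (size - 1);
    }

    int main(void)
    {
            unsigned int size = 4, cons = 5, prod = 9;  /* full ring, example */
            int ring[4];

            /* Fill the small ring as if entries 5..8 had been produced. */
            for (unsigned int i = cons; i < prod; i++)
                    ring[ring_offset(i, size)] = (int)i;

            /* Resize by doubling: copy the old contents twice, as the patch does. */
            unsigned int new_size = size * 2;
            int *new_ring = malloc(new_size * sizeof(*new_ring));

            if (!new_ring)
                    return 1;
            memcpy(new_ring, ring, size * sizeof(*ring));
            memcpy(new_ring + size, ring, size * sizeof(*ring));

            /* Every outstanding entry is found at its new offset. */
            for (unsigned int i = cons; i < prod; i++)
                    printf("idx %u -> new_ring[%u] = %d\n",
                           i, ring_offset(i, new_size),
                           new_ring[ring_offset(i, new_size)]);

            free(new_ring);
            return 0;
    }
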