author    Paolo Bonzini <pbonzini@redhat.com>  2015-11-24 13:34:40 -0500
committer Paolo Bonzini <pbonzini@redhat.com>  2015-11-24 13:34:40 -0500
commit    8bd142c01648cdb33e9bcafa0448ba2c20ed814c (patch)
tree      9197c60d3f9d4036f38f281a183e94750ceea1d7 /drivers
parent    d792abacaf1a1a8dfea353fab699b97fa6251c2a (diff)
parent    fbb4574ce9a37e15a9872860bf202f2be5bdf6c4 (diff)
Merge tag 'kvm-arm-for-v4.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
KVM/ARM Fixes for v4.4-rc3. Includes some timer fixes, properly unmapping PTEs, an errata fix, and two tweaks to the EL2 panic code.
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/cppc_acpi.c | 2
-rw-r--r-- drivers/acpi/ec.c | 2
-rw-r--r-- drivers/acpi/sbshc.c | 48
-rw-r--r-- drivers/base/power/wakeirq.c | 6
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 82
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 8
-rw-r--r-- drivers/clocksource/Kconfig | 1
-rw-r--r-- drivers/clocksource/fsl_ftm_timer.c | 4
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 1
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 316
-rw-r--r-- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2
-rw-r--r-- drivers/dma/at_hdmac.c | 20
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 6
-rw-r--r-- drivers/dma/at_xdmac.c | 20
-rw-r--r-- drivers/dma/edma.c | 4
-rw-r--r-- drivers/dma/imx-sdma.c | 2
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 120
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 177
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 101
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 94
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 138
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 302
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 24
-rw-r--r-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 6
-rw-r--r-- drivers/gpu/drm/amd/scheduler/sched_fence.c | 10
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 61
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 29
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 51
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 31
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 75
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 10
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_cursor.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 18
-rw-r--r-- drivers/hid/wacom_wac.c | 5
-rw-r--r-- drivers/hwmon/Kconfig | 2
-rw-r--r-- drivers/hwmon/applesmc.c | 2
-rw-r--r-- drivers/hwmon/scpi-hwmon.c | 21
-rw-r--r-- drivers/i2c/busses/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-xiic.c | 4
-rw-r--r-- drivers/i2c/i2c-core.c | 2
-rw-r--r-- drivers/iio/adc/ad7793.c | 2
-rw-r--r-- drivers/iio/adc/vf610_adc.c | 22
-rw-r--r-- drivers/iio/adc/xilinx-xadc-core.c | 1
-rw-r--r-- drivers/iio/dac/ad5064.c | 91
-rw-r--r-- drivers/iio/humidity/si7020.c | 8
-rw-r--r-- drivers/iommu/s390-iommu.c | 23
-rw-r--r-- drivers/irqchip/irq-gic-common.c | 13
-rw-r--r-- drivers/irqchip/irq-gic.c | 38
-rw-r--r-- drivers/media/pci/cx23885/cx23885-core.c | 4
-rw-r--r-- drivers/media/pci/cx25821/cx25821-core.c | 3
-rw-r--r-- drivers/media/pci/cx88/cx88-alsa.c | 4
-rw-r--r-- drivers/media/pci/cx88/cx88-mpeg.c | 3
-rw-r--r-- drivers/media/pci/cx88/cx88-video.c | 4
-rw-r--r-- drivers/media/pci/netup_unidvb/netup_unidvb_core.c | 2
-rw-r--r-- drivers/media/pci/saa7134/saa7134-core.c | 4
-rw-r--r-- drivers/media/pci/saa7164/saa7164-core.c | 4
-rw-r--r-- drivers/media/pci/tw68/tw68-core.c | 4
-rw-r--r-- drivers/mmc/card/block.c | 11
-rw-r--r-- drivers/mmc/core/mmc.c | 93
-rw-r--r-- drivers/mmc/host/Kconfig | 1
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 2
-rw-r--r-- drivers/mmc/host/pxamci.c | 2
-rw-r--r-- drivers/mtd/nand/jz4740_nand.c | 1
-rw-r--r-- drivers/mtd/nand/nand_base.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 114
-rw-r--r-- drivers/net/dsa/mv88e6060.h | 111
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 10
-rw-r--r-- drivers/net/ethernet/dlink/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 55
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/Kconfig | 13
-rw-r--r-- drivers/net/ethernet/icplus/Makefile | 5
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2300
-rw-r--r-- drivers/net/ethernet/icplus/ipg.h | 748
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 76
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 10
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 24
-rw-r--r-- drivers/net/fjes/fjes_hw.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 14
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/phy/at803x.c | 4
-rw-r--r-- drivers/net/phy/marvell.c | 16
-rw-r--r-- drivers/net/phy/phy.c | 3
-rw-r--r-- drivers/net/phy/vitesse.c | 16
-rw-r--r-- drivers/net/usb/cdc_ether.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 7
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/pci/probe.c | 4
-rw-r--r-- drivers/s390/cio/chsc.c | 37
-rw-r--r-- drivers/s390/cio/chsc.h | 15
-rw-r--r-- drivers/s390/cio/cio.c | 14
-rw-r--r-- drivers/s390/cio/css.c | 5
-rw-r--r-- drivers/s390/crypto/Makefile | 7
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 6
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 10
-rw-r--r-- drivers/s390/crypto/zcrypt_api.h | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype50.c | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_msgtype6.c | 3
-rw-r--r-- drivers/sh/pm_runtime.c | 2
-rw-r--r-- drivers/staging/iio/Kconfig | 3
-rw-r--r-- drivers/staging/iio/adc/lpc32xx_adc.c | 4
-rw-r--r-- drivers/staging/wilc1000/coreconfigurator.c | 48
-rw-r--r-- drivers/tty/n_tty.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_fsl.c | 1
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 1
-rw-r--r-- drivers/tty/serial/Kconfig | 2
-rw-r--r-- drivers/tty/serial/bcm63xx_uart.c | 2
-rw-r--r-- drivers/tty/serial/etraxfs-uart.c | 2
-rw-r--r-- drivers/tty/tty_audit.c | 2
-rw-r--r-- drivers/tty/tty_io.c | 4
-rw-r--r-- drivers/tty/tty_ioctl.c | 4
-rw-r--r-- drivers/tty/tty_ldisc.c | 2
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 142
-rw-r--r-- drivers/usb/chipidea/debug.c | 2
-rw-r--r-- drivers/usb/chipidea/udc.c | 17
-rw-r--r-- drivers/usb/chipidea/usbmisc_imx.c | 10
-rw-r--r-- drivers/usb/class/usblp.c | 2
-rw-r--r-- drivers/usb/core/Kconfig | 3
-rw-r--r-- drivers/usb/dwc2/hcd.c | 9
-rw-r--r-- drivers/usb/dwc2/platform.c | 3
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 24
-rw-r--r-- drivers/usb/gadget/function/f_loopback.c | 2
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 15
-rw-r--r-- drivers/usb/host/xhci-ring.c | 32
-rw-r--r-- drivers/usb/host/xhci.c | 10
-rw-r--r-- drivers/usb/musb/musb_core.c | 12
-rw-r--r-- drivers/usb/musb/musb_host.c | 22
-rw-r--r-- drivers/usb/phy/Kconfig | 4
-rw-r--r-- drivers/usb/phy/phy-mxs-usb.c | 7
-rw-r--r-- drivers/usb/phy/phy-omap-otg.c | 2
-rw-r--r-- drivers/usb/serial/option.c | 11
-rw-r--r-- drivers/usb/serial/qcserial.c | 94
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.h | 4
177 files changed, 2302 insertions, 4456 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3c083d2cc434..6730f965b379 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
 static int register_pcc_channel(int pcc_subspace_idx)
 {
-        struct acpi_pcct_subspace *cppc_ss;
+        struct acpi_pcct_hw_reduced *cppc_ss;
         unsigned int len;
 
         if (pcc_subspace_idx >= 0) {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c834540..b420fb46669d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
         }
 
 err_exit:
-        if (result && q)
+        if (result)
                 acpi_ec_delete_query(q);
         if (data)
                 *data = value;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8b7c1a..2fa8304171e0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
         u8 query_bit;
         smbus_alarm_callback callback;
         void *context;
+        bool done;
 };
 
 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
         ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
 };
 
-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
         return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
         return ec_write(hc->offset + address, data);
 }
 
-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-        union acpi_smb_status status = {.raw = 0};
-        smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-        return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-        if (wait_event_timeout(hc->wait, smb_check_done(hc),
-                               msecs_to_jiffies(timeout)))
+        if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
                 return 0;
-        /*
-         * After the timeout happens, OS will try to check the status of SMbus.
-         * If the status is what OS expected, it will be regarded as the bogus
-         * timeout.
-         */
-        if (smb_check_done(hc))
-                return 0;
-        else
-                return -ETIME;
+        return -ETIME;
 }
 
 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
         }
 
         mutex_lock(&hc->lock);
-        if (macbook)
-                udelay(5);
+        hc->done = false;
         if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
                 goto end;
         if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
         if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
                 return 0;
         /* Check if it is only a completion notify */
-        if (status.fields.done)
+        if (status.fields.done && status.fields.status == SMBUS_OK) {
+                hc->done = true;
                 wake_up(&hc->wait);
+        }
         if (!status.fields.alarm)
                 return 0;
         mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
                               acpi_handle handle, acpi_ec_query_func func,
                               void *data);
 
-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-        pr_debug("Detected MacBook, enabling workaround\n");
-        macbook = true;
-        return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-        { macbook_dmi_match, "Apple MacBook", {
-          DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-          DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-        },
-        { },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
         int status;
         unsigned long long val;
         struct acpi_smb_hc *hc;
 
-        dmi_check_system(acpi_smbus_dmi_table);
-
         if (!device)
                 return -EINVAL;
 
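
The sbshc change above stops re-reading the EC status register from the wait condition and instead latches a done flag in the notify handler, qualified by SMBUS_OK, before calling wake_up(). A minimal userspace sketch of that completion pattern, with pthreads standing in for the kernel wait queue (all names here are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool done;                       /* plays the role of hc->done */

    /* Completion side: set the flag first, then wake the waiter. */
    static void *complete_transaction(void *arg)
    {
            usleep(100 * 1000);             /* the transaction finishing */
            pthread_mutex_lock(&lock);
            done = true;                    /* hc->done = true;    */
            pthread_cond_signal(&cond);     /* wake_up(&hc->wait); */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    /* Waiting side: analog of wait_transaction_complete(). */
    static int wait_done(int timeout_ms)
    {
            struct timespec ts;
            int err = 0;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += timeout_ms / 1000;
            ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
            if (ts.tv_nsec >= 1000000000L) {
                    ts.tv_sec++;
                    ts.tv_nsec -= 1000000000L;
            }

            pthread_mutex_lock(&lock);
            while (!done && err == 0)       /* recheck after every wakeup */
                    err = pthread_cond_timedwait(&cond, &lock, &ts);
            pthread_mutex_unlock(&lock);
            return done ? 0 : -1;           /* -1 stands in for -ETIME */
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, complete_transaction, NULL);
            printf("wait_done() = %d\n", wait_done(1000));
            pthread_join(t, NULL);
            return 0;
    }

The point of the flag is that the condition is re-evaluated on every wakeup, so the status qualification happens once, at the source of the event, instead of being sampled again after a timeout.
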
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
         struct wake_irq *wirq;
         int err;
 
+        if (irq < 0)
+                return -EINVAL;
+
         wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
         if (!wirq)
                 return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
         struct wake_irq *wirq;
         int err;
 
+        if (irq < 0)
+                return -EINVAL;
+
         wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
         if (!wirq)
                 return -ENOMEM;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071..55fe9020459f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
         return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+        smi_info->last_timeout_jiffies = jiffies;
+        mod_timer(&smi_info->si_timer, new_val);
+        smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+                          unsigned int size)
+{
+        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+        if (smi_info->thread)
+                wake_up_process(smi_info->thread);
+
+        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
         unsigned char msg[2];
 
         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+        if (start_timer)
+                start_new_msg(smi_info, msg, 2);
+        else
+                smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
         smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
         unsigned char msg[3];
 
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
         msg[2] = WDT_PRE_TIMEOUT_INT;
 
-        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+        if (start_timer)
+                start_new_msg(smi_info, msg, 3);
+        else
+                smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
         smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
         smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
         smi_info->curr_msg->data_size = 2;
 
-        smi_info->handlers->start_transaction(
-                smi_info->si_sm,
-                smi_info->curr_msg->data,
-                smi_info->curr_msg->data_size);
+        start_new_msg(smi_info, smi_info->curr_msg->data,
+                      smi_info->curr_msg->data_size);
         smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
         smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
         smi_info->curr_msg->data_size = 2;
 
-        smi_info->handlers->start_transaction(
-                smi_info->si_sm,
-                smi_info->curr_msg->data,
-                smi_info->curr_msg->data_size);
+        start_new_msg(smi_info, smi_info->curr_msg->data,
+                      smi_info->curr_msg->data_size);
         smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-        smi_info->last_timeout_jiffies = jiffies;
-        mod_timer(&smi_info->si_timer, new_val);
-        smi_info->timer_running = true;
-}
-
 /*
  * When we have a situtaion where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                 smi_info->interrupt_disabled = true;
-                start_check_enables(smi_info);
+                start_check_enables(smi_info, start_timer);
                 return true;
         }
         return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                 smi_info->interrupt_disabled = false;
-                start_check_enables(smi_info);
+                start_check_enables(smi_info, true);
                 return true;
         }
         return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
         msg = ipmi_alloc_smi_msg();
         if (!msg) {
-                if (!disable_si_irq(smi_info))
+                if (!disable_si_irq(smi_info, true))
                         smi_info->si_state = SI_NORMAL;
         } else if (enable_si_irq(smi_info)) {
                 ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
                 /* Watchdog pre-timeout */
                 smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-                start_clear_flags(smi_info);
+                start_clear_flags(smi_info, true);
                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                 if (smi_info->intf)
                         ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                         msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-                        smi_info->handlers->start_transaction(
-                                smi_info->si_sm, msg, 2);
+                        start_new_msg(smi_info, msg, 2);
                         smi_info->si_state = SI_GETTING_FLAGS;
                         goto restart;
                 }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                  * disable and messages disabled.
                  */
                 if (smi_info->supports_event_msg_buff || smi_info->irq) {
-                        start_check_enables(smi_info);
+                        start_check_enables(smi_info, true);
                 } else {
                         smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                         if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                 }
                 goto restart;
         }
+
+        if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+                /* Ok it if fails, the timer will just go off. */
+                if (del_timer(&smi_info->si_timer))
+                        smi_info->timer_running = false;
+        }
+
  out:
         return si_sm_result;
 }
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
           .data = (void *)(unsigned long) SI_BT },
         {},
 };
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
 
 static int of_ipmi_probe(struct platform_device *dev)
 {
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
         }
         return 0;
 }
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
 #else
 #define of_ipmi_match NULL
 static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
          * Start clearing the flags before we enable interrupts or the
          * timer to avoid racing with the timer.
          */
-        start_clear_flags(new_smi);
+        start_clear_flags(new_smi, false);
 
         /*
          * IRQ is defined to be set when non-zero.  req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
                 poll(to_clean);
                 schedule_timeout_uninterruptible(1);
         }
-        disable_si_irq(to_clean);
+        disable_si_irq(to_clean, false);
         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                 poll(to_clean);
                 schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1a5497..096f0cef4da1 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,6 +153,9 @@ static int timeout = 10;
 /* The pre-timeout is disabled by default. */
 static int pretimeout;
 
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
 /* Default action is to reset the board on a timeout. */
 static unsigned char action_val = WDOG_TIMEOUT_RESET;
 
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 module_param(pretimeout, timeout, 0644);
 MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
 
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+
 module_param_cb(action, &param_ops_str, action_op, 0644);
 MODULE_PARM_DESC(action, "Timeout action. One of: "
                  "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
                 /* Make sure we do this only once. */
                 panic_event_handled = 1;
 
-                timeout = 255;
+                timeout = panic_wdt_timeout;
                 pretimeout = 0;
                 panic_halt_ipmi_set_timeout();
         }
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 71cfdf7c9708..2eb5f0efae90 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,4 +1,5 @@
1menu "Clock Source drivers" 1menu "Clock Source drivers"
2 depends on !ARCH_USES_GETTIMEOFFSET
2 3
3config CLKSRC_OF 4config CLKSRC_OF
4 bool 5 bool
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 10202f1fdfd7..517e1c7624d4 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
         int err;
 
         ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
-        ftm_writel(~0UL, priv->clkevt_base + FTM_MOD);
+        ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
 
         ftm_reset_counter(priv->clkevt_base);
 
@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
         int err;
 
         ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
-        ftm_writel(~0UL, priv->clksrc_base + FTM_MOD);
+        ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
 
         ftm_reset_counter(priv->clksrc_base);
 
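
The ~0UL to ~0u change in both hunks is a 64-bit correctness detail: unsigned long is 64 bits wide on LP64 kernels (arm64, x86-64), so ~0UL is 0xffffffffffffffff, while the intended FTM_MOD value is the 32-bit all-ones pattern that ~0u yields on every target. A standalone C illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            /* On an LP64 target, sizeof(unsigned long) == 8. */
            printf("sizeof(~0u)  = %zu, value = 0x%x\n", sizeof(~0u), ~0u);
            printf("sizeof(~0UL) = %zu, value = 0x%lx\n", sizeof(~0UL), ~0UL);

            unsigned int reg = ~0UL;        /* silently truncated */
            printf("truncated    = 0x%x\n", reg);
            return 0;
    }
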
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c016b0..8014c2307332 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
         bool "Mediatek MT8173 CPUFreq support"
         depends on ARCH_MEDIATEK && REGULATOR
+        depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
         depends on !CPU_THERMAL || THERMAL=y
         select PM_OPP
         help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
 config X86_INTEL_PSTATE
         bool "Intel P state control"
         depends on X86
-        select ACPI_PROCESSOR if ACPI
         help
           This driver provides a P state for Intel core processors.
           The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..001a532e342e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS              0x66a
-#define BYT_VIDS                0x66b
-#define BYT_TURBO_RATIOS        0x66c
-#define BYT_TURBO_VIDS          0x66d
+#define ATOM_RATIOS             0x66a
+#define ATOM_VIDS               0x66b
+#define ATOM_TURBO_RATIOS       0x66c
+#define ATOM_TURBO_VIDS         0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
         u64     prev_mperf;
         u64     prev_tsc;
         struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-        struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
         int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
         int max_sysfs_pct;
         int min_policy_pct;
         int min_sysfs_pct;
-        int max_perf_ctl;
-        int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
         .max_sysfs_pct = 100,
         .min_policy_pct = 0,
         .min_sysfs_pct = 0,
-        .max_perf_ctl = 0,
-        .min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-        return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-        struct cpudata *cpu;
-        int ret;
-        bool turbo_absent = false;
-        int max_pstate_index;
-        int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-        int i;
-
-        cpu = all_cpu_data[policy->cpu];
-
-        pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-                 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                 cpu->pstate.turbo_pstate);
-
-        if (!cpu->acpi_perf_data.shared_cpu_map &&
-            zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-                                    GFP_KERNEL, cpu_to_node(policy->cpu))) {
-                return -ENOMEM;
-        }
-
-        ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-                                                  policy->cpu);
-        if (ret)
-                return ret;
-
-        /*
-         * Check if the control value in _PSS is for PERF_CTL MSR, which should
-         * guarantee that the states returned by it map to the states in our
-         * list directly.
-         */
-        if (cpu->acpi_perf_data.control_register.space_id !=
-            ACPI_ADR_SPACE_FIXED_HARDWARE)
-                return -EIO;
-
-        pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-        for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-                pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
-                         (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-                         (u32) cpu->acpi_perf_data.states[i].core_frequency,
-                         (u32) cpu->acpi_perf_data.states[i].power,
-                         (u32) cpu->acpi_perf_data.states[i].control);
-
-        /*
-         * If there is only one entry _PSS, simply ignore _PSS and continue as
-         * usual without taking _PSS into account
-         */
-        if (cpu->acpi_perf_data.state_count < 2)
-                return 0;
-
-        turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-        min_pss_ctl = convert_to_native_pstate_format(cpu,
-                                cpu->acpi_perf_data.state_count - 1);
-        /* Check if there is a turbo freq in _PSS */
-        if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-            turbo_pss_ctl > cpu->pstate.min_pstate) {
-                pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-                limits->no_turbo = limits->turbo_disabled = 1;
-                cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-                turbo_absent = true;
-        }
-
-        /* Check if the max non turbo p state < Intel P state max */
-        max_pstate_index = turbo_absent ? 0 : 1;
-        max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-        if (max_pss_ctl < cpu->pstate.max_pstate &&
-            max_pss_ctl > cpu->pstate.min_pstate)
-                cpu->pstate.max_pstate = max_pss_ctl;
-
-        /* check If min perf > Intel P State min */
-        if (min_pss_ctl > cpu->pstate.min_pstate &&
-            min_pss_ctl < cpu->pstate.max_pstate) {
-                cpu->pstate.min_pstate = min_pss_ctl;
-                policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-        }
-
-        if (turbo_absent)
-                policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-                                                cpu->pstate.scaling;
-        else {
-                policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-                                                cpu->pstate.scaling;
-                /*
-                 * The _PSS table doesn't contain whole turbo frequency range.
-                 * This just contains +1 MHZ above the max non turbo frequency,
-                 * with control value corresponding to max turbo ratio. But
-                 * when cpufreq set policy is called, it will call with this
-                 * max frequency, which will cause a reduced performance as
-                 * this driver uses real max turbo frequency as the max
-                 * frequeny. So correct this frequency in _PSS table to
-                 * correct max turbo frequency based on the turbo ratio.
-                 * Also need to convert to MHz as _PSS freq is in MHz.
-                 */
-                cpu->acpi_perf_data.states[0].core_frequency =
-                                                turbo_pss_ctl * 100;
-        }
-
-        pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-                 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                 cpu->pstate.turbo_pstate);
-        pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-                 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-        return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-        struct cpudata *cpu;
-
-        if (!no_acpi_perf)
-                return 0;
-
-        cpu = all_cpu_data[policy->cpu];
-        acpi_processor_unregister_performance(policy->cpu);
-        return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-        return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-        return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                 int deadband, int integral) {
         pid->setpoint = setpoint;
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
         u64 value;
 
-        rdmsrl(BYT_RATIOS, value);
+        rdmsrl(ATOM_RATIOS, value);
         return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
         u64 value;
 
-        rdmsrl(BYT_RATIOS, value);
+        rdmsrl(ATOM_RATIOS, value);
         return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
         u64 value;
 
-        rdmsrl(BYT_TURBO_RATIOS, value);
+        rdmsrl(ATOM_TURBO_RATIOS, value);
         return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
         u64 val;
         int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
         wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
         u64 value;
         int i;
+        /* Defined in Table 35-6 from SDM (Sept 2015) */
+        static int silvermont_freq_table[] = {
+                83300, 100000, 133300, 116700, 80000};
 
         rdmsrl(MSR_FSB_FREQ, value);
-        i = value & 0x3;
+        i = value & 0x7;
+        WARN_ON(i > 4);
 
-        BUG_ON(i > BYT_BCLK_FREQS);
+        return silvermont_freq_table[i];
+}
 
-        return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+        u64 value;
+        int i;
+        /* Defined in Table 35-10 from SDM (Sept 2015) */
+        static int airmont_freq_table[] = {
+                83300, 100000, 133300, 116700, 80000,
+                93300, 90000, 88900, 87500};
+
+        rdmsrl(MSR_FSB_FREQ, value);
+        i = value & 0xF;
+        WARN_ON(i > 8);
+
+        return airmont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
         u64 value;
 
-        rdmsrl(BYT_VIDS, value);
+        rdmsrl(ATOM_VIDS, value);
         cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
         cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
         cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
                         int_tofp(cpudata->pstate.max_pstate -
                                  cpudata->pstate.min_pstate));
 
-        rdmsrl(BYT_TURBO_VIDS, value);
+        rdmsrl(ATOM_TURBO_VIDS, value);
         cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
         },
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
         .pid_policy = {
                 .sample_rate_ms = 10,
                 .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
                 .i_gain_pct = 4,
         },
         .funcs = {
-                .get_max = byt_get_max_pstate,
-                .get_max_physical = byt_get_max_pstate,
-                .get_min = byt_get_min_pstate,
-                .get_turbo = byt_get_turbo_pstate,
-                .set = byt_set_pstate,
-                .get_scaling = byt_get_scaling,
-                .get_vid = byt_get_vid,
+                .get_max = atom_get_max_pstate,
+                .get_max_physical = atom_get_max_pstate,
+                .get_min = atom_get_min_pstate,
+                .get_turbo = atom_get_turbo_pstate,
+                .set = atom_set_pstate,
+                .get_scaling = silvermont_get_scaling,
+                .get_vid = atom_get_vid,
+        },
+};
+
+static struct cpu_defaults airmont_params = {
+        .pid_policy = {
+                .sample_rate_ms = 10,
+                .deadband = 0,
+                .setpoint = 60,
+                .p_gain_pct = 14,
+                .d_gain_pct = 0,
+                .i_gain_pct = 4,
+        },
+        .funcs = {
+                .get_max = atom_get_max_pstate,
+                .get_max_physical = atom_get_max_pstate,
+                .get_min = atom_get_min_pstate,
+                .get_turbo = atom_get_turbo_pstate,
+                .set = atom_set_pstate,
+                .get_scaling = airmont_get_scaling,
+                .get_vid = atom_get_vid,
         },
 };
 
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
          * policy, or by cpu specific default values determined through
          * experimentation.
          */
-        if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-                                        limits->max_policy_pct) {
-                *max = limits->max_perf_ctl;
-        } else {
-                max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-                                        limits->max_perf));
-                *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-                               cpu->pstate.turbo_pstate);
-        }
+        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+        *max = clamp_t(int, max_perf_adj,
+                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-        if (limits->min_perf_ctl) {
-                *min = limits->min_perf_ctl;
-        } else {
-                min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-                                    limits->min_perf));
-                *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-        }
+        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+        *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
         ICPU(0x2a, core_params),
         ICPU(0x2d, core_params),
-        ICPU(0x37, byt_params),
+        ICPU(0x37, silvermont_params),
         ICPU(0x3a, core_params),
         ICPU(0x3c, core_params),
         ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
         ICPU(0x45, core_params),
         ICPU(0x46, core_params),
         ICPU(0x47, core_params),
-        ICPU(0x4c, byt_params),
+        ICPU(0x4c, airmont_params),
         ICPU(0x4e, core_params),
         ICPU(0x4f, core_params),
         ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-        struct cpudata *cpu;
-        int i;
-#endif
-        pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-                 policy->cpuinfo.max_freq, policy->max);
         if (!policy->cpuinfo.max_freq)
                 return -ENODEV;
 
@@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
         limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                   int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-        cpu = all_cpu_data[policy->cpu];
-        for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-                int control;
-
-                control = convert_to_native_pstate_format(cpu, i);
-                if (control * cpu->pstate.scaling == policy->max)
-                        limits->max_perf_ctl = control;
-                if (control * cpu->pstate.scaling == policy->min)
-                        limits->min_perf_ctl = control;
-        }
-
-        pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-                 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-                 limits->max_perf_ctl);
-#endif
-
         if (hwp_active)
                 intel_pstate_hwp_set();
 
@@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
         policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
         policy->cpuinfo.max_freq =
                 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-        if (!no_acpi_perf)
-                intel_pstate_init_perf_limits(policy);
-        /*
-         * If there is no acpi perf data or error, we ignore and use Intel P
-         * state calculated limits, So this is not fatal error.
-         */
         policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
         cpumask_set_cpu(policy->cpu, policy->cpus);
 
         return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-        return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
         .flags          = CPUFREQ_CONST_LOOPS,
         .verify         = intel_pstate_verify_policy,
         .setpolicy      = intel_pstate_set_policy,
         .get            = intel_pstate_get,
         .init           = intel_pstate_cpu_init,
-        .exit           = intel_pstate_cpu_exit,
         .stop_cpu       = intel_pstate_stop_cpu,
         .name           = "intel_pstate",
 };
@@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
                 force_load = 1;
         if (!strcmp(str, "hwp_only"))
                 hwp_only = 1;
-        if (!strcmp(str, "no_acpi"))
-                no_acpi_perf = 1;
-
         return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
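
Among the intel_pstate changes above, the scaling rework is the subtle one: the MSR_FSB_FREQ index mask grows from 0x3 to 0x7 (Silvermont) and 0xF (Airmont), so every documented bus-clock encoding becomes reachable; with the old two-bit mask the fifth table entry could never be selected. A hedged sketch of the general pattern, a lookup keyed by a raw hardware field (the table values mirror the Silvermont ones, but the helper itself is illustrative, not the driver's):

    #include <stdio.h>

    /* Bus frequencies in kHz, indexed by a 3-bit hardware field. */
    static const int freq_table[] = { 83300, 100000, 133300, 116700, 80000 };
    #define TABLE_LEN (sizeof(freq_table) / sizeof(freq_table[0]))

    static int lookup_freq(unsigned int raw)
    {
            unsigned int i = raw & 0x7;     /* mask must span the field */

            if (i >= TABLE_LEN) {           /* reserved encoding */
                    fprintf(stderr, "unknown encoding %u\n", i);
                    return freq_table[0];   /* illustrative fallback */
            }
            return freq_table[i];
    }

    int main(void)
    {
            printf("%d\n", lookup_freq(0x4)); /* unreachable with & 0x3 */
            printf("%d\n", lookup_freq(0x6)); /* reserved -> fallback */
            return 0;
    }
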
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad280b9..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
                 goto out_err;
         }
 
-        params_head = section_head->params;
+        params_head = section.params;
 
         while (params_head) {
                 if (copy_from_user(&key_val, (void __user *)params_head,
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4e55239c7a30..53d22eb73b56 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                 return NULL;
 
         dev_info(chan2dev(chan),
-                 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-                __func__, xt->src_start, xt->dst_start, xt->numf,
+                 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+                __func__, &xt->src_start, &xt->dst_start, xt->numf,
                 xt->frame_size, flags);
 
         /*
@@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         u32 ctrla;
         u32 ctrlb;
 
-        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
-                        dest, src, len, flags);
+        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
+                        &dest, &src, len, flags);
 
         if (unlikely(!len)) {
                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
         void __iomem *vaddr;
         dma_addr_t paddr;
 
-        dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
-                dest, value, len, flags);
+        dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
+                &dest, value, len, flags);
 
         if (unlikely(!len)) {
                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
                 dma_addr_t dest = sg_dma_address(sg);
                 size_t len = sg_dma_len(sg);
 
-                dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
-                         __func__, dest, len);
+                dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
+                         __func__, &dest, len);
 
                 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
                         dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
         unsigned int periods = buf_len / period_len;
         unsigned int i;
 
-        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
                         direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
-                        buf_addr,
+                        &buf_addr,
                         periods, buf_len, period_len);
 
         if (unlikely(!atslave || !buf_len || !period_len)) {
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d1cfc8c876f9..7f58f06157f6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
 static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 {
         dev_crit(chan2dev(&atchan->chan_common),
-                 "  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
-                 lli->saddr, lli->daddr,
-                 lli->ctrla, lli->ctrlb, lli->dscr);
+                 "  desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+                 &lli->saddr, &lli->daddr,
+                 lli->ctrla, lli->ctrlb, &lli->dscr);
 }
 
 
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..7f039de143f0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
         desc->lld.mbr_cfg = chan_cc;
 
         dev_dbg(chan2dev(chan),
-                "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-                __func__, desc->lld.mbr_sa, desc->lld.mbr_da,
+                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
                 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
 
         /* Chain lld. */
@@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
         if ((xt->numf > 1) && (xt->frame_size > 1))
                 return NULL;
 
-        dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-                __func__, xt->src_start, xt->dst_start, xt->numf,
+        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+                __func__, &xt->src_start, &xt->dst_start, xt->numf,
                 xt->frame_size, flags);
 
         src_addr = xt->src_start;
@@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
         desc->lld.mbr_cfg = chan_cc;
 
         dev_dbg(chan2dev(chan),
-                "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-                __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
+                "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+                __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
                 desc->lld.mbr_cfg);
 
         return desc;
@@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
         struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
         struct at_xdmac_desc *desc;
 
-        dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-                __func__, dest, len, value, flags);
+        dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+                __func__, &dest, len, value, flags);
 
         if (unlikely(!len))
                 return NULL;
@@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
         /* Prepare descriptors. */
         for_each_sg(sgl, sg, sg_len, i) {
-                dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-                        __func__, sg_dma_address(sg), sg_dma_len(sg),
+                dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+                        __func__, &sg_dma_address(sg), sg_dma_len(sg),
                         value, flags);
                 desc = at_xdmac_memset_create_desc(chan, atchan,
                                                    sg_dma_address(sg),
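
All of the format-string churn in the three DMA files above is one fix repeated: dma_addr_t is not necessarily the same width as unsigned int (it is 64-bit with LPAE or on 64-bit kernels), so handing it to a 0x%08x conversion corrupts the vararg list. The kernel's %pad specifier prints a dma_addr_t through a pointer, which is why every converted argument gains an &. A runnable userspace illustration of the underlying varargs hazard, with uint64_t standing in for a wide dma_addr_t:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x1234abcdULL;  /* stand-in for dma_addr_t */
            int len = 42;

            /* Correct: conversion width matches the argument width. */
            printf("addr=0x%08" PRIx64 " len=%d\n", addr, len);

            /*
             * Undefined behavior if enabled: "%x" consumes only 32 of
             * addr's 64 bits, so "%d" can end up reading the high half
             * of addr instead of len. This is the bug class %pad avoids.
             */
            /* printf("addr=0x%08x len=%d\n", addr, len); */
            return 0;
    }
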
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..0675e268d577 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -107,7 +107,7 @@
 
 /* CCCFG register */
 #define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
-#define GET_NUM_QDMACH(x)	(x & 0x70 >> 4) /* bits 4-6 */
+#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
 #define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
 #define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
 #define GET_NUM_REGN(x)	((x & 0x300000) >> 20) /* bits 20-21 */
@@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
 	struct platform_device *tc_pdev;
 	int ret;
 
-	if (!tc)
+	if (!IS_ENABLED(CONFIG_OF) || !tc)
 		return;
 
 	tc_pdev = of_find_device_by_node(tc->node);
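The CCCFG fix is a one-character precedence bug: `>>` binds tighter than `&` in C, so `x & 0x70 >> 4` parses as `x & (0x70 >> 4)`, i.e. `x & 0x7`, which returns the DMA-channel field instead of the QDMA one. A standalone illustration with a made-up register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int x = 0x25;			/* bits 0-2 = 5, bits 4-6 = 2 */

		printf("%u\n", x & 0x70 >> 4);		/* parses as x & 0x7: prints 5 */
		printf("%u\n", (x & 0x70) >> 4);	/* intended field:    prints 2 */
		return 0;
	}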
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7058d58ba588..0f6fd42f55ca 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@ err_firmware:
 
 #define EVENT_REMAP_CELLS	3
 
-static int __init sdma_event_remap(struct sdma_engine *sdma)
+static int sdma_event_remap(struct sdma_engine *sdma)
 {
 	struct device_node *np = sdma->dev->of_node;
 	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
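Dropping `__init` matters here because the function is reached from the probe path, and probe can run long after boot (deferred probe, unbind and re-bind), by which point `.init.text` has been discarded; calling it then jumps into freed memory. The hazard in miniature (hypothetical names, sketch only):

	static int __init helper(void)		/* placed in .init.text */
	{
		return 0;
	}

	static int my_probe(struct platform_device *pdev)
	{
		return helper();	/* may execute after .init memory is freed */
	}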
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..f1bcc2a163b3 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
 	struct usb_dmac *dmac = dev_get_drvdata(dev);
 	int i;
 
-	for (i = 0; i < dmac->n_channels; ++i)
+	for (i = 0; i < dmac->n_channels; ++i) {
+		if (!dmac->channels[i].iomem)
+			break;
 		usb_dmac_chan_halt(&dmac->channels[i]);
+	}
 
 	return 0;
 }
@@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
-		return ret;
+		goto error_pm;
 	}
 
 	ret = usb_dmac_init(dmac);
-	pm_runtime_put(&pdev->dev);
 
 	if (ret) {
 		dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto error;
 
+	pm_runtime_put(&pdev->dev);
 	return 0;
 
 error:
 	of_dma_controller_free(pdev->dev.of_node);
+	pm_runtime_put(&pdev->dev);
+error_pm:
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
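The probe rework fixes an unbalanced error path: the early `return ret` skipped `pm_runtime_disable()`, and the unconditional `pm_runtime_put()` issued right after `usb_dmac_init()` dropped the reference before registration finished, so the device could suspend under a probe that was still running. The result is the usual layered-goto cleanup; roughly (with a hypothetical `do_setup()` standing in for the body of probe):

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto error_pm;			/* runtime PM still needs disabling */

	ret = do_setup(pdev);			/* hypothetical: init + registration */
	if (ret)
		goto error;

	pm_runtime_put(&pdev->dev);		/* drop the reference only on success */
	return 0;

error:
	pm_runtime_put(&pdev->dev);
error_pm:
	pm_runtime_disable(&pdev->dev);
	return ret;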
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d464fb..306f75700bf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work		lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
 
@@ -917,8 +916,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo	*bo;
-	uint64_t		addr;
+	struct amdgpu_bo		*bo;
+	uint64_t			addr;
 };
 
 struct amdgpu_vm_id {
@@ -926,8 +925,6 @@ struct amdgpu_vm_id {
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
-	/* last use of vmid */
-	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -957,24 +954,70 @@ struct amdgpu_vm {
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
 };
 
 struct amdgpu_vm_manager {
-	struct fence			*active[AMDGPU_NUM_VM];
-	uint32_t			max_pfn;
+	struct {
+		struct fence		*active;
+		atomic_long_t		owner;
+	} ids[AMDGPU_NUM_VM];
+
+	uint32_t			max_pfn;
 	/* number of VMIDs */
 	unsigned			nvm;
 	/* vram base address for page table entry */
 	u64				vram_base_offset;
 	/* is vm enabled? */
 	bool				enabled;
-	/* for hw to save the PD addr on suspend/resume */
-	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
 	struct amdgpu_ring			*vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					       struct amdgpu_vm *vm,
+					       struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
@@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser {
 	/* relocations */
 	struct amdgpu_bo_list_entry	*vm_bos;
 	struct list_head	validated;
+	struct fence		*fence;
 
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
@@ -1226,7 +1270,7 @@ struct amdgpu_job {
 	struct amdgpu_device	*adev;
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
-	struct mutex		job_lock;
+	void			*owner;
 	struct amdgpu_user_fence uf;
 	int (*free_job)(struct amdgpu_job *job);
 };
@@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2358,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
 /*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					       struct amdgpu_vm *vm,
-					       struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
-/*
  * functions used by amdgpu_encoder.c
  */
 struct amdgpu_afmt_acr {
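The reshaped `amdgpu_vm_manager` pairs each VMID's last `active` fence with an `owner` token, replacing both the flat `active[]` array and the per-VM `last_id_use` fence. The point, visible in the amdgpu_vm.c diff further down, is that re-validating an ID no longer compares fence pointers: it becomes a single lockless `atomic_long_read()` of the owner token, where the `struct amdgpu_vm *` cast to `long` serves purely as an identity value. Reduced to its essentials (names shortened, sketch only):

	struct vmid_slot {
		struct fence *active;	/* last fence using this VMID */
		atomic_long_t owner;	/* identity token: the vm pointer as a long */
	};

	static bool vmid_still_owned(struct vmid_slot *slot, struct amdgpu_vm *vm)
	{
		return atomic_long_read(&slot->owner) == (long)vm;
	}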
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..3afcf0237c25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs)
-{
-	struct amdgpu_cs_parser *parser;
-	int i;
-
-	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-	if (!parser)
-		return NULL;
-
-	parser->adev = adev;
-	parser->filp = filp;
-	parser->ctx = ctx;
-	parser->ibs = ibs;
-	parser->num_ibs = num_ibs;
-	for (i = 0; i < num_ibs; i++)
-		ibs[i].ctx = ctx;
-
-	return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set than unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+	unsigned i;
+
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    &parser->ibs[parser->num_ibs-1].fence->base);
+					    parser->fence);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-}
+	fence_put(parser->fence);
 
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
@@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (!amdgpu_enable_scheduler)
-	{
-		if (parser->ibs)
-			for (i = 0; i < parser->num_ibs; i++)
-				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-		kfree(parser->ibs);
-		if (parser->uf.bo)
-			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-	}
-
-	kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-	amdgpu_cs_parser_fini_early(parser, error, backoff);
-	amdgpu_cs_parser_fini_late(parser);
+	if (parser->ibs)
+		for (i = 0; i < parser->num_ibs; i++)
+			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	kfree(parser->ibs);
+	if (parser->uf.bo)
+		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	}
 
 	r = amdgpu_bo_vm_update_pte(parser, vm);
-	if (r) {
-		goto out;
-	}
-	amdgpu_cs_sync_rings(parser);
-	if (!amdgpu_enable_scheduler)
-		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-				       parser->filp);
+	if (!r)
+		amdgpu_cs_sync_rings(parser);
 
-out:
 	return r;
 }
 
@@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_cs_parser *parser;
+	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
 
 	if (!adev->accel_working)
 		return -EBUSY;
 
-	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
-	if (!parser)
-		return -ENOMEM;
-	r = amdgpu_cs_parser_init(parser, data);
+	parser.adev = adev;
+	parser.filp = filp;
+
+	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		amdgpu_cs_parser_fini(&parser, r, false);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 	mutex_lock(&vm->mutex);
-	r = amdgpu_cs_parser_relocs(parser);
+	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
 	else if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Failed to process the buffer list %d!\n", r);
 	else if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, parser);
+		r = amdgpu_cs_ib_fill(adev, &parser);
 	}
 
 	if (!r) {
-		r = amdgpu_cs_dependencies(adev, parser);
+		r = amdgpu_cs_dependencies(adev, &parser);
 		if (r)
 			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
@@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser->num_ibs) {
+	if (amdgpu_enable_scheduler && parser.num_ibs) {
+		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
 		struct amdgpu_job *job;
-		struct amdgpu_ring * ring = parser->ibs->ring;
+
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
 			r = -ENOMEM;
 			goto out;
 		}
+
 		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-		job->adev = parser->adev;
-		job->ibs = parser->ibs;
-		job->num_ibs = parser->num_ibs;
-		job->base.owner = parser->filp;
-		mutex_init(&job->job_lock);
+		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+		job->adev = parser.adev;
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
+		job->ibs = parser.ibs;
+		job->num_ibs = parser.num_ibs;
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		if (job->ibs[job->num_ibs - 1].user) {
-			memcpy(&job->uf, &parser->uf,
-			       sizeof(struct amdgpu_user_fence));
+			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
+			parser.uf.bo = NULL;
 		}
 
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
-					     &job->base.s_fence->base);
-		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+		job->base.s_fence = fence;
+		parser.fence = fence_get(&fence->base);
 
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-					    &parser->validated,
-					    &job->base.s_fence->base);
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
+		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		mutex_unlock(&job->job_lock);
-		amdgpu_cs_parser_fini_late(parser);
-		mutex_unlock(&vm->mutex);
-		return 0;
+		trace_amdgpu_cs_ioctl(job);
+		amd_sched_entity_push_job(&job->base);
+
+	} else {
+		struct amdgpu_fence *fence;
+
+		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+				       parser.filp);
+		fence = parser.ibs[parser.num_ibs - 1].fence;
+		parser.fence = fence_get(&fence->base);
+		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
-	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
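Two related changes run through this file: the parser becomes a stack variable torn down by a single `amdgpu_cs_parser_fini()` (replacing the kzalloc'd object and the early/late split), and when the scheduler takes over a submission, ownership of the IB array moves to the job by nulling the parser's fields, so the one remaining fini path cannot free what the job now holds. The hand-off idiom in isolation (a generic sketch, not the driver's structs):

	struct holder { struct item *items; unsigned int n; };

	/* move ownership from a short-lived holder to a long-lived one;
	 * whichever side still sees a non-NULL pointer must do the freeing */
	static void transfer(struct holder *to, struct holder *from)
	{
		to->items = from->items;
		to->n = from->n;
		from->items = NULL;
		from->n = 0;
	}

The same pattern covers the user fence: `parser.uf.bo = NULL` after `job->uf = parser.uf` keeps fini from dropping a GEM reference the job still needs.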
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d72205bb5..3671f9f220bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
  * that the the relevant GPU caches have been flushed.
  */
 
+static struct kmem_cache *amdgpu_fence_slab;
+static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 }
 
 /**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-	/*
-	 * Do not reset the timer here with mod_delayed_work,
-	 * this can livelock in an interaction with TTM delayed destroy.
-	 */
-	queue_delayed_work(system_power_efficient_wq,
-			   &ring->fence_drv.lockup_work,
-			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
-/**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	struct amdgpu_device *adev = ring->adev;
 
 	/* we are protected by the ring emission mutex */
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if ((*fence) == NULL) {
 		return -ENOMEM;
 	}
@@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
 			       AMDGPU_FENCE_FLAG_INT);
-	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
 	return 0;
 }
 
 /**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
  * amdgpu_fence_activity - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
 
 	if (seq < last_emitted)
-		amdgpu_fence_schedule_check(ring);
+		amdgpu_fence_schedule_fallback(ring);
 
 	return wake;
 }
 
 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
  *
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
  *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
 */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence_driver *fence_drv;
-	struct amdgpu_ring *ring;
-
-	fence_drv = container_of(work, struct amdgpu_fence_driver,
-				 lockup_work.work);
-	ring = fence_drv->ring;
-
 	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
  *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_ring *ring = (void *)arg;
+
+	amdgpu_fence_process(ring);
 }
 
 /**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
 		return 0;
 
-	amdgpu_fence_schedule_check(ring);
+	amdgpu_fence_schedule_fallback(ring);
 	wait_event(ring->fence_drv.fence_queue, (
 		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	atomic64_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;
 
-	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-			  amdgpu_fence_check_lockup);
-	ring->fence_drv.ring = ring;
+	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+		    (unsigned long)ring);
 
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
+	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
+		amdgpu_fence_slab = kmem_cache_create(
+			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!amdgpu_fence_slab)
+			return -ENOMEM;
+	}
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
+	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+		kmem_cache_destroy(amdgpu_fence_slab);
 	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 		r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
 	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
+static void amdgpu_fence_release(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	kmem_cache_free(amdgpu_fence_slab, fence);
+}
+
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
 	.signaled = amdgpu_fence_is_signaled,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amdgpu_fence_release,
};
 
 /*
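Besides swapping the lockup worker for a fallback timer, this diff moves fences from `kmalloc()` to a dedicated, refcounted `kmem_cache`. That is what forces the new `.release` hook: with `.release = NULL` the fence core's default path frees the object via kfree_rcu(), which must never be applied to slab-cache memory, so every allocation from the cache has to return through `kmem_cache_free()`. The pairing in general form (sketch, hypothetical `struct item`):

	static struct kmem_cache *item_cache;

	struct item { u64 seq; };

	static int item_pool_init(void)
	{
		item_cache = kmem_cache_create("item", sizeof(struct item),
					       0, SLAB_HWCACHE_ALIGN, NULL);
		return item_cache ? 0 : -ENOMEM;
	}

	static void item_free(struct item *it)
	{
		kmem_cache_free(item_cache, it);	/* never kfree() */
	}

The atomic `amdgpu_fence_slab_ref` counter exists because init/fini run once per device while the cache is shared by all of them.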
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..00c5b580f56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
@@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
@@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
@@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 	mutex_unlock(&fpriv->vm.mutex);
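The ioctl previously locked only the GEM object with `amdgpu_bo_reserve()`. Because the VA update can now also touch the VM's page directory (note the added `amdgpu_vm_update_page_directory()` call in the first hunk), both buffers go onto a `ttm_validate_buffer` list and are reserved atomically with `ttm_eu_reserve_buffers()`; the `ww_acquire_ctx` ticket is what lets TTM back off and retry when two tasks grab overlapping buffer sets in opposite orders, instead of deadlocking. The reservation is then always released with `ttm_eu_backoff_reservation()` on the same ticket and list, on the not-found path and on the normal exit alike.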
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e65987743871..9e25edafa721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
 	int r;
 
 	if (size) {
-		r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 				     &ib->sa_bo, size, 256);
 		if (r) {
 			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 	}
 
 	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, ib->fence);
+		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
 
 	amdgpu_ring_unlock_commit(ring);
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff4567798..ea756e77b023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
 			       struct amdgpu_sa_manager *sa_manager);
 int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 				 struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-		     struct amdgpu_sa_manager *sa_manager,
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			struct amdgpu_sa_bo **sa_bo,
 			struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31dc194..8b88edb0434b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	return false;
 }
 
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-		     struct amdgpu_sa_manager *sa_manager,
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
-	mutex_lock(&job->job_lock);
-	r = amdgpu_ib_schedule(job->adev,
-			       job->num_ibs,
-			       job->ibs,
-			       job->base.owner);
+	trace_amdgpu_sched_run_job(job);
+	r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
@@ -61,8 +59,6 @@ err:
 	if (job->free_job)
 		job->free_job(job);
 
-	mutex_unlock(&job->job_lock);
-	fence_put(&job->base.s_fence->base);
 	kfree(job);
 	return fence ? &fence->base : NULL;
 }
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		job->base.sched = &ring->sched;
 		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+		if (!job->base.s_fence) {
+			kfree(job);
+			return -ENOMEM;
+		}
+		*f = fence_get(&job->base.s_fence->base);
+
 		job->adev = adev;
 		job->ibs = ibs;
 		job->num_ibs = num_ibs;
-		job->base.owner = owner;
-		mutex_init(&job->job_lock);
+		job->owner = owner;
 		job->free_job = free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
-			kfree(job);
-			return r;
-		}
-		*f = fence_get(&job->base.s_fence->base);
-		mutex_unlock(&job->job_lock);
+		amd_sched_entity_push_job(&job->base);
 	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)
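The helper is rearranged so the scheduler fence is created, and the caller's reference (`*f`) taken, before `amd_sched_entity_push_job()`. Once pushed, the job belongs to the scheduler thread and may run and be freed at any moment, so the old sequence (push, then `*f = fence_get(...)` under `job_lock`) raced with job completion, and the lock existed precisely to paper over that window. With the fence pre-created, the push is treated as unfailing and nothing touches `job` afterwards. The rule in miniature (a sketch with hypothetical names, not the scheduler API):

	job->fence = make_fence(job);		/* the only step that can fail */
	if (!job->fence) {
		kfree(job);
		return -ENOMEM;
	}
	*f = get_ref(job->fence);		/* take refs while we still own the job */
	push_to_scheduler(job);			/* ownership transferred: never
						 * dereference job past this line */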
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52ec6fe..1caaf201b708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 			     &(*semaphore)->sa_bo, 8, 8);
 	if (r) {
 		kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd05217..dd005c336c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 			return -EINVAL;
 		}
 
-		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
-		    (count >= AMDGPU_NUM_SYNCS)) {
+		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+			r = fence_wait(&fence->base, true);
+			if (r)
+				return r;
+			continue;
+		}
+
+		if (count >= AMDGPU_NUM_SYNCS) {
 			/* not enough room, wait manually */
 			r = fence_wait(&fence->base, false);
 			if (r)
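The sync change splits one condition into two waits with different semantics: on the scheduler/no-semaphore path the wait is now interruptible (`fence_wait(..., true)`), so a signal can abort a user-triggered submission with -ERESTARTSYS, while the ring-out-of-semaphore-slots fallback keeps the non-interruptible wait, since bailing out half-way through emitting semaphore commands would leave the ring in an inconsistent state.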
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf72a2e..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
 		      __entry->fences)
 );
 
+TRACE_EVENT(amdgpu_cs_ioctl,
+	    TP_PROTO(struct amdgpu_job *job),
+	    TP_ARGS(job),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_device *, adev)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct amdgpu_ib *, ib)
+			     __field(struct fence *, fence)
+			     __field(char *, ring_name)
+			     __field(u32, num_ibs)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->adev = job->adev;
+			   __entry->sched_job = &job->base;
+			   __entry->ib = job->ibs;
+			   __entry->fence = &job->base.s_fence->base;
+			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->num_ibs = job->num_ibs;
+			   ),
+	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+		      __entry->adev, __entry->sched_job, __entry->ib,
+		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+TRACE_EVENT(amdgpu_sched_run_job,
+	    TP_PROTO(struct amdgpu_job *job),
+	    TP_ARGS(job),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_device *, adev)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct amdgpu_ib *, ib)
+			     __field(struct fence *, fence)
+			     __field(char *, ring_name)
+			     __field(u32, num_ibs)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->adev = job->adev;
+			   __entry->sched_job = &job->base;
+			   __entry->ib = job->ibs;
+			   __entry->fence = &job->base.s_fence->base;
+			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->num_ibs = job->num_ibs;
+			   ),
+	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+		      __entry->adev, __entry->sched_job, __entry->ib,
+		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+
 TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_PROTO(unsigned vmid, int ring),
 	    TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
 	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
 );
 
-DECLARE_EVENT_CLASS(amdgpu_fence_request,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(int, ring)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->ring = ring;
-			   __entry->seqno = seqno;
-			   ),
-
-	    TP_printk("dev=%u, ring=%d, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
 DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
 
 	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
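The dropped `amdgpu_fence_request` class (and its emit/wait_begin/wait_end events) belonged to the lockup checker removed in amdgpu_fence.c above; the two new events replace them at submission granularity, with the matching `trace_amdgpu_cs_ioctl()` and `trace_amdgpu_sched_run_job()` call sites added in amdgpu_cs.c and amdgpu_sched.c earlier in this series. Both print the scheduler fence pointer, which lets a trace consumer pair the ioctl-side event with the later run-job event for the same submission.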
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9fc26d..d4bac5f49939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
 	ret = drm_mm_dump_table(m, mm);
 	spin_unlock(&glob->lru_lock);
 	if (ttm_pl == TTM_PL_VRAM)
-		seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+		seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
 			   adev->mman.bdev.man[ttm_pl].size,
-			   atomic64_read(&adev->vram_usage) >> 20,
-			   atomic64_read(&adev->vram_vis_usage) >> 20);
+			   (u64)atomic64_read(&adev->vram_usage) >> 20,
+			   (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
 	return ret;
 }
 
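The seq_printf fix is a portability detail: `atomic64_read()` returns `long` on 64-bit builds but `long long` under the generic 32-bit implementation, so neither `%lu` nor `%llu` alone matches on every architecture and one of them warns. Casting to `u64` and printing with `%llu` pins the width everywhere. In isolation:

	static atomic64_t bytes_used;

	static void show_usage(struct seq_file *m)
	{
		/* the cast makes the argument width independent of the arch */
		seq_printf(m, "usage:%lluMB\n",
			   (u64)atomic64_read(&bytes_used) >> 20);
	}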
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..159ce54bbd8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
143 unsigned i; 143 unsigned i;
144 144
145 /* check if the id is still valid */ 145 /* check if the id is still valid */
146 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id) {
147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 147 unsigned id = vm_id->id;
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 148 long owner;
149 return 0; 149
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
151 if (owner == (long)vm) {
152 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
153 return 0;
154 }
150 } 155 }
151 156
152 /* we definately need to flush */ 157 /* we definately need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
154 159
155 /* skip over VMID 0, since it is the system VM */ 160 /* skip over VMID 0, since it is the system VM */
156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 161 for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 struct fence *fence = adev->vm_manager.active[i]; 162 struct fence *fence = adev->vm_manager.ids[i].active;
158 struct amdgpu_ring *fring; 163 struct amdgpu_ring *fring;
159 164
160 if (fence == NULL) { 165 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 if (choices[i]) { 181 if (choices[i]) {
177 struct fence *fence; 182 struct fence *fence;
178 183
179 fence = adev->vm_manager.active[choices[i]]; 184 fence = adev->vm_manager.ids[choices[i]].active;
180 vm_id->id = choices[i]; 185 vm_id->id = choices[i];
181 186
182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 187 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
207 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 212 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
208 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 213 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
209 struct fence *flushed_updates = vm_id->flushed_updates; 214 struct fence *flushed_updates = vm_id->flushed_updates;
210 bool is_earlier = false; 215 bool is_later;
211
212 if (flushed_updates && updates) {
213 BUG_ON(flushed_updates->context != updates->context);
214 is_earlier = (updates->seqno - flushed_updates->seqno <=
215 INT_MAX) ? true : false;
216 }
217 216
218 if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || 217 if (!flushed_updates)
219 is_earlier) { 218 is_later = true;
219 else if (!updates)
220 is_later = false;
221 else
222 is_later = fence_is_later(updates, flushed_updates);
220 223
224 if (pd_addr != vm_id->pd_gpu_addr || is_later) {
221 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 225 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
222 if (is_earlier) { 226 if (is_later) {
223 vm_id->flushed_updates = fence_get(updates); 227 vm_id->flushed_updates = fence_get(updates);
224 fence_put(flushed_updates); 228 fence_put(flushed_updates);
225 } 229 }
226 if (!flushed_updates)
227 vm_id->flushed_updates = fence_get(updates);
228 vm_id->pd_gpu_addr = pd_addr; 230 vm_id->pd_gpu_addr = pd_addr;
229 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 231 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
230 } 232 }
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
244 */ 246 */
245void amdgpu_vm_fence(struct amdgpu_device *adev, 247void amdgpu_vm_fence(struct amdgpu_device *adev,
246 struct amdgpu_vm *vm, 248 struct amdgpu_vm *vm,
247 struct amdgpu_fence *fence) 249 struct fence *fence)
248{ 250{
249 unsigned ridx = fence->ring->idx; 251 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
250 unsigned vm_id = vm->ids[ridx].id; 252 unsigned vm_id = vm->ids[ring->idx].id;
251
252 fence_put(adev->vm_manager.active[vm_id]);
253 adev->vm_manager.active[vm_id] = fence_get(&fence->base);
254 253
255 fence_put(vm->ids[ridx].last_id_use); 254 fence_put(adev->vm_manager.ids[vm_id].active);
256 vm->ids[ridx].last_id_use = fence_get(&fence->base); 255 adev->vm_manager.ids[vm_id].active = fence_get(fence);
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
257} 257}
258 258
259/** 259/**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
332 * 332 *
333 * @adev: amdgpu_device pointer 333 * @adev: amdgpu_device pointer
334 * @bo: bo to clear 334 * @bo: bo to clear
335 *
336 * need to reserve bo first before calling it.
335 */ 337 */
336static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 338static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
337 struct amdgpu_bo *bo) 339 struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
343 uint64_t addr; 345 uint64_t addr;
344 int r; 346 int r;
345 347
346 r = amdgpu_bo_reserve(bo, false);
347 if (r)
348 return r;
349
350 r = reservation_object_reserve_shared(bo->tbo.resv); 348 r = reservation_object_reserve_shared(bo->tbo.resv);
351 if (r) 349 if (r)
352 return r; 350 return r;
353 351
354 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 352 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
355 if (r) 353 if (r)
356 goto error_unreserve; 354 goto error;
357 355
358 addr = amdgpu_bo_gpu_offset(bo); 356 addr = amdgpu_bo_gpu_offset(bo);
359 entries = amdgpu_bo_size(bo) / 8; 357 entries = amdgpu_bo_size(bo) / 8;
360 358
361 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 359 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
362 if (!ib) 360 if (!ib)
363 goto error_unreserve; 361 goto error;
364 362
365 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 363 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
366 if (r) 364 if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
378 if (!r) 376 if (!r)
379 amdgpu_bo_fence(bo, fence, true); 377 amdgpu_bo_fence(bo, fence, true);
380 fence_put(fence); 378 fence_put(fence);
381 if (amdgpu_enable_scheduler) { 379 if (amdgpu_enable_scheduler)
382 amdgpu_bo_unreserve(bo);
383 return 0; 380 return 0;
384 } 381
385error_free: 382error_free:
386 amdgpu_ib_free(adev, ib); 383 amdgpu_ib_free(adev, ib);
387 kfree(ib); 384 kfree(ib);
388 385
389error_unreserve: 386error:
390 amdgpu_bo_unreserve(bo);
391 return r; 387 return r;
392} 388}
393 389
@@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
989 * Add a mapping of the BO at the specefied addr into the VM. 985 * Add a mapping of the BO at the specefied addr into the VM.
990 * Returns 0 for success, error for failure. 986 * Returns 0 for success, error for failure.
991 * 987 *
992 * Object has to be reserved and gets unreserved by this function! 988 * Object has to be reserved and unreserved outside!
993 */ 989 */
994int amdgpu_vm_bo_map(struct amdgpu_device *adev, 990int amdgpu_vm_bo_map(struct amdgpu_device *adev,
995 struct amdgpu_bo_va *bo_va, 991 struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1005 1001
1006 /* validate the parameters */ 1002 /* validate the parameters */
1007 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 1003 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1008 size == 0 || size & AMDGPU_GPU_PAGE_MASK) { 1004 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1009 amdgpu_bo_unreserve(bo_va->bo);
1010 return -EINVAL; 1005 return -EINVAL;
1011 }
1012 1006
1013 /* make sure object fit at this offset */ 1007 /* make sure object fit at this offset */
1014 eaddr = saddr + size; 1008 eaddr = saddr + size;
1015 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { 1009 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1016 amdgpu_bo_unreserve(bo_va->bo);
1017 return -EINVAL; 1010 return -EINVAL;
1018 }
1019 1011
1020 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1012 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1021 if (last_pfn > adev->vm_manager.max_pfn) { 1013 if (last_pfn > adev->vm_manager.max_pfn) {
1022 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1014 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
1023 last_pfn, adev->vm_manager.max_pfn); 1015 last_pfn, adev->vm_manager.max_pfn);
1024 amdgpu_bo_unreserve(bo_va->bo);
1025 return -EINVAL; 1016 return -EINVAL;
1026 } 1017 }
1027 1018
1028 saddr /= AMDGPU_GPU_PAGE_SIZE; 1019 saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1020 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 1021
1022 spin_lock(&vm->it_lock);
1031 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1023 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1024 spin_unlock(&vm->it_lock);
1032 if (it) { 1025 if (it) {
1033 struct amdgpu_bo_va_mapping *tmp; 1026 struct amdgpu_bo_va_mapping *tmp;
1034 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); 1027 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1036 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1029 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1037 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1030 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1038 tmp->it.start, tmp->it.last + 1); 1031 tmp->it.start, tmp->it.last + 1);
1039 amdgpu_bo_unreserve(bo_va->bo);
1040 r = -EINVAL; 1032 r = -EINVAL;
1041 goto error; 1033 goto error;
1042 } 1034 }
1043 1035
1044 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1036 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1045 if (!mapping) { 1037 if (!mapping) {
1046 amdgpu_bo_unreserve(bo_va->bo);
1047 r = -ENOMEM; 1038 r = -ENOMEM;
1048 goto error; 1039 goto error;
1049 } 1040 }
@@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1055 mapping->flags = flags; 1046 mapping->flags = flags;
1056 1047
1057 list_add(&mapping->list, &bo_va->invalids); 1048 list_add(&mapping->list, &bo_va->invalids);
1049 spin_lock(&vm->it_lock);
1058 interval_tree_insert(&mapping->it, &vm->va); 1050 interval_tree_insert(&mapping->it, &vm->va);
1051 spin_unlock(&vm->it_lock);
1059 trace_amdgpu_vm_bo_map(bo_va, mapping); 1052 trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 1053
1061 /* Make sure the page tables are allocated */ 1054 /* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1067 if (eaddr > vm->max_pde_used) 1060 if (eaddr > vm->max_pde_used)
1068 vm->max_pde_used = eaddr; 1061 vm->max_pde_used = eaddr;
1069 1062
1070 amdgpu_bo_unreserve(bo_va->bo);
1071
1072 /* walk over the address space and allocate the page tables */ 1063 /* walk over the address space and allocate the page tables */
1073 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1064 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1074 struct reservation_object *resv = vm->page_directory->tbo.resv; 1065 struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1077 if (vm->page_tables[pt_idx].bo) 1068 if (vm->page_tables[pt_idx].bo)
1078 continue; 1069 continue;
1079 1070
1080 ww_mutex_lock(&resv->lock, NULL);
1081 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1071 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1082 AMDGPU_GPU_PAGE_SIZE, true, 1072 AMDGPU_GPU_PAGE_SIZE, true,
1083 AMDGPU_GEM_DOMAIN_VRAM, 1073 AMDGPU_GEM_DOMAIN_VRAM,
1084 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1074 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1085 NULL, resv, &pt); 1075 NULL, resv, &pt);
1086 ww_mutex_unlock(&resv->lock);
1087 if (r) 1076 if (r)
1088 goto error_free; 1077 goto error_free;
1089 1078
@@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1101 1090
1102error_free: 1091error_free:
1103 list_del(&mapping->list); 1092 list_del(&mapping->list);
1093 spin_lock(&vm->it_lock);
1104 interval_tree_remove(&mapping->it, &vm->va); 1094 interval_tree_remove(&mapping->it, &vm->va);
1095 spin_unlock(&vm->it_lock);
1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1096 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 kfree(mapping); 1097 kfree(mapping);
1107 1098
@@ -1119,7 +1110,7 @@ error:
1119 * Remove a mapping of the BO at the specified addr from the VM. 1110 * Remove a mapping of the BO at the specified addr from the VM.
1120 * Returns 0 for success, error for failure. 1111 * Returns 0 for success, error for failure.
1121 * 1112 *
1122 * Object has to be reserved and gets unreserved by this function! 1113 * Object has to be reserved and unreserved outside!
1123 */ 1114 */
1124int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1115int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1125 struct amdgpu_bo_va *bo_va, 1116 struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1144 break; 1135 break;
1145 } 1136 }
1146 1137
1147 if (&mapping->list == &bo_va->invalids) { 1138 if (&mapping->list == &bo_va->invalids)
1148 amdgpu_bo_unreserve(bo_va->bo);
1149 return -ENOENT; 1139 return -ENOENT;
1150 }
1151 } 1140 }
1152 1141
1153 list_del(&mapping->list); 1142 list_del(&mapping->list);
1143 spin_lock(&vm->it_lock);
1154 interval_tree_remove(&mapping->it, &vm->va); 1144 interval_tree_remove(&mapping->it, &vm->va);
1145 spin_unlock(&vm->it_lock);
1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1146 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 1147
1157 if (valid) 1148 if (valid)
1158 list_add(&mapping->list, &vm->freed); 1149 list_add(&mapping->list, &vm->freed);
1159 else 1150 else
1160 kfree(mapping); 1151 kfree(mapping);
1161 amdgpu_bo_unreserve(bo_va->bo);
1162 1152
1163 return 0; 1153 return 0;
1164} 1154}
@@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1187 1177
1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1178 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 list_del(&mapping->list); 1179 list_del(&mapping->list);
1180 spin_lock(&vm->it_lock);
1190 interval_tree_remove(&mapping->it, &vm->va); 1181 interval_tree_remove(&mapping->it, &vm->va);
1182 spin_unlock(&vm->it_lock);
1191 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1183 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1192 list_add(&mapping->list, &vm->freed); 1184 list_add(&mapping->list, &vm->freed);
1193 } 1185 }
1194 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1186 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 list_del(&mapping->list); 1187 list_del(&mapping->list);
1188 spin_lock(&vm->it_lock);
1196 interval_tree_remove(&mapping->it, &vm->va); 1189 interval_tree_remove(&mapping->it, &vm->va);
1190 spin_unlock(&vm->it_lock);
1197 kfree(mapping); 1191 kfree(mapping);
1198 } 1192 }
1199 1193
@@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1235 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1242 vm->ids[i].id = 0; 1236 vm->ids[i].id = 0;
1243 vm->ids[i].flushed_updates = NULL; 1237 vm->ids[i].flushed_updates = NULL;
1244 vm->ids[i].last_id_use = NULL;
1245 } 1238 }
1246 mutex_init(&vm->mutex); 1239 mutex_init(&vm->mutex);
1247 vm->va = RB_ROOT; 1240 vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1249 INIT_LIST_HEAD(&vm->invalidated); 1242 INIT_LIST_HEAD(&vm->invalidated);
1250 INIT_LIST_HEAD(&vm->cleared); 1243 INIT_LIST_HEAD(&vm->cleared);
1251 INIT_LIST_HEAD(&vm->freed); 1244 INIT_LIST_HEAD(&vm->freed);
1252 1245 spin_lock_init(&vm->it_lock);
1253 pd_size = amdgpu_vm_directory_size(adev); 1246 pd_size = amdgpu_vm_directory_size(adev);
1254 pd_entries = amdgpu_vm_num_pdes(adev); 1247 pd_entries = amdgpu_vm_num_pdes(adev);
1255 1248
@@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1269 NULL, NULL, &vm->page_directory); 1262 NULL, NULL, &vm->page_directory);
1270 if (r) 1263 if (r)
1271 return r; 1264 return r;
1272 1265 r = amdgpu_bo_reserve(vm->page_directory, false);
1266 if (r) {
1267 amdgpu_bo_unref(&vm->page_directory);
1268 vm->page_directory = NULL;
1269 return r;
1270 }
1273 r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1271 r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1272 amdgpu_bo_unreserve(vm->page_directory);
1274 if (r) { 1273 if (r) {
1275 amdgpu_bo_unref(&vm->page_directory); 1274 amdgpu_bo_unref(&vm->page_directory);
1276 vm->page_directory = NULL; 1275 vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1313 1312
1314 amdgpu_bo_unref(&vm->page_directory); 1313 amdgpu_bo_unref(&vm->page_directory);
1315 fence_put(vm->page_directory_fence); 1314 fence_put(vm->page_directory_fence);
1316
1317 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1315 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1316 unsigned id = vm->ids[i].id;
1317
1318 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1319 (long)vm, 0);
1318 fence_put(vm->ids[i].flushed_updates); 1320 fence_put(vm->ids[i].flushed_updates);
1319 fence_put(vm->ids[i].last_id_use);
1320 } 1321 }
1321 1322
1322 mutex_destroy(&vm->mutex); 1323 mutex_destroy(&vm->mutex);
1323} 1324}
1325
1326/**
1327 * amdgpu_vm_manager_fini - cleanup VM manager
1328 *
1329 * @adev: amdgpu_device pointer
1330 *
1331 * Cleanup the VM manager and free resources.
1332 */
1333void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1334{
1335 unsigned i;
1336
1337 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1338 fence_put(adev->vm_manager.ids[i].active);
1339}
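
The amdgpu_vm.c hunks above change how the per-VM interval tree is protected: instead of relying on the buffer object being reserved, insertion, removal, and lookup are now bracketed by the dedicated vm->it_lock spinlock, and amdgpu_vm_bo_map()/amdgpu_vm_bo_unmap() no longer unreserve the BO themselves (reservation becomes the caller's job, as the updated comments state). The new amdgpu_vm_manager_fini() drops the per-ID fences. A minimal userspace sketch of the locking split, with hypothetical names throughout (not kernel API):

    /* Illustrative analogue: the lookup structure gets its own short-lived
     * spinlock, while object lifetime -- the BO reservation in the kernel
     * code -- stays the caller's responsibility. */
    #include <pthread.h>
    #include <stdio.h>

    struct va_space {
        pthread_spinlock_t it_lock; /* protects only the tree */
        int entries;                /* stand-in for the interval tree */
    };

    /* Caller must already hold whatever protects the object itself. */
    static void va_insert(struct va_space *vm)
    {
        pthread_spin_lock(&vm->it_lock);
        vm->entries++;              /* interval_tree_insert() in-kernel */
        pthread_spin_unlock(&vm->it_lock);
    }

    int main(void)
    {
        struct va_space vm = { .entries = 0 };

        pthread_spin_init(&vm.it_lock, PTHREAD_PROCESS_PRIVATE);
        va_insert(&vm);
        printf("entries=%d\n", vm.entries);
        pthread_spin_destroy(&vm.it_lock);
        return 0;
    }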
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5df8e7..57a2e347f04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6569 switch (state) { 6569 switch (state) {
6570 case AMDGPU_IRQ_STATE_DISABLE: 6570 case AMDGPU_IRQ_STATE_DISABLE:
6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6571 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6572 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6572 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6573 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6574 break; 6574 break;
6575 case AMDGPU_IRQ_STATE_ENABLE: 6575 case AMDGPU_IRQ_STATE_ENABLE:
6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6576 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6577 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6577 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6578 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6579 break; 6579 break;
6580 default: 6580 default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6586 switch (state) { 6586 switch (state) {
6587 case AMDGPU_IRQ_STATE_DISABLE: 6587 case AMDGPU_IRQ_STATE_DISABLE:
6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6588 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6589 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6589 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6590 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6591 break; 6591 break;
6592 case AMDGPU_IRQ_STATE_ENABLE: 6592 case AMDGPU_IRQ_STATE_ENABLE:
6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6593 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6594 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6594 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6595 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6596 break; 6596 break;
6597 default: 6597 default:
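
The ci_dpm.c hunk above inverts the mask-bit handling for the thermal high/low interrupts: THERM_INTH_MASK/THERM_INTL_MASK are suppression bits, so disabling the interrupt means setting them and enabling means clearing them. A tiny sketch of the corrected polarity, with a hypothetical bit position:

    #include <stdint.h>
    #include <stdio.h>

    #define THERM_INTH_MASK (1u << 24)  /* hypothetical bit position */

    /* The MASK bit suppresses the interrupt, so "disable" must set it
     * and "enable" must clear it -- the polarity the hunk corrects. */
    static uint32_t thermal_int_update(uint32_t reg, int enable)
    {
        return enable ? (reg & ~THERM_INTH_MASK) : (reg | THERM_INTH_MASK);
    }

    int main(void)
    {
        printf("disable: %#x\n", thermal_int_update(0, 0));
        printf("enable:  %#x\n", thermal_int_update(~0u, 1));
        return 0;
    }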
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf756d40..e1dcab98e249 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, 268 mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, 269 mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, 270 mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
271 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
272 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, 271 mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
273 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, 272 mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
274 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, 273 mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
296 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, 295 mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
297 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, 296 mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
298 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, 297 mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
299 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
300 mmPCIE_DATA, 0x000f0000, 0x00000000,
301 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
302 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
303 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 298 mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
304}; 299};
305 300
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1000 adev->gfx.config.max_cu_per_sh = 16; 995 adev->gfx.config.max_cu_per_sh = 16;
1001 adev->gfx.config.max_sh_per_se = 1; 996 adev->gfx.config.max_sh_per_se = 1;
1002 adev->gfx.config.max_backends_per_se = 4; 997 adev->gfx.config.max_backends_per_se = 4;
1003 adev->gfx.config.max_texture_channel_caches = 8; 998 adev->gfx.config.max_texture_channel_caches = 16;
1004 adev->gfx.config.max_gprs = 256; 999 adev->gfx.config.max_gprs = 256;
1005 adev->gfx.config.max_gs_threads = 32; 1000 adev->gfx.config.max_gs_threads = 32;
1006 adev->gfx.config.max_hw_contexts = 8; 1001 adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
1613 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); 1608 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1614 } 1609 }
1615 case CHIP_FIJI: 1610 case CHIP_FIJI:
1611 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1612 switch (reg_offset) {
1613 case 0:
1614 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1615 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1616 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1617 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1618 break;
1619 case 1:
1620 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1621 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1623 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1624 break;
1625 case 2:
1626 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1627 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1628 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1629 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1630 break;
1631 case 3:
1632 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1633 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1634 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1635 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1636 break;
1637 case 4:
1638 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1639 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1641 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1642 break;
1643 case 5:
1644 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1645 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1646 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1647 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1648 break;
1649 case 6:
1650 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1651 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1652 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1653 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1654 break;
1655 case 7:
1656 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1657 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
1659 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1660 break;
1661 case 8:
1662 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1663 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1664 break;
1665 case 9:
1666 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1667 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1668 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1669 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1670 break;
1671 case 10:
1672 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1673 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1674 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1675 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1676 break;
1677 case 11:
1678 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1679 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1680 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1681 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1682 break;
1683 case 12:
1684 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1685 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1686 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1687 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1688 break;
1689 case 13:
1690 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1691 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1692 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1693 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1694 break;
1695 case 14:
1696 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1697 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1698 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1699 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1700 break;
1701 case 15:
1702 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1703 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1704 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1705 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1706 break;
1707 case 16:
1708 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1709 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1710 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1711 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1712 break;
1713 case 17:
1714 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1715 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1716 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1717 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1718 break;
1719 case 18:
1720 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1721 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1722 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1723 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1724 break;
1725 case 19:
1726 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1727 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1728 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1729 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1730 break;
1731 case 20:
1732 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1733 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1734 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1735 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1736 break;
1737 case 21:
1738 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1739 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1740 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1741 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1742 break;
1743 case 22:
1744 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1745 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1746 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1747 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1748 break;
1749 case 23:
1750 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1751 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1752 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1753 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1754 break;
1755 case 24:
1756 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1757 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1758 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1759 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1760 break;
1761 case 25:
1762 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1763 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1764 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1765 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1766 break;
1767 case 26:
1768 gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1769 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1770 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1771 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1772 break;
1773 case 27:
1774 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1775 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1776 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1777 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1778 break;
1779 case 28:
1780 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1781 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1782 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1783 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1784 break;
1785 case 29:
1786 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1787 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1788 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1789 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1790 break;
1791 case 30:
1792 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1793 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1794 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1795 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1796 break;
1797 default:
1798 gb_tile_moden = 0;
1799 break;
1800 }
1801 adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
1802 WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
1803 }
1804 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
1805 switch (reg_offset) {
1806 case 0:
1807 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1808 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1809 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1810 NUM_BANKS(ADDR_SURF_8_BANK));
1811 break;
1812 case 1:
1813 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1816 NUM_BANKS(ADDR_SURF_8_BANK));
1817 break;
1818 case 2:
1819 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1820 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1821 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1822 NUM_BANKS(ADDR_SURF_8_BANK));
1823 break;
1824 case 3:
1825 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1826 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1827 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1828 NUM_BANKS(ADDR_SURF_8_BANK));
1829 break;
1830 case 4:
1831 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1832 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1833 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1834 NUM_BANKS(ADDR_SURF_8_BANK));
1835 break;
1836 case 5:
1837 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1838 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1839 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1840 NUM_BANKS(ADDR_SURF_8_BANK));
1841 break;
1842 case 6:
1843 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1844 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1845 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1846 NUM_BANKS(ADDR_SURF_8_BANK));
1847 break;
1848 case 8:
1849 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1850 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1851 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1852 NUM_BANKS(ADDR_SURF_8_BANK));
1853 break;
1854 case 9:
1855 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1856 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1857 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1858 NUM_BANKS(ADDR_SURF_8_BANK));
1859 break;
1860 case 10:
1861 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1862 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1863 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1864 NUM_BANKS(ADDR_SURF_8_BANK));
1865 break;
1866 case 11:
1867 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1868 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1869 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1870 NUM_BANKS(ADDR_SURF_8_BANK));
1871 break;
1872 case 12:
1873 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1874 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1875 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1876 NUM_BANKS(ADDR_SURF_8_BANK));
1877 break;
1878 case 13:
1879 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1880 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1881 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1882 NUM_BANKS(ADDR_SURF_8_BANK));
1883 break;
1884 case 14:
1885 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1886 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1887 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1888 NUM_BANKS(ADDR_SURF_4_BANK));
1889 break;
1890 case 7:
1891 /* unused idx */
1892 continue;
1893 default:
1894 gb_tile_moden = 0;
1895 break;
1896 }
1897 adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
1898 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
1899 }
1900 break;
1616 case CHIP_TONGA: 1901 case CHIP_TONGA:
1617 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1902 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1618 switch (reg_offset) { 1903 switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
2971 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 3256 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2972 switch (adev->asic_type) { 3257 switch (adev->asic_type) {
2973 case CHIP_TONGA: 3258 case CHIP_TONGA:
2974 case CHIP_FIJI:
2975 amdgpu_ring_write(ring, 0x16000012); 3259 amdgpu_ring_write(ring, 0x16000012);
2976 amdgpu_ring_write(ring, 0x0000002A); 3260 amdgpu_ring_write(ring, 0x0000002A);
2977 break; 3261 break;
3262 case CHIP_FIJI:
3263 amdgpu_ring_write(ring, 0x3a00161a);
3264 amdgpu_ring_write(ring, 0x0000002e);
3265 break;
2978 case CHIP_TOPAZ: 3266 case CHIP_TOPAZ:
2979 case CHIP_CARRIZO: 3267 case CHIP_CARRIZO:
2980 amdgpu_ring_write(ring, 0x00000002); 3268 amdgpu_ring_write(ring, 0x00000002);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc73fff..7427d8cd4c43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42 42
43MODULE_FIRMWARE("radeon/boniare_mc.bin"); 43MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 44MODULE_FIRMWARE("radeon/hawaii_mc.bin");
45 45
46/** 46/**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 501 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 502 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 503 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
504 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
504 WREG32(mmVM_L2_CNTL, tmp); 505 WREG32(mmVM_L2_CNTL, tmp);
505 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 506 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
506 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 507 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
960 961
961static int gmc_v7_0_sw_fini(void *handle) 962static int gmc_v7_0_sw_fini(void *handle)
962{ 963{
963 int i;
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965 965
966 if (adev->vm_manager.enabled) { 966 if (adev->vm_manager.enabled) {
967 for (i = 0; i < AMDGPU_NUM_VM; ++i) 967 amdgpu_vm_manager_fini(adev);
968 fence_put(adev->vm_manager.active[i]);
969 gmc_v7_0_vm_fini(adev); 968 gmc_v7_0_vm_fini(adev);
970 adev->vm_manager.enabled = false; 969 adev->vm_manager.enabled = false;
971 } 970 }
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
1010 1009
1011static int gmc_v7_0_suspend(void *handle) 1010static int gmc_v7_0_suspend(void *handle)
1012{ 1011{
1013 int i;
1014 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015 1013
1016 if (adev->vm_manager.enabled) { 1014 if (adev->vm_manager.enabled) {
1017 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1015 amdgpu_vm_manager_fini(adev);
1018 fence_put(adev->vm_manager.active[i]);
1019 gmc_v7_0_vm_fini(adev); 1016 gmc_v7_0_vm_fini(adev);
1020 adev->vm_manager.enabled = false; 1017 adev->vm_manager.enabled = false;
1021 } 1018 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e74e3b4..cb0e50ebb528 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 629 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
630 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 630 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
631 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 631 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
632 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
632 WREG32(mmVM_L2_CNTL, tmp); 633 WREG32(mmVM_L2_CNTL, tmp);
633 tmp = RREG32(mmVM_L2_CNTL2); 634 tmp = RREG32(mmVM_L2_CNTL2);
634 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 635 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
979 980
980static int gmc_v8_0_sw_fini(void *handle) 981static int gmc_v8_0_sw_fini(void *handle)
981{ 982{
982 int i;
983 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 983 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
984 984
985 if (adev->vm_manager.enabled) { 985 if (adev->vm_manager.enabled) {
986 for (i = 0; i < AMDGPU_NUM_VM; ++i) 986 amdgpu_vm_manager_fini(adev);
987 fence_put(adev->vm_manager.active[i]);
988 gmc_v8_0_vm_fini(adev); 987 gmc_v8_0_vm_fini(adev);
989 adev->vm_manager.enabled = false; 988 adev->vm_manager.enabled = false;
990 } 989 }
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
1031 1030
1032static int gmc_v8_0_suspend(void *handle) 1031static int gmc_v8_0_suspend(void *handle)
1033{ 1032{
1034 int i;
1035 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1033 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1036 1034
1037 if (adev->vm_manager.enabled) { 1035 if (adev->vm_manager.enabled) {
1038 for (i = 0; i < AMDGPU_NUM_VM; ++i) 1036 amdgpu_vm_manager_fini(adev);
1039 fence_put(adev->vm_manager.active[i]);
1040 gmc_v8_0_vm_fini(adev); 1037 gmc_v8_0_vm_fini(adev);
1041 adev->vm_manager.enabled = false; 1038 adev->vm_manager.enabled = false;
1042 } 1039 }
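
The gmc_v7_0.c and gmc_v8_0.c hunks replace the open-coded loop that dropped the per-VM active fences in sw_fini and suspend with a call to the new amdgpu_vm_manager_fini() helper. A small sketch of the consolidation, using illustrative names and counts:

    #include <stdio.h>

    #define NUM_VM 16                   /* illustrative, not AMDGPU_NUM_VM */

    struct fence { int refs; };

    static void fence_put(struct fence *f)
    {
        if (f)
            f->refs--;                  /* drop one reference */
    }

    struct vm_manager { struct fence *active[NUM_VM]; };

    /* One helper replaces the four copies of this loop that the
     * gmc_v7/v8 sw_fini and suspend paths carried before the change. */
    static void vm_manager_fini(struct vm_manager *mgr)
    {
        for (int i = 0; i < NUM_VM; ++i)
            fence_put(mgr->active[i]);
    }

    int main(void)
    {
        struct vm_manager mgr = { { 0 } };

        vm_manager_fini(&mgr);
        puts("done");
        return 0;
    }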
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
16 TP_ARGS(sched_job), 16 TP_ARGS(sched_job),
17 TP_STRUCT__entry( 17 TP_STRUCT__entry(
18 __field(struct amd_sched_entity *, entity) 18 __field(struct amd_sched_entity *, entity)
19 __field(struct amd_sched_job *, sched_job)
20 __field(struct fence *, fence)
19 __field(const char *, name) 21 __field(const char *, name)
20 __field(u32, job_count) 22 __field(u32, job_count)
21 __field(int, hw_job_count) 23 __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
23 25
24 TP_fast_assign( 26 TP_fast_assign(
25 __entry->entity = sched_job->s_entity; 27 __entry->entity = sched_job->s_entity;
28 __entry->sched_job = sched_job;
29 __entry->fence = &sched_job->s_fence->base;
26 __entry->name = sched_job->sched->name; 30 __entry->name = sched_job->sched->name;
27 __entry->job_count = kfifo_len( 31 __entry->job_count = kfifo_len(
28 &sched_job->s_entity->job_queue) / sizeof(sched_job); 32 &sched_job->s_entity->job_queue) / sizeof(sched_job);
29 __entry->hw_job_count = atomic_read( 33 __entry->hw_job_count = atomic_read(
30 &sched_job->sched->hw_rq_count); 34 &sched_job->sched->hw_rq_count);
31 ), 35 ),
32 TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", 36 TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
33 __entry->entity, __entry->name, __entry->job_count, 37 __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
34 __entry->hw_job_count) 38 __entry->job_count, __entry->hw_job_count)
35); 39);
40
41TRACE_EVENT(amd_sched_process_job,
42 TP_PROTO(struct amd_sched_fence *fence),
43 TP_ARGS(fence),
44 TP_STRUCT__entry(
45 __field(struct fence *, fence)
46 ),
47
48 TP_fast_assign(
49 __entry->fence = &fence->base;
50 ),
51 TP_printk("fence=%p signaled", __entry->fence)
52);
53
36#endif 54#endif
37 55
38/* This part must be outside protection */ 56/* This part must be outside protection */
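
The gpu_sched_trace.h hunk records the job and fence pointers in amd_sched_job and adds an amd_sched_process_job event, so a submission and its completion can be joined on the fence pointer in the trace stream. A userspace analogue of that correlation:

    #include <stdio.h>

    struct fence { unsigned seq; };

    static void trace_sched_job(struct fence *f)
    {
        printf("job fence=%p\n", (void *)f);        /* at push-job time */
    }

    static void trace_process_job(struct fence *f)
    {
        printf("fence=%p signaled\n", (void *)f);   /* at completion */
    }

    int main(void)
    {
        struct fence f = { 1 };

        trace_sched_job(&f);
        trace_process_job(&f);  /* same pointer joins the two events */
        return 0;
    }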
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..ea30d6ad4c13 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@ static struct amd_sched_job *
34amd_sched_entity_pop_job(struct amd_sched_entity *entity); 34amd_sched_entity_pop_job(struct amd_sched_entity *entity);
35static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 35static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
36 36
37struct kmem_cache *sched_fence_slab;
38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
39
37/* Initialize a given run queue struct */ 40/* Initialize a given run queue struct */
38static void amd_sched_rq_init(struct amd_sched_rq *rq) 41static void amd_sched_rq_init(struct amd_sched_rq *rq)
39{ 42{
@@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
273 * 276 *
274 * Returns 0 for success, negative error code otherwise. 277 * Returns 0 for success, negative error code otherwise.
275 */ 278 */
276int amd_sched_entity_push_job(struct amd_sched_job *sched_job) 279void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
277{ 280{
278 struct amd_sched_entity *entity = sched_job->s_entity; 281 struct amd_sched_entity *entity = sched_job->s_entity;
279 struct amd_sched_fence *fence = amd_sched_fence_create(
280 entity, sched_job->owner);
281
282 if (!fence)
283 return -ENOMEM;
284
285 fence_get(&fence->base);
286 sched_job->s_fence = fence;
287 282
288 wait_event(entity->sched->job_scheduled, 283 wait_event(entity->sched->job_scheduled,
289 amd_sched_entity_in(sched_job)); 284 amd_sched_entity_in(sched_job));
290 trace_amd_sched_job(sched_job); 285 trace_amd_sched_job(sched_job);
291 return 0;
292} 286}
293 287
294/** 288/**
@@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
343 list_del_init(&s_fence->list); 337 list_del_init(&s_fence->list);
344 spin_unlock_irqrestore(&sched->fence_list_lock, flags); 338 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
345 } 339 }
340 trace_amd_sched_process_job(s_fence);
346 fence_put(&s_fence->base); 341 fence_put(&s_fence->base);
347 wake_up_interruptible(&sched->wake_up_worker); 342 wake_up_interruptible(&sched->wake_up_worker);
348} 343}
@@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
450 init_waitqueue_head(&sched->wake_up_worker); 445 init_waitqueue_head(&sched->wake_up_worker);
451 init_waitqueue_head(&sched->job_scheduled); 446 init_waitqueue_head(&sched->job_scheduled);
452 atomic_set(&sched->hw_rq_count, 0); 447 atomic_set(&sched->hw_rq_count, 0);
448 if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
449 sched_fence_slab = kmem_cache_create(
450 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
451 SLAB_HWCACHE_ALIGN, NULL);
452 if (!sched_fence_slab)
453 return -ENOMEM;
454 }
453 455
454 /* Each scheduler will run on a separate kernel thread */ 456 /* Each scheduler will run on a separate kernel thread */
455 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 457 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
470{ 472{
471 if (sched->thread) 473 if (sched->thread)
472 kthread_stop(sched->thread); 474 kthread_stop(sched->thread);
475 if (atomic_dec_and_test(&sched_fence_slab_ref))
476 kmem_cache_destroy(sched_fence_slab);
473} 477}
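
The gpu_scheduler.c hunks move scheduler-fence allocation onto a shared kmem_cache whose lifetime is tied to an atomic reference count: the first amd_sched_init() creates sched_fence_slab and the last amd_sched_fini() destroys it. A userspace sketch of the refcounted-singleton pattern, with malloc/free standing in for the slab calls:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic int slab_ref;
    static void *fence_slab;

    static int sched_init(void)
    {
        /* First user creates the shared cache... */
        if (atomic_fetch_add(&slab_ref, 1) == 0) {
            fence_slab = malloc(4096);  /* kmem_cache_create() in-kernel */
            if (!fence_slab)
                return -1;              /* -ENOMEM in the real code */
        }
        return 0;
    }

    static void sched_fini(void)
    {
        /* ...and the last user tears it down. */
        if (atomic_fetch_sub(&slab_ref, 1) == 1) {
            free(fence_slab);           /* kmem_cache_destroy() */
            fence_slab = NULL;
        }
    }

    int main(void)
    {
        if (sched_init() == 0 && sched_init() == 0) {
            sched_fini();
            sched_fini();
        }
        puts("slab lifecycle balanced");
        return 0;
    }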
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9aced041..939692b14f4b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,6 +30,9 @@
30struct amd_gpu_scheduler; 30struct amd_gpu_scheduler;
31struct amd_sched_rq; 31struct amd_sched_rq;
32 32
33extern struct kmem_cache *sched_fence_slab;
34extern atomic_t sched_fence_slab_ref;
35
33/** 36/**
34 * A scheduler entity is a wrapper around a job queue or a group 37 * A scheduler entity is a wrapper around a job queue or a group
35 * of other entities. Entities take turns emitting jobs from their 38 * of other entities. Entities take turns emitting jobs from their
@@ -76,7 +79,6 @@ struct amd_sched_job {
76 struct amd_gpu_scheduler *sched; 79 struct amd_gpu_scheduler *sched;
77 struct amd_sched_entity *s_entity; 80 struct amd_sched_entity *s_entity;
78 struct amd_sched_fence *s_fence; 81 struct amd_sched_fence *s_fence;
79 void *owner;
80}; 82};
81 83
82extern const struct fence_ops amd_sched_fence_ops; 84extern const struct fence_ops amd_sched_fence_ops;
@@ -128,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
128 uint32_t jobs); 130 uint32_t jobs);
129void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, 131void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
130 struct amd_sched_entity *entity); 132 struct amd_sched_entity *entity);
131int amd_sched_entity_push_job(struct amd_sched_job *sched_job); 133void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
132 134
133struct amd_sched_fence *amd_sched_fence_create( 135struct amd_sched_fence *amd_sched_fence_create(
134 struct amd_sched_entity *s_entity, void *owner); 136 struct amd_sched_entity *s_entity, void *owner);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..8d2130b9ff05 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
32 struct amd_sched_fence *fence = NULL; 32 struct amd_sched_fence *fence = NULL;
33 unsigned seq; 33 unsigned seq;
34 34
35 fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); 35 fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38 fence->owner = owner; 38 fence->owner = owner;
@@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
71 return true; 71 return true;
72} 72}
73 73
74static void amd_sched_fence_release(struct fence *f)
75{
76 struct amd_sched_fence *fence = to_amd_sched_fence(f);
77 kmem_cache_free(sched_fence_slab, fence);
78}
79
74const struct fence_ops amd_sched_fence_ops = { 80const struct fence_ops amd_sched_fence_ops = {
75 .get_driver_name = amd_sched_fence_get_driver_name, 81 .get_driver_name = amd_sched_fence_get_driver_name,
76 .get_timeline_name = amd_sched_fence_get_timeline_name, 82 .get_timeline_name = amd_sched_fence_get_timeline_name,
77 .enable_signaling = amd_sched_fence_enable_signaling, 83 .enable_signaling = amd_sched_fence_enable_signaling,
78 .signaled = NULL, 84 .signaled = NULL,
79 .wait = fence_default_wait, 85 .wait = fence_default_wait,
80 .release = NULL, 86 .release = amd_sched_fence_release,
81}; 87};
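
With fences now coming from the slab, sched_fence.c must also free them back to it, which is what the new amd_sched_fence_release() hook wired into fence_ops does. A minimal sketch of the pairing, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    struct sfence {
        void (*release)(struct sfence *f);
    };

    /* Memory that came from the dedicated cache goes back through the
     * release hook -- the counterpart of kmem_cache_free() above. */
    static void sfence_release(struct sfence *f)
    {
        free(f);    /* kmem_cache_free(sched_fence_slab, ...) in-kernel */
    }

    static struct sfence *sfence_create(void)
    {
        struct sfence *f = calloc(1, sizeof(*f));   /* kmem_cache_zalloc() */

        if (f)
            f->release = sfence_release;
        return f;
    }

    int main(void)
    {
        struct sfence *f = sfence_create();

        if (f)
            f->release(f);
        return 0;
    }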
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7bb3845d9974..aeee083c7f95 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state,
1432 return ret; 1432 return ret;
1433} 1433}
1434 1434
1435/**
1436 * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers.
1437 *
1438 * @dev: drm device to check.
1439 * @plane_mask: plane mask for planes that were updated.
1440 * @ret: return value, can be -EDEADLK for a retry.
1441 *
1442 * Before doing an update, plane->old_fb is set to plane->fb,
1443 * but before dropping the locks old_fb needs to be set to NULL
1444 * and plane->fb updated. This is a common operation for each
1445 * atomic update, so this call is split off as a helper.
1446 */
1447void drm_atomic_clean_old_fb(struct drm_device *dev,
1448 unsigned plane_mask,
1449 int ret)
1450{
1451 struct drm_plane *plane;
1452
1453 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1454 * locks (ie. while it is still safe to deref plane->state). We
1455 * need to do this here because the driver entry points cannot
1456 * distinguish between legacy and atomic ioctls.
1457 */
1458 drm_for_each_plane_mask(plane, dev, plane_mask) {
1459 if (ret == 0) {
1460 struct drm_framebuffer *new_fb = plane->state->fb;
1461 if (new_fb)
1462 drm_framebuffer_reference(new_fb);
1463 plane->fb = new_fb;
1464 plane->crtc = plane->state->crtc;
1465
1466 if (plane->old_fb)
1467 drm_framebuffer_unreference(plane->old_fb);
1468 }
1469 plane->old_fb = NULL;
1470 }
1471}
1472EXPORT_SYMBOL(drm_atomic_clean_old_fb);
1473
1435int drm_mode_atomic_ioctl(struct drm_device *dev, 1474int drm_mode_atomic_ioctl(struct drm_device *dev,
1436 void *data, struct drm_file *file_priv) 1475 void *data, struct drm_file *file_priv)
1437{ 1476{
@@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
1446 struct drm_plane *plane; 1485 struct drm_plane *plane;
1447 struct drm_crtc *crtc; 1486 struct drm_crtc *crtc;
1448 struct drm_crtc_state *crtc_state; 1487 struct drm_crtc_state *crtc_state;
1449 unsigned plane_mask = 0; 1488 unsigned plane_mask;
1450 int ret = 0; 1489 int ret = 0;
1451 unsigned int i, j; 1490 unsigned int i, j;
1452 1491
@@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
1486 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1525 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1487 1526
1488retry: 1527retry:
1528 plane_mask = 0;
1489 copied_objs = 0; 1529 copied_objs = 0;
1490 copied_props = 0; 1530 copied_props = 0;
1491 1531
@@ -1576,24 +1616,7 @@ retry:
1576 } 1616 }
1577 1617
1578out: 1618out:
1579 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping 1619 drm_atomic_clean_old_fb(dev, plane_mask, ret);
1580 * locks (ie. while it is still safe to deref plane->state). We
1581 * need to do this here because the driver entry points cannot
1582 * distinguish between legacy and atomic ioctls.
1583 */
1584 drm_for_each_plane_mask(plane, dev, plane_mask) {
1585 if (ret == 0) {
1586 struct drm_framebuffer *new_fb = plane->state->fb;
1587 if (new_fb)
1588 drm_framebuffer_reference(new_fb);
1589 plane->fb = new_fb;
1590 plane->crtc = plane->state->crtc;
1591
1592 if (plane->old_fb)
1593 drm_framebuffer_unreference(plane->old_fb);
1594 }
1595 plane->old_fb = NULL;
1596 }
1597 1620
1598 if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 1621 if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1599 /* 1622 /*
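
The drm_atomic.c hunks split the legacy plane->fb/old_fb fixup into the exported drm_atomic_clean_old_fb() helper and, just as importantly, reset plane_mask at the retry: label so a -EDEADLK backoff does not replay stale plane bits. A compact, runnable sketch of that retry discipline (try_commit() is a hypothetical stand-in for the lock-and-commit sequence):

    #include <errno.h>
    #include <stdio.h>

    static int try_commit(unsigned *plane_mask, int attempt)
    {
        *plane_mask |= 1u << attempt;       /* planes touched this attempt */
        return attempt == 0 ? -EDEADLK : 0; /* first pass must back off */
    }

    int main(void)
    {
        unsigned plane_mask;
        int ret, attempt = 0;

    retry:
        plane_mask = 0;                     /* the fix: reset per attempt */
        ret = try_commit(&plane_mask, attempt++);
        if (ret == -EDEADLK)
            goto retry;
        printf("mask=%#x ret=%d\n", plane_mask, ret);
        return 0;
    }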
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0c6f62168776..e5aec45bf985 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
210 return -EINVAL; 210 return -EINVAL;
211 } 211 }
212 212
213 if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
214 DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
215 new_encoder->base.id,
216 new_encoder->name,
217 connector_state->crtc->base.id);
218 return -EINVAL;
219 }
220
213 if (new_encoder == connector_state->best_encoder) { 221 if (new_encoder == connector_state->best_encoder) {
214 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", 222 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
215 connector->base.id, 223 connector->base.id,
@@ -1553,6 +1561,9 @@ retry:
1553 goto fail; 1561 goto fail;
1554 } 1562 }
1555 1563
1564 if (plane_state->crtc && (plane == plane->crtc->cursor))
1565 plane_state->state->legacy_cursor_update = true;
1566
1556 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 1567 ret = __drm_atomic_helper_disable_plane(plane, plane_state);
1557 if (ret != 0) 1568 if (ret != 0)
1558 goto fail; 1569 goto fail;
@@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
1605 plane_state->src_h = 0; 1616 plane_state->src_h = 0;
1606 plane_state->src_w = 0; 1617 plane_state->src_w = 0;
1607 1618
1608 if (plane->crtc && (plane == plane->crtc->cursor))
1609 plane_state->state->legacy_cursor_update = true;
1610
1611 return 0; 1619 return 0;
1612} 1620}
1613 1621
@@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
1741 struct drm_crtc_state *crtc_state; 1749 struct drm_crtc_state *crtc_state;
1742 struct drm_plane_state *primary_state; 1750 struct drm_plane_state *primary_state;
1743 struct drm_crtc *crtc = set->crtc; 1751 struct drm_crtc *crtc = set->crtc;
1752 int hdisplay, vdisplay;
1744 int ret; 1753 int ret;
1745 1754
1746 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1755 crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
1783 if (ret != 0) 1792 if (ret != 0)
1784 return ret; 1793 return ret;
1785 1794
1795 drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
1796
1786 drm_atomic_set_fb_for_plane(primary_state, set->fb); 1797 drm_atomic_set_fb_for_plane(primary_state, set->fb);
1787 primary_state->crtc_x = 0; 1798 primary_state->crtc_x = 0;
1788 primary_state->crtc_y = 0; 1799 primary_state->crtc_y = 0;
1789 primary_state->crtc_h = set->mode->vdisplay; 1800 primary_state->crtc_h = vdisplay;
1790 primary_state->crtc_w = set->mode->hdisplay; 1801 primary_state->crtc_w = hdisplay;
1791 primary_state->src_x = set->x << 16; 1802 primary_state->src_x = set->x << 16;
1792 primary_state->src_y = set->y << 16; 1803 primary_state->src_y = set->y << 16;
1793 if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { 1804 if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
1794 primary_state->src_h = set->mode->hdisplay << 16; 1805 primary_state->src_h = hdisplay << 16;
1795 primary_state->src_w = set->mode->vdisplay << 16; 1806 primary_state->src_w = vdisplay << 16;
1796 } else { 1807 } else {
1797 primary_state->src_h = set->mode->vdisplay << 16; 1808 primary_state->src_h = vdisplay << 16;
1798 primary_state->src_w = set->mode->hdisplay << 16; 1809 primary_state->src_w = hdisplay << 16;
1799 } 1810 }
1800 1811
1801commit: 1812commit:
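
The drm_atomic_helper.c hunks add an encoder/CRTC compatibility check, move the legacy_cursor_update hint up to the locked drm_atomic_helper_disable_plane() caller, and size the primary plane from drm_crtc_get_hv_timing() rather than the raw mode->hdisplay/vdisplay fields, keeping the 90/270-degree source swap intact. A sketch of that sizing, assuming plain integer timings:

    #include <stdbool.h>
    #include <stdio.h>

    struct src_size { int w, h; };      /* 16.16 fixed point */

    static struct src_size primary_src(int hdisplay, int vdisplay, bool rot90)
    {
        struct src_size s;

        s.w = (rot90 ? vdisplay : hdisplay) << 16;
        s.h = (rot90 ? hdisplay : vdisplay) << 16;
        return s;
    }

    int main(void)
    {
        struct src_size s = primary_src(1920, 1080, true);

        printf("src %dx%d\n", s.w >> 16, s.h >> 16);    /* 1080x1920 */
        return 0;
    }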
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e673c13c7391..69cbab5e5c81 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
342 struct drm_plane *plane; 342 struct drm_plane *plane;
343 struct drm_atomic_state *state; 343 struct drm_atomic_state *state;
344 int i, ret; 344 int i, ret;
345 unsigned plane_mask;
345 346
346 state = drm_atomic_state_alloc(dev); 347 state = drm_atomic_state_alloc(dev);
347 if (!state) 348 if (!state)
@@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
349 350
350 state->acquire_ctx = dev->mode_config.acquire_ctx; 351 state->acquire_ctx = dev->mode_config.acquire_ctx;
351retry: 352retry:
353 plane_mask = 0;
352 drm_for_each_plane(plane, dev) { 354 drm_for_each_plane(plane, dev) {
353 struct drm_plane_state *plane_state; 355 struct drm_plane_state *plane_state;
354 356
355 plane->old_fb = plane->fb;
356
357 plane_state = drm_atomic_get_plane_state(state, plane); 357 plane_state = drm_atomic_get_plane_state(state, plane);
358 if (IS_ERR(plane_state)) { 358 if (IS_ERR(plane_state)) {
359 ret = PTR_ERR(plane_state); 359 ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@ retry:
362 362
363 plane_state->rotation = BIT(DRM_ROTATE_0); 363 plane_state->rotation = BIT(DRM_ROTATE_0);
364 364
365 plane->old_fb = plane->fb;
366 plane_mask |= 1 << drm_plane_index(plane);
367
365 /* disable non-primary: */ 368 /* disable non-primary: */
366 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 369 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
367 continue; 370 continue;
@@ -382,19 +385,7 @@ retry:
382 ret = drm_atomic_commit(state); 385 ret = drm_atomic_commit(state);
383 386
384fail: 387fail:
385 drm_for_each_plane(plane, dev) { 388 drm_atomic_clean_old_fb(dev, plane_mask, ret);
386 if (ret == 0) {
387 struct drm_framebuffer *new_fb = plane->state->fb;
388 if (new_fb)
389 drm_framebuffer_reference(new_fb);
390 plane->fb = new_fb;
391 plane->crtc = plane->state->crtc;
392
393 if (plane->old_fb)
394 drm_framebuffer_unreference(plane->old_fb);
395 }
396 plane->old_fb = NULL;
397 }
398 389
399 if (ret == -EDEADLK) 390 if (ret == -EDEADLK)
400 goto backoff; 391 goto backoff;
@@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1236 struct drm_fb_helper *fb_helper = info->par; 1227 struct drm_fb_helper *fb_helper = info->par;
1237 struct drm_device *dev = fb_helper->dev; 1228 struct drm_device *dev = fb_helper->dev;
1238 struct drm_atomic_state *state; 1229 struct drm_atomic_state *state;
1230 struct drm_plane *plane;
1239 int i, ret; 1231 int i, ret;
1232 unsigned plane_mask;
1240 1233
1241 state = drm_atomic_state_alloc(dev); 1234 state = drm_atomic_state_alloc(dev);
1242 if (!state) 1235 if (!state)
@@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1244 1237
1245 state->acquire_ctx = dev->mode_config.acquire_ctx; 1238 state->acquire_ctx = dev->mode_config.acquire_ctx;
1246retry: 1239retry:
1240 plane_mask = 0;
1247 for(i = 0; i < fb_helper->crtc_count; i++) { 1241 for(i = 0; i < fb_helper->crtc_count; i++) {
1248 struct drm_mode_set *mode_set; 1242 struct drm_mode_set *mode_set;
1249 1243
1250 mode_set = &fb_helper->crtc_info[i].mode_set; 1244 mode_set = &fb_helper->crtc_info[i].mode_set;
1251 1245
1252 mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
1253
1254 mode_set->x = var->xoffset; 1246 mode_set->x = var->xoffset;
1255 mode_set->y = var->yoffset; 1247 mode_set->y = var->yoffset;
1256 1248
1257 ret = __drm_atomic_helper_set_config(mode_set, state); 1249 ret = __drm_atomic_helper_set_config(mode_set, state);
1258 if (ret != 0) 1250 if (ret != 0)
1259 goto fail; 1251 goto fail;
1252
1253 plane = mode_set->crtc->primary;
1254 plane_mask |= drm_plane_index(plane);
1255 plane->old_fb = plane->fb;
1260 } 1256 }
1261 1257
1262 ret = drm_atomic_commit(state); 1258 ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@ retry:
1268 1264
1269 1265
1270fail: 1266fail:
1271 for(i = 0; i < fb_helper->crtc_count; i++) { 1267 drm_atomic_clean_old_fb(dev, plane_mask, ret);
1272 struct drm_mode_set *mode_set;
1273 struct drm_plane *plane;
1274
1275 mode_set = &fb_helper->crtc_info[i].mode_set;
1276 plane = mode_set->crtc->primary;
1277
1278 if (ret == 0) {
1279 struct drm_framebuffer *new_fb = plane->state->fb;
1280
1281 if (new_fb)
1282 drm_framebuffer_reference(new_fb);
1283 plane->fb = new_fb;
1284 plane->crtc = plane->state->crtc;
1285
1286 if (plane->old_fb)
1287 drm_framebuffer_unreference(plane->old_fb);
1288 }
1289 plane->old_fb = NULL;
1290 }
1291 1268
1292 if (ret == -EDEADLK) 1269 if (ret == -EDEADLK)
1293 goto backoff; 1270 goto backoff;
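
The drm_fb_helper.c hunks set plane->old_fb only after the plane state is successfully acquired and hand cleanup to drm_atomic_clean_old_fb(). Note that the two paths build the mask differently: restore_fbdev_mode_atomic() ORs in 1 << drm_plane_index(plane), while pan_display_atomic() ORs in the raw index; a mask consumed by drm_for_each_plane_mask() wants the shifted form. A tiny sketch of the shifted construction (plane_index() is a hypothetical stand-in):

    #include <stdio.h>

    static unsigned plane_index(int p) { return (unsigned)p; }

    int main(void)
    {
        unsigned mask = 0;

        for (int p = 0; p < 3; ++p)
            mask |= 1u << plane_index(p);   /* not: mask |= plane_index(p) */
        printf("mask=%#x\n", mask);         /* 0x7 */
        return 0;
    }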
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8afda459a26e..95bb27de774f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -351,6 +351,8 @@ enum intel_dpll_id {
351 /* hsw/bdw */ 351 /* hsw/bdw */
352 DPLL_ID_WRPLL1 = 0, 352 DPLL_ID_WRPLL1 = 0,
353 DPLL_ID_WRPLL2 = 1, 353 DPLL_ID_WRPLL2 = 1,
354 DPLL_ID_SPLL = 2,
355
354 /* skl */ 356 /* skl */
355 DPLL_ID_SKL_DPLL1 = 0, 357 DPLL_ID_SKL_DPLL1 = 0,
356 DPLL_ID_SKL_DPLL2 = 1, 358 DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +369,7 @@ struct intel_dpll_hw_state {
367 369
368 /* hsw, bdw */ 370 /* hsw, bdw */
369 uint32_t wrpll; 371 uint32_t wrpll;
372 uint32_t spll;
370 373
371 /* skl */ 374 /* skl */
372 /* 375 /*
@@ -2648,6 +2651,7 @@ struct i915_params {
2648 int enable_cmd_parser; 2651 int enable_cmd_parser;
2649 /* leave bools at the end to not create holes */ 2652 /* leave bools at the end to not create holes */
2650 bool enable_hangcheck; 2653 bool enable_hangcheck;
2654 bool fastboot;
2651 bool prefault_disable; 2655 bool prefault_disable;
2652 bool load_detect_test; 2656 bool load_detect_test;
2653 bool reset; 2657 bool reset;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf4a1998273..91bb1fc27420 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3809,6 +3809,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3809int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3809int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3810 struct drm_file *file) 3810 struct drm_file *file)
3811{ 3811{
3812 struct drm_i915_private *dev_priv = dev->dev_private;
3812 struct drm_i915_gem_caching *args = data; 3813 struct drm_i915_gem_caching *args = data;
3813 struct drm_i915_gem_object *obj; 3814 struct drm_i915_gem_object *obj;
3814 enum i915_cache_level level; 3815 enum i915_cache_level level;
@@ -3837,9 +3838,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3837 return -EINVAL; 3838 return -EINVAL;
3838 } 3839 }
3839 3840
3841 intel_runtime_pm_get(dev_priv);
3842
3840 ret = i915_mutex_lock_interruptible(dev); 3843 ret = i915_mutex_lock_interruptible(dev);
3841 if (ret) 3844 if (ret)
3842 return ret; 3845 goto rpm_put;
3843 3846
3844 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3847 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3845 if (&obj->base == NULL) { 3848 if (&obj->base == NULL) {
@@ -3852,6 +3855,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3852 drm_gem_object_unreference(&obj->base); 3855 drm_gem_object_unreference(&obj->base);
3853unlock: 3856unlock:
3854 mutex_unlock(&dev->struct_mutex); 3857 mutex_unlock(&dev->struct_mutex);
3858rpm_put:
3859 intel_runtime_pm_put(dev_priv);
3860
3855 return ret; 3861 return ret;
3856} 3862}
3857 3863
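
The i915_gem.c hunk takes a runtime-PM reference before acquiring struct_mutex in i915_gem_set_caching_ioctl() and routes every exit, including the lock failure, through the new rpm_put label so the reference is always dropped. A userspace sketch of that goto ladder (pm_get/pm_put and the mutex are stand-ins):

    #include <pthread.h>

    static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pm_refs;

    static void pm_get(void) { ++pm_refs; }
    static void pm_put(void) { --pm_refs; }

    static int set_caching(int arg)
    {
        int ret = 0;

        pm_get();                       /* before taking the lock */
        if (pthread_mutex_lock(&dev_mutex)) {
            ret = -1;
            goto rpm_put;               /* still must drop the PM ref */
        }

        if (arg < 0)
            ret = -1;                   /* normal error path */

        pthread_mutex_unlock(&dev_mutex);
    rpm_put:
        pm_put();
        return ret;
    }

    int main(void) { return set_caching(0); }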
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 96bb23865eac..4be13a5eb932 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = {
40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 40 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
41 .disable_power_well = -1, 41 .disable_power_well = -1,
42 .enable_ips = 1, 42 .enable_ips = 1,
43 .fastboot = 0,
43 .prefault_disable = 0, 44 .prefault_disable = 0,
44 .load_detect_test = 0, 45 .load_detect_test = 0,
45 .reset = true, 46 .reset = true,
@@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well,
133module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); 134module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
134MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 135MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
135 136
137module_param_named(fastboot, i915.fastboot, bool, 0600);
138MODULE_PARM_DESC(fastboot,
139 "Try to skip unnecessary mode sets at boot time (default: false)");
140
136module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); 141module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
137MODULE_PARM_DESC(prefault_disable, 142MODULE_PARM_DESC(prefault_disable,
138 "Disable page prefaulting for pread/pwrite/reloc (default:false). " 143 "Disable page prefaulting for pread/pwrite/reloc (default:false). "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b84aaa0bb48a..6a2c76e367a5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
-static void hsw_crt_pre_enable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
-	I915_WRITE(SPLL_CTL,
-		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
-	POSTING_READ(SPLL_CTL);
-	udelay(20);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
 	intel_disable_crt(encoder);
 }
 
-static void hsw_crt_post_disable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	DRM_DEBUG_KMS("Disabling SPLL\n");
-	val = I915_READ(SPLL_CTL);
-	WARN_ON(!(val & SPLL_PLL_ENABLE));
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
-}
-
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (HAS_DDI(dev)) {
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+
+		pipe_config->dpll_hw_state.wrpll = 0;
+		pipe_config->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 	}
 
 	return true;
@@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
-		crt->base.pre_enable = hsw_crt_pre_enable;
-		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b25e99a432fb..a6752a61d99f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		}
 
 		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
 	}
 
 	return true;
@@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
 	uint32_t val;
 
 	val = I915_READ(WRPLL_CTL(pll->id));
@@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
@@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
 	"WRPLL 1",
 	"WRPLL 2",
+	"SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	dev_priv->num_shared_dpll = 2;
+	dev_priv->num_shared_dpll = 3;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < 2; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
-		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_pll_get_hw_state;
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
+		dev_priv->shared_dplls[i].get_hw_state =
+			hsw_ddi_wrpll_get_hw_state;
 	}
+
+	/* SPLL is special, but needs to be initialized anyway.. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+
 }
 
 static const char * const skl_ddi_pll_names[] = {
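
The intel_ddi.c change above splits the shared hsw_ddi_pll_* callbacks into WRPLL and SPLL variants but keeps all three PLLs in one table so generic code can iterate uniformly. A reduced standalone sketch of that table-with-a-special-last-slot pattern (all names invented for illustration, not code from the commit):

#include <stdio.h>

struct pll_ops {
	const char *name;
	void (*enable)(void);
};

static void wrpll_enable(void) { puts("WRPLL enable"); }
static void spll_enable(void)  { puts("SPLL enable"); }

static const char * const names[] = { "WRPLL 1", "WRPLL 2", "SPLL" };
static struct pll_ops plls[3];

static void plls_init(void)
{
	int i;

	/* The first two entries share one implementation... */
	for (i = 0; i < 2; i++) {
		plls[i].name = names[i];
		plls[i].enable = wrpll_enable;
	}
	/* ...the last one is special but still lives in the same table,
	 * so callers need no special case when iterating. */
	plls[i].name = names[i];
	plls[i].enable = spll_enable;
}

int main(void)
{
	plls_init();
	for (int i = 0; i < 3; i++)
		plls[i].enable();
	return 0;
}
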
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc04c21d..71860f8680f9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		return;
 
 valid_fb:
-	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
 	plane_state->src_h = fb->height << 16;
 
-	plane_state->crtc_x = plane_state->src_y = 0;
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
@@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 	struct intel_shared_dpll *pll;
 	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
+	int max = dev_priv->num_shared_dpll;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
@@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
-	}
+	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+		/* Do not consider SPLL */
+		max = 2;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < max; i++) {
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
@@ -9723,6 +9728,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 	case PORT_CLK_SEL_WRPLL2:
 		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
 		break;
+	case PORT_CLK_SEL_SPLL:
+		pipe_config->shared_dpll = DPLL_ID_SPLL;
 	}
 }
 
@@ -12003,9 +12010,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
-			      pipe_config->dpll_hw_state.wrpll);
+			      pipe_config->dpll_hw_state.wrpll,
+			      pipe_config->dpll_hw_state.spll);
 	} else {
 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 			      "fp0: 0x%x, fp1: 0x%x\n",
@@ -12528,6 +12536,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13032,6 +13041,9 @@ static int intel_atomic_check(struct drm_device *dev,
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
+		memset(&to_intel_crtc(crtc)->atomic, 0,
+		       sizeof(struct intel_crtc_atomic_commit));
+
 		/* Catch I915_MODE_FLAG_INHERITED */
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
@@ -13056,7 +13068,8 @@ static int intel_atomic_check(struct drm_device *dev,
 		if (ret)
 			return ret;
 
-		if (intel_pipe_config_compare(state->dev,
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
 					to_intel_crtc_state(crtc->state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
@@ -14364,16 +14377,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
-			      struct drm_mode_fb_cmd2 *mode_cmd)
+			      struct drm_mode_fb_cmd2 *user_mode_cmd)
 {
 	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-						mode_cmd->handles[0]));
+						mode_cmd.handles[0]));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
-	return intel_framebuffer_create(dev, mode_cmd, obj);
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
 
 #ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14705,6 +14719,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* Apple Macbook 2,1 (Core 2 T7400) */
 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
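
One fix in the intel_display.c hunks is worth calling out: intel_user_framebuffer_create() now copies the caller-supplied drm_mode_fb_cmd2 into a local, so internal rewrites of the command never leak back to userspace. A minimal sketch of that copy-by-value idiom (the struct and names below are hypothetical):

#include <stdio.h>

struct fb_cmd { unsigned width, height; };

/* A helper that legitimately rewrites fields of the command. */
static void create_fb(struct fb_cmd *cmd) { cmd->width *= 2; }

static void user_fb_create(const struct fb_cmd *user_cmd)
{
	struct fb_cmd cmd = *user_cmd;	/* work on a private copy */

	create_fb(&cmd);
	/* user_cmd is untouched; the caller never sees internal edits */
}

int main(void)
{
	struct fb_cmd c = { 1024, 768 };
	user_fb_create(&c);
	printf("caller still sees %ux%u\n", c.width, c.height);
	return 0;
}
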
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d52a15df6917..071a76b9ac52 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(val * 50);
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_gpu_freq(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_freq_opcode(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
 		return byt_freq_opcode(dev_priv, val);
 	else
-		return val / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
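
The intel_pm.c hunks replace truncating division with DIV_ROUND_CLOSEST so the Gen9 frequency conversions (ratio 50/3) round-trip cleanly. A standalone check of the difference; the macro body here is the simple non-negative form, not the kernel's full signed definition:

#include <stdio.h>

/* Round-to-nearest division for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	int val = 7;	/* an example opcode */

	/* Gen9: GT_FREQUENCY_MULTIPLIER = 50, GEN9_FREQ_SCALER = 3 */
	printf("truncated: %d MHz\n", val * 50 / 3);			/* 116 */
	printf("closest:   %d MHz\n", DIV_ROUND_CLOSEST(val * 50, 3));	/* 117 */
	return 0;
}
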
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068fe5d88..a7bf6a90eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	if (!handle || !file_priv) {
+		mga_hide_cursor(mdev);
+		return 0;
+	}
+
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj)
 		return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 		goto out_unreserve1;
 	}
 
-	if (!handle) {
-		mga_hide_cursor(mdev);
-		ret = 0;
-		goto out1;
-	}
-
 	/* Move cursor buffers into VRAM if they aren't already */
 	if (!pixels_1->pin_count) {
 		ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
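
The mgag200 fix moves the hide-cursor case to the top of mga_crtc_cursor_set(), before any object lookup or pinning, so the early exit has nothing to unwind. The guard-clause shape in a standalone sketch, with invented helper names (not code from the commit):

#include <stddef.h>

struct cursor { int visible; };

static void hide(struct cursor *c) { c->visible = 0; }
static int lookup_and_pin(int handle) { return handle; /* pretend */ }
static void unpin(int obj) { (void)obj; }

static int cursor_set(struct cursor *c, int handle)
{
	int obj;

	/* Handle the trivial case up front: nothing has been acquired
	 * yet, so there is nothing to release on this path. */
	if (!handle) {
		hide(c);
		return 0;
	}

	obj = lookup_and_pin(handle);
	if (obj < 0)
		return obj;

	/* ... program the cursor image ... */

	unpin(obj);
	return 0;
}

int main(void) { struct cursor c = { 1 }; return cursor_set(&c, 0); }
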
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3024883b844..84d45633d28c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 	 */
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 	/* Don't try to enable write-combining when it can't work, or things
 	 * may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
 
-	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
-		      "better performance thanks to write-combining\n");
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #endif
 
 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde23400..f4f03dcc1530 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
 		ret = device_create_file(rdev->dev, &dev_attr_power_method);
 		if (ret)
 			DRM_ERROR("failed to create device file for power method\n");
-		if (!ret)
-			rdev->pm.sysfs_initialized = true;
+		rdev->pm.sysfs_initialized = true;
 	}
 
 	mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46042e0..a82b891ae1fe 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
-	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f4768591e..265064c62d49 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 
 	drm_for_each_connector(connector, crtc->dev) {
-		if (connector && connector->state->crtc == crtc) {
+		if (connector->state->crtc == crtc) {
 			struct drm_encoder *encoder = connector->encoder;
 			struct vc4_encoder *vc4_encoder =
 				to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 		dlist_next++;
 
 	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-		  (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+		  (u32 __iomem *)vc4_crtc->dlist -
+		  (u32 __iomem *)vc4->hvs->dlist);
 
 	/* Make the next display list start after ours. */
 	vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 	 * that will take too much.
 	 */
 	primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
-	if (!primary_plane) {
+	if (IS_ERR(primary_plane)) {
 		dev_err(dev, "failed to construct primary plane\n");
 		ret = PTR_ERR(primary_plane);
 		goto err;
 	}
 
 	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
-	if (!cursor_plane) {
+	if (IS_ERR(cursor_plane)) {
 		dev_err(dev, "failed to construct cursor plane\n");
 		ret = PTR_ERR(cursor_plane);
 		goto err_primary;
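
vc4_plane_init() reports failure through ERR_PTR(), so the old NULL checks in vc4_crtc_bind() could never fire; the fix switches to IS_ERR(). A simplified userspace re-implementation of the encoding (illustration only, not the kernel headers) shows why a NULL test misses it:

#include <stdio.h>

/* Simplified versions of the kernel's ERR_PTR helpers: small negative
 * errno values are stuffed into the top of the address space. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static void *plane_init(int fail)
{
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
	void *p = plane_init(1);

	if (!p)
		puts("never reached: an ERR_PTR is not NULL");
	if (IS_ERR(p))
		printf("init failed: %ld\n", PTR_ERR(p));
	return 0;
}
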
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e730605edcc..d5db9e0f3b73 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = {
 	.remove = vc4_platform_drm_remove,
 	.driver = {
 		.name = "vc4-drm",
-		.owner = THIS_MODULE,
 		.of_match_table = vc4_of_match,
 	},
 };
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f672a4..8098c5b21ba4 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
 	for (i = 0; i < 64; i += 4) {
 		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
 			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
-			 ((uint32_t *)vc4->hvs->dlist)[i + 0],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 1],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 2],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
 	}
 }
 
84 84
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10c0147..887f3caad0be 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state)
 	return state->fb && state->crtc;
 }
 
-struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 	return &vc4_state->base;
 }
 
-void vc4_plane_destroy_state(struct drm_plane *plane,
+static void vc4_plane_destroy_state(struct drm_plane *plane,
 			     struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 
@@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
 }
 
 /* Called during init to allocate the plane's atomic state. */
-void vc4_plane_reset(struct drm_plane *plane)
+static void vc4_plane_reset(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	int crtc_w = state->crtc_w;
 	int crtc_h = state->crtc_h;
 
+	if (state->crtc_w << 16 != state->src_w ||
+	    state->crtc_h << 16 != state->src_h) {
+		/* We don't support scaling yet, which involves
+		 * allocating the LBM memory for scaling temporary
+		 * storage, and putting filter kernels in the HVS
+		 * context.
+		 */
+		return -EINVAL;
+	}
+
 	if (crtc_x < 0) {
 		offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
 		crtc_w += crtc_x;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949507d1..01a4f05c1642 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
 	if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
 		if (features->touch_max)
 			features->device_type |= WACOM_DEVICETYPE_TOUCH;
-		if (features->type >= INTUOSHT || features->type <= BAMBOO_PT)
+		if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
 			features->device_type |= WACOM_DEVICETYPE_PAD;
 
 		features->x_max = 4096;
@@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F =
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
 	{ "Wacom DTU1141", 23472, 13203, 1023, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
 	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
 	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
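
In the wacom_wac.c hunk, `type >= INTUOSHT || type <= BAMBOO_PT` is a tautology whenever INTUOSHT sorts below BAMBOO_PT, so the PAD flag was set for every device; `&&` turns it into a real range test. A small demonstration (the enum values below are made up, only their ordering matters):

#include <stdio.h>

enum { CINTIQ = 10, INTUOSHT = 20, BAMBOO_PT = 25, REMOTE = 30 };

int main(void)
{
	int types[] = { CINTIQ, INTUOSHT, BAMBOO_PT, REMOTE };

	for (int i = 0; i < 4; i++) {
		int t = types[i];
		/* With INTUOSHT <= BAMBOO_PT, the OR form matches everything */
		printf("type %2d: ||=%d  &&=%d\n", t,
		       t >= INTUOSHT || t <= BAMBOO_PT,
		       t >= INTUOSHT && t <= BAMBOO_PT);
	}
	return 0;
}
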
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b0043ad94..8f59f057cdf4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@ config SENSORS_APPLESMC
 config SENSORS_ARM_SCPI
 	tristate "ARM SCPI Sensors"
 	depends on ARM_SCPI_PROTOCOL
+	depends on THERMAL || !THERMAL_OF
 	help
 	  This driver provides support for temperature, voltage, current
 	  and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1471,6 +1472,7 @@ config SENSORS_INA209
 config SENSORS_INA2XX
 	tristate "Texas Instruments INA219 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for INA219, INA220, INA226,
 	  INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956941b1..0af7fd311979 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
 static int applesmc_init_smcreg_try(void)
 {
 	struct applesmc_registers *s = &smcreg;
-	bool left_light_sensor, right_light_sensor;
+	bool left_light_sensor = 0, right_light_sensor = 0;
 	unsigned int count;
 	u8 tmp[1];
 	int ret;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241bbf9af..7e20567bc369 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 	struct scpi_ops *scpi_ops;
 	struct device *hwdev, *dev = &pdev->dev;
 	struct scpi_sensors *scpi_sensors;
-	int ret;
+	int ret, idx;
 
 	scpi_ops = get_scpi_ops();
 	if (!scpi_ops)
@@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 
 	scpi_sensors->scpi_ops = scpi_ops;
 
-	for (i = 0; i < nr_sensors; i++) {
-		struct sensor_data *sensor = &scpi_sensors->data[i];
+	for (i = 0, idx = 0; i < nr_sensors; i++) {
+		struct sensor_data *sensor = &scpi_sensors->data[idx];
 
 		ret = scpi_ops->sensor_get_info(i, &sensor->info);
 		if (ret)
@@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 			num_power++;
 			break;
 		default:
-			break;
+			continue;
 		}
 
 		sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 		sensor->dev_attr_label.show = scpi_show_label;
 		sensor->dev_attr_label.attr.name = sensor->label;
 
-		scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr;
-		scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr;
+		scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
+		scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
 
-		sysfs_attr_init(scpi_sensors->attrs[i << 1]);
-		sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]);
+		sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
+		sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
+		idx++;
 	}
 
 	scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 
 		zone->sensor_id = i;
 		zone->scpi_sensors = scpi_sensors;
-		zone->tzd = thermal_zone_of_sensor_register(dev, i, zone,
-							    &scpi_sensor_ops);
+		zone->tzd = thermal_zone_of_sensor_register(dev,
+				sensor->info.sensor_id, zone, &scpi_sensor_ops);
 		/*
 		 * The call to thermal_zone_of_sensor_register returns
 		 * an error for sensors that are not associated with
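
The scpi-hwmon loop now drops unknown sensor classes with `continue` and packs accepted sensors using a second index, idx, so the attribute array stays dense. The read-index/write-index compaction pattern in plain C (the data is invented for illustration):

#include <stdio.h>

int main(void)
{
	int raw[6] = { 1, -1, 2, -1, 3, 4 };	/* -1 = unsupported class */
	int packed[6];
	int i, idx;

	for (i = 0, idx = 0; i < 6; i++) {
		if (raw[i] < 0)
			continue;		/* skip, do not advance idx */
		packed[idx++] = raw[i];		/* dense array, no gaps */
	}

	for (i = 0; i < idx; i++)
		printf("packed[%d] = %d\n", i, packed[i]);
	return 0;
}
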
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e24c2b680b47..7b0aa82ea38b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -126,6 +126,7 @@ config I2C_I801
 	    Sunrise Point-LP (PCH)
 	    DNV (SOC)
 	    Broxton (SOC)
+	    Lewisburg (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c306751ceadb..f62d69799a9c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -62,6 +62,8 @@
  * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes
  * DNV (SOC)			0x19df	32	hard	yes	yes	yes
  * Broxton (SOC)		0x5ad4	32	hard	yes	yes	yes
+ * Lewisburg (PCH)		0xa1a3	32	hard	yes	yes	yes
+ * Lewisburg Supersku (PCH)	0xa223	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC			no
@@ -206,6 +208,8 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS	0x9d23
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS			0x19df
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS		0xa1a3
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
 	{ 0, }
 };
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1e4d99da4164..9bb0b056b25f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -50,6 +50,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_data/i2c-imx.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e23a7b068c60..0b20449e48cf 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
 
 static void xiic_start_xfer(struct xiic_i2c *i2c)
 {
-
+	spin_lock(&i2c->lock);
+	xiic_reinit(i2c);
 	__xiic_start_xfer(i2c);
+	spin_unlock(&i2c->lock);
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
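
xiic_start_xfer() now reinitializes the controller and starts the transfer under i2c->lock, so the interrupt path can never observe the half-reset state in between. The same structure in userspace, sketched with a pthread mutex standing in for the spinlock (illustration only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

static void reinit(void) { state = 0; }
static void start(void)  { state = 1; }

/* Reset and restart as one atomic step, mirroring the xiic fix:
 * no other thread can see state between reinit() and start(). */
static void start_xfer(void)
{
	pthread_mutex_lock(&lock);
	reinit();
	start();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_xfer();
	printf("state = %d\n", state);
	return 0;
}
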
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 040af5cc8143..ba8eb087f224 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev)
 	if (wakeirq > 0 && wakeirq != client->irq)
 		status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
 	else if (client->irq > 0)
-		status = dev_pm_set_wake_irq(dev, wakeirq);
+		status = dev_pm_set_wake_irq(dev, client->irq);
 	else
 		status = 0;
 
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index eea0c79111e7..4d960d3b93c0 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -101,7 +101,7 @@
 #define AD7795_CH_AIN1M_AIN1M	8 /* AIN1(-) - AIN1(-) */
 
 /* ID Register Bit Designations (AD7793_REG_ID) */
-#define AD7785_ID		0xB
+#define AD7785_ID		0x3
 #define AD7792_ID		0xA
 #define AD7793_ID		0xB
 #define AD7794_ID		0xF
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 599cde3d03a1..b10f629cc44b 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -106,6 +106,13 @@
 
 #define DEFAULT_SAMPLE_TIME		1000
 
+/* V at 25°C of 696 mV */
+#define VF610_VTEMP25_3V0		950
+/* V at 25°C of 699 mV */
+#define VF610_VTEMP25_3V3		867
+/* Typical sensor slope coefficient at all temperatures */
+#define VF610_TEMP_SLOPE_COEFF		1840
+
 enum clk_sel {
 	VF610_ADCIOC_BUSCLK_SET,
 	VF610_ADCIOC_ALTCLK_SET,
@@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
 		adc_feature->clk_div = 8;
 	}
 
+	adck_rate = ipg_rate / adc_feature->clk_div;
+
 	/*
 	 * Determine the long sample time adder value to be used based
 	 * on the default minimum sample time provided.
@@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
 	 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
 	 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
 	 */
-	adck_rate = ipg_rate / info->adc_feature.clk_div;
 	for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
 		info->sample_freq_avail[i] =
 			adck_rate / (6 + vf610_hw_avgs[i] *
@@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
 		break;
 	case IIO_TEMP:
 		/*
 		 * Calculate in degree Celsius times 1000
-		 * Using sensor slope of 1.84 mV/°C and
-		 * V at 25°C of 696 mV
+		 * Using the typical sensor slope of 1.84 mV/°C
+		 * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
 		 */
-		*val = 25000 - ((int)info->value - 864) * 1000000 / 1840;
+		*val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
+				1000000 / VF610_TEMP_SLOPE_COEFF;
+
 		break;
 	default:
 		mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 0370624a35db..02e636a1c49a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
 	case XADC_REG_VCCINT:
 	case XADC_REG_VCCAUX:
 	case XADC_REG_VREFP:
+	case XADC_REG_VREFN:
 	case XADC_REG_VCCBRAM:
 	case XADC_REG_VCCPINT:
 	case XADC_REG_VCCPAUX:
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 9e4d2c18b554..81ca0081a019 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -113,12 +113,16 @@ enum ad5064_type {
 	ID_AD5065,
 	ID_AD5628_1,
 	ID_AD5628_2,
+	ID_AD5629_1,
+	ID_AD5629_2,
 	ID_AD5648_1,
 	ID_AD5648_2,
 	ID_AD5666_1,
 	ID_AD5666_2,
 	ID_AD5668_1,
 	ID_AD5668_2,
+	ID_AD5669_1,
+	ID_AD5669_2,
 };
 
 static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
 	{ },
 };
 
-#define AD5064_CHANNEL(chan, addr, bits) { \
+#define AD5064_CHANNEL(chan, addr, bits, _shift) { \
 	.type = IIO_VOLTAGE, \
 	.indexed = 1, \
 	.output = 1, \
@@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
 		.sign = 'u', \
 		.realbits = (bits), \
 		.storagebits = 16, \
-		.shift = 20 - bits, \
+		.shift = (_shift), \
 	}, \
 	.ext_info = ad5064_ext_info, \
 }
 
-#define DECLARE_AD5064_CHANNELS(name, bits) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits), \
-	AD5064_CHANNEL(1, 1, bits), \
-	AD5064_CHANNEL(2, 2, bits), \
-	AD5064_CHANNEL(3, 3, bits), \
-	AD5064_CHANNEL(4, 4, bits), \
-	AD5064_CHANNEL(5, 5, bits), \
-	AD5064_CHANNEL(6, 6, bits), \
-	AD5064_CHANNEL(7, 7, bits), \
+	AD5064_CHANNEL(0, 0, bits, shift), \
+	AD5064_CHANNEL(1, 1, bits, shift), \
+	AD5064_CHANNEL(2, 2, bits, shift), \
+	AD5064_CHANNEL(3, 3, bits, shift), \
+	AD5064_CHANNEL(4, 4, bits, shift), \
+	AD5064_CHANNEL(5, 5, bits, shift), \
+	AD5064_CHANNEL(6, 6, bits, shift), \
+	AD5064_CHANNEL(7, 7, bits, shift), \
 }
 
-#define DECLARE_AD5065_CHANNELS(name, bits) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
 const struct iio_chan_spec name[] = { \
-	AD5064_CHANNEL(0, 0, bits), \
-	AD5064_CHANNEL(1, 3, bits), \
+	AD5064_CHANNEL(0, 0, bits, shift), \
+	AD5064_CHANNEL(1, 3, bits, shift), \
 }
 
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
 
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
 
 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 	[ID_AD5024] = {
@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 		.channels = ad5024_channels,
 		.num_channels = 8,
 	},
+	[ID_AD5629_1] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5629_channels,
+		.num_channels = 8,
+	},
+	[ID_AD5629_2] = {
+		.shared_vref = true,
+		.internal_vref = 5000000,
+		.channels = ad5629_channels,
+		.num_channels = 8,
+	},
 	[ID_AD5648_1] = {
 		.shared_vref = true,
 		.internal_vref = 2500000,
@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 		.channels = ad5064_channels,
 		.num_channels = 8,
 	},
+	[ID_AD5669_1] = {
+		.shared_vref = true,
+		.internal_vref = 2500000,
+		.channels = ad5669_channels,
+		.num_channels = 8,
+	},
+	[ID_AD5669_2] = {
+		.shared_vref = true,
+		.internal_vref = 5000000,
+		.channels = ad5669_channels,
+		.num_channels = 8,
+	},
 };
 
 static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
 			    unsigned int addr, unsigned int val)
 {
 	struct i2c_client *i2c = to_i2c_client(st->dev);
+	int ret;
 
 	st->data.i2c[0] = (cmd << 4) | addr;
 	put_unaligned_be16(val, &st->data.i2c[1]);
-	return i2c_master_send(i2c, st->data.i2c, 3);
+
+	ret = i2c_master_send(i2c, st->data.i2c, 3);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int ad5064_i2c_probe(struct i2c_client *i2c,
@@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
 }
 
 static const struct i2c_device_id ad5064_i2c_ids[] = {
-	{"ad5629-1", ID_AD5628_1},
-	{"ad5629-2", ID_AD5628_2},
-	{"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
-	{"ad5669-1", ID_AD5668_1},
-	{"ad5669-2", ID_AD5668_2},
-	{"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
+	{"ad5629-1", ID_AD5629_1},
+	{"ad5629-2", ID_AD5629_2},
+	{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+	{"ad5669-1", ID_AD5669_1},
+	{"ad5669-2", ID_AD5669_2},
+	{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
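
The old AD5064_CHANNEL macro derived the register shift as 20 - bits, which happens to be right for the SPI parts but not for the I2C AD5629/AD5669, whose data words are aligned differently; hence the explicit _shift parameter. A quick table check using the values that appear in the diff:

#include <stdio.h>

int main(void)
{
	/* name, resolution bits, actual register shift (from the diff) */
	struct { const char *n; int bits, shift; } parts[] = {
		{ "ad5024", 12, 8 }, { "ad5064", 16, 4 },
		{ "ad5629", 12, 4 }, { "ad5669", 16, 0 },
	};

	for (int i = 0; i < 4; i++)
		printf("%s: 20 - bits = %d, real shift = %d%s\n",
		       parts[i].n, 20 - parts[i].bits, parts[i].shift,
		       20 - parts[i].bits == parts[i].shift ? "" : "  <-- mismatch");
	return 0;
}
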
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 12128d1ca570..71991b5c0658 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = i2c_smbus_read_word_data(*client,
+		ret = i2c_smbus_read_word_swapped(*client,
 					       chan->type == IIO_TEMP ?
 					       SI7020CMD_TEMP_HOLD :
 					       SI7020CMD_RH_HOLD);
 		if (ret < 0)
 			return ret;
 		*val = ret >> 2;
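
i2c_smbus_read_word_data() returns the SMBus word low byte first, while the Si7020 transmits its result MSB first, so the unswapped read produced byte-reversed samples; i2c_smbus_read_word_swapped() is the byte-swapped variant. An illustration of the effect (sample bytes invented):

#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Sensor sends 0x66 0x44 (MSB first); the unswapped SMBus read
	 * assembles that as 0x4466 instead of the real value 0x6644. */
	uint16_t as_read = 0x4466;

	printf("raw word: 0x%04x, swapped: 0x%04x\n", as_read, swab16(as_read));
	return 0;
}
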
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 44a077f3a4a2..f174ce0ca361 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs,
 		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
 
 	/*
-	 * Disable all interrupts.  Leave the PPI and SGIs alone
-	 * as they are enabled by redistributor registers.
+	 * Deactivate and disable all SPIs. Leave the PPI and SGIs
+	 * alone as they are in the redistributor registers on GICv3.
 	 */
-	for (i = 32; i < gic_irqs; i += 32)
+	for (i = 32; i < gic_irqs; i += 32) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
-			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+			       base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+	}
 
 	if (sync_access)
 		sync_access();
@@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
 	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 * Make sure everything is deactivated.
 	 */
+	writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
 	writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
 	writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
 
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 515c823c1c95..abf2ffaed392 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -73,9 +73,11 @@ struct gic_chip_data {
 	union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
 	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
 	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_active;
 	u32 __percpu *saved_ppi_conf;
 #endif
 	struct irq_domain *domain;
@@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr)
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
 		gic_data[gic_nr].saved_spi_enable[i] =
 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic_data[gic_nr].saved_spi_active[i] =
+			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 }
 
 /*
@@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr)
 		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
 			dist_base + GIC_DIST_TARGET + i * 4);
 
-	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
 
 	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
@@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr)
631 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 647 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
632 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 648 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
633 649
650 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
651 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
652 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
653
634 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 654 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
635 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 655 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
636 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 656 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr)
654 return; 674 return;
655 675
656 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 676 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
657 for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
677 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
678 writel_relaxed(GICD_INT_EN_CLR_X32,
679 dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
658 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 680 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
681 }
682
683 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
684 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
685 writel_relaxed(GICD_INT_EN_CLR_X32,
686 dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
687 writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
688 }
659 689
660 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 690 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
661 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 691 for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
@@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
710 sizeof(u32)); 740 sizeof(u32));
711 BUG_ON(!gic->saved_ppi_enable); 741 BUG_ON(!gic->saved_ppi_enable);
712 742
743 gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
744 sizeof(u32));
745 BUG_ON(!gic->saved_ppi_active);
746
713 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 747 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
714 sizeof(u32)); 748 sizeof(u32));
715 BUG_ON(!gic->saved_ppi_conf); 749 BUG_ON(!gic->saved_ppi_conf);
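Note: the suspend/resume additions above all follow one idiom. The enable and active register banks are write-one-to-set/write-one-to-clear pairs, so a saved bitmap cannot simply be written back: bits that should end up 0 would be left untouched. The restore paths therefore clear everything first and then replay the snapshot. A minimal sketch of that idiom (illustrative, not a drop-in; saved_spi_active stands in for the per-GIC array):

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		/* wipe via the write-one-to-clear bank ... */
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		/* ... then set only the saved bits via the set bank */
		writel_relaxed(saved_spi_active[i],
			       dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}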
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 35759a91d47d..e8f847226a19 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
1992 (unsigned long long)pci_resource_start(pci_dev, 0)); 1992 (unsigned long long)pci_resource_start(pci_dev, 0));
1993 1993
1994 pci_set_master(pci_dev); 1994 pci_set_master(pci_dev);
1995 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
1995 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1996 if (err) {
1996 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1997 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1997 err = -EIO;
1998 goto fail_context; 1998 goto fail_context;
1999 } 1999 }
2000 2000
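Note: this change, and the matching ones in the cx25821, cx88, saa7134, saa7164, netup_unidvb, tw68, and pcnet32 hunks below, all fix the same inverted test. pci_set_dma_mask() returns 0 on success and a negative errno on failure, so the old `if (!pci_set_dma_mask(...))` entered the error path precisely when setting the mask had succeeded. The corrected idiom, sketched (the label and message here are placeholders):

	/* pci_set_dma_mask() returns 0 on success, -errno on failure. */
	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (err) {
		pr_err("no suitable 32-bit DMA mask available\n");
		goto fail;	/* propagate err from the call site */
	}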
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index dbc695f32760..0042803a9de7 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
1319 dev->pci_lat, (unsigned long long)dev->base_io_addr); 1319 dev->pci_lat, (unsigned long long)dev->base_io_addr);
1320 1320
1321 pci_set_master(pci_dev); 1321 pci_set_master(pci_dev);
1322 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
1322 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1323 if (err) {
1323 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1324 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1324 err = -EIO; 1325 err = -EIO;
1325 goto fail_irq; 1326 goto fail_irq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 0ed1b6530374..1b5268f9bb24 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
890 return err; 890 return err;
891 } 891 }
892 892
893 if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) {
893 err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
894 if (err) {
894 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 895 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
895 err = -EIO;
896 cx88_core_put(core, pci); 896 cx88_core_put(core, pci);
897 return err; 897 return err;
898 } 898 }
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 9db7767d1fe0..f34c229f9b37 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
393 if (pci_enable_device(dev->pci)) 393 if (pci_enable_device(dev->pci))
394 return -EIO; 394 return -EIO;
395 pci_set_master(dev->pci); 395 pci_set_master(dev->pci);
396 if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) {
396 err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
397 if (err) {
397 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); 398 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
398 return -EIO; 399 return -EIO;
399 } 400 }
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 0de1ad5a977d..aef9acf351f6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
1315 1315
1316 pci_set_master(pci_dev); 1316 pci_set_master(pci_dev);
1317 if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) {
1317 err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
1318 if (err) {
1318 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); 1319 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
1319 err = -EIO;
1320 goto fail_core; 1320 goto fail_core;
1321 } 1321 }
1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); 1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60b2d462f98d..3fdbd81b5580 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
810 "%s(): board vendor 0x%x, revision 0x%x\n", 810 "%s(): board vendor 0x%x, revision 0x%x\n",
811 __func__, board_vendor, board_revision); 811 __func__, board_vendor, board_revision);
812 pci_set_master(pci_dev); 812 pci_set_master(pci_dev);
813 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
813 if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
814 dev_err(&pci_dev->dev, 814 dev_err(&pci_dev->dev,
815 "%s(): 32bit PCI DMA is not supported\n", __func__); 815 "%s(): 32bit PCI DMA is not supported\n", __func__);
816 goto pci_detect_err; 816 goto pci_detect_err;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e79d63eb774e..f720cea80e28 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
953 pci_set_master(pci_dev); 953 pci_set_master(pci_dev);
954 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
954 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
955 if (err) {
955 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 956 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
956 err = -EIO;
957 goto fail1; 957 goto fail1;
958 } 958 }
959 959
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8f36b48ef733..8bbd092fbe1d 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
1264 1264
1265 pci_set_master(pci_dev); 1265 pci_set_master(pci_dev);
1266 /* TODO */ 1266 /* TODO */
1267 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
1267 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1268 if (err) {
1268 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1269 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1269 err = -EIO;
1270 goto fail_irq; 1270 goto fail_irq;
1271 } 1271 }
1272 1272
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 8c5655d351d3..4e77618fbb2b 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); 258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
259 pci_set_master(pci_dev); 259 pci_set_master(pci_dev);
260 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
260 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
261 if (err) {
261 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 262 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
262 err = -EIO;
263 goto fail1; 263 goto fail1;
264 } 264 }
265 265
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e8701c..d8486168415a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block");
65#define MMC_SANITIZE_REQ_TIMEOUT 240000 65#define MMC_SANITIZE_REQ_TIMEOUT 240000
66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
67 67
68#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
68#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
69 (req->cmd_flags & REQ_META)) && \
70 (rq_data_dir(req) == WRITE)) 69 (rq_data_dir(req) == WRITE))
71#define PACKED_CMD_VER 0x01 70#define PACKED_CMD_VER 0x01
72#define PACKED_CMD_WR 0x02 71#define PACKED_CMD_WR 0x02
@@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1467 1466
1468 /* 1467 /*
1469 * Reliable writes are used to implement Forced Unit Access and 1468 * Reliable writes are used to implement Forced Unit Access and
1470 * REQ_META accesses, and are supported only on MMCs.
1469 * are supported only on MMCs.
1471 *
1472 * XXX: this really needs a good explanation of why REQ_META
1473 * is treated special.
1474 */ 1470 */
1475 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1471 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1476 (req->cmd_flags & REQ_META)) &&
1477 (rq_data_dir(req) == WRITE) && 1472 (rq_data_dir(req) == WRITE) &&
1478 (md->flags & MMC_BLK_REL_WR); 1473 (md->flags & MMC_BLK_REL_WR);
1479 1474
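Note: with both hunks applied, a request counts as a reliable write only when it is a write carrying REQ_FUA; REQ_META alone no longer qualifies. Restated as a function for readability (a hypothetical helper, equivalent to the reworked macro):

	static inline bool mmc_req_is_rel_wr(struct request *req)
	{
		/* reliable writes now back REQ_FUA only, not REQ_META */
		return (req->cmd_flags & REQ_FUA) &&
		       (rq_data_dir(req) == WRITE);
	}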
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda27321..3a9a79ec4343 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
1040 return err; 1040 return err;
1041} 1041}
1042 1042
1043/* Caller must hold re-tuning */
1044static int mmc_switch_status(struct mmc_card *card)
1045{
1046 u32 status;
1047 int err;
1048
1049 err = mmc_send_status(card, &status);
1050 if (err)
1051 return err;
1052
1053 return mmc_switch_status_error(card->host, status);
1054}
1055
1043static int mmc_select_hs400(struct mmc_card *card) 1056static int mmc_select_hs400(struct mmc_card *card)
1044{ 1057{
1045 struct mmc_host *host = card->host; 1058 struct mmc_host *host = card->host;
1059 bool send_status = true;
1060 unsigned int max_dtr;
1046 int err = 0; 1061 int err = 0;
1047 u8 val; 1062 u8 val;
1048 1063
@@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card)
1053 host->ios.bus_width == MMC_BUS_WIDTH_8)) 1068 host->ios.bus_width == MMC_BUS_WIDTH_8))
1054 return 0; 1069 return 0;
1055 1070
1056 /*
1057 * Before switching to dual data rate operation for HS400,
1058 * it is required to convert from HS200 mode to HS mode.
1059 */
1060 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1061 mmc_set_bus_speed(card);
1071 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1072 send_status = false;
1062 1073
1074 /* Reduce frequency to HS frequency */
1075 max_dtr = card->ext_csd.hs_max_dtr;
1076 mmc_set_clock(host, max_dtr);
1077
1078 /* Switch card to HS mode */
1063 val = EXT_CSD_TIMING_HS | 1079 val = EXT_CSD_TIMING_HS |
1064 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1080 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1065 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1081 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1066 EXT_CSD_HS_TIMING, val, 1082 EXT_CSD_HS_TIMING, val,
1067 card->ext_csd.generic_cmd6_time, 1083 card->ext_csd.generic_cmd6_time,
1068 true, true, true);
1084 true, send_status, true);
1069 if (err) { 1085 if (err) {
1070 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", 1086 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1071 mmc_hostname(host), err); 1087 mmc_hostname(host), err);
1072 return err; 1088 return err;
1073 } 1089 }
1074 1090
1091 /* Set host controller to HS timing */
1092 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1093
1094 if (!send_status) {
1095 err = mmc_switch_status(card);
1096 if (err)
1097 goto out_err;
1098 }
1099
1100 /* Switch card to DDR */
1075 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1101 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076 EXT_CSD_BUS_WIDTH, 1102 EXT_CSD_BUS_WIDTH,
1077 EXT_CSD_DDR_BUS_WIDTH_8, 1103 EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card)
1082 return err; 1108 return err;
1083 } 1109 }
1084 1110
1111 /* Switch card to HS400 */
1085 val = EXT_CSD_TIMING_HS400 | 1112 val = EXT_CSD_TIMING_HS400 |
1086 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1113 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1087 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1114 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1088 EXT_CSD_HS_TIMING, val, 1115 EXT_CSD_HS_TIMING, val,
1089 card->ext_csd.generic_cmd6_time, 1116 card->ext_csd.generic_cmd6_time,
1090 true, true, true);
1117 true, send_status, true);
1091 if (err) { 1118 if (err) {
1092 pr_err("%s: switch to hs400 failed, err:%d\n", 1119 pr_err("%s: switch to hs400 failed, err:%d\n",
1093 mmc_hostname(host), err); 1120 mmc_hostname(host), err);
1094 return err; 1121 return err;
1095 } 1122 }
1096 1123
1124 /* Set host controller to HS400 timing and frequency */
1097 mmc_set_timing(host, MMC_TIMING_MMC_HS400); 1125 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1098 mmc_set_bus_speed(card); 1126 mmc_set_bus_speed(card);
1099 1127
1128 if (!send_status) {
1129 err = mmc_switch_status(card);
1130 if (err)
1131 goto out_err;
1132 }
1133
1100 return 0; 1134 return 0;
1135
1136out_err:
1137 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1138 __func__, err);
1139 return err;
1101} 1140}
1102 1141
1103int mmc_hs200_to_hs400(struct mmc_card *card) 1142int mmc_hs200_to_hs400(struct mmc_card *card)
@@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
1105 return mmc_select_hs400(card); 1144 return mmc_select_hs400(card);
1106} 1145}
1107 1146
1108/* Caller must hold re-tuning */
1109static int mmc_switch_status(struct mmc_card *card)
1110{
1111 u32 status;
1112 int err;
1113
1114 err = mmc_send_status(card, &status);
1115 if (err)
1116 return err;
1117
1118 return mmc_switch_status_error(card->host, status);
1119}
1120
1121int mmc_hs400_to_hs200(struct mmc_card *card) 1147int mmc_hs400_to_hs200(struct mmc_card *card)
1122{ 1148{
1123 struct mmc_host *host = card->host; 1149 struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card)
1219static int mmc_select_hs200(struct mmc_card *card) 1245static int mmc_select_hs200(struct mmc_card *card)
1220{ 1246{
1221 struct mmc_host *host = card->host; 1247 struct mmc_host *host = card->host;
1248 bool send_status = true;
1249 unsigned int old_timing;
1222 int err = -EINVAL; 1250 int err = -EINVAL;
1223 u8 val; 1251 u8 val;
1224 1252
@@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card)
1234 1262
1235 mmc_select_driver_type(card); 1263 mmc_select_driver_type(card);
1236 1264
1265 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1266 send_status = false;
1267
1237 /* 1268 /*
1238 * Set the bus width(4 or 8) with host's support and 1269 * Set the bus width(4 or 8) with host's support and
1239 * switch to HS200 mode if bus width is set successfully. 1270 * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card)
1245 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1276 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1246 EXT_CSD_HS_TIMING, val, 1277 EXT_CSD_HS_TIMING, val,
1247 card->ext_csd.generic_cmd6_time, 1278 card->ext_csd.generic_cmd6_time,
1248 true, true, true);
1249 if (!err)
1250 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1279 true, send_status, true);
1280 if (err)
1281 goto err;
1282 old_timing = host->ios.timing;
1283 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1284 if (!send_status) {
1285 err = mmc_switch_status(card);
1286 /*
1287 * mmc_select_timing() assumes timing has not changed if
1288 * it is a switch error.
1289 */
1290 if (err == -EBADMSG)
1291 mmc_set_timing(host, old_timing);
1292 }
1251 } 1293 }
1252err: 1294err:
1295 if (err)
1296 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1297 __func__, err);
1253 return err; 1298 return err;
1254} 1299}
1255 1300
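Note: the common thread of the HS200/HS400 hunks above: when the host controller can wait out the card's busy signalling itself (MMC_CAP_WAIT_WHILE_BUSY), __mmc_switch() is told to skip its CMD13 polling (send_status = false), and the hoisted mmc_switch_status() helper issues one explicit status check after the host-side timing has been updated, so the check is made at a bus mode the card can answer in. A condensed sketch of the sequence, assuming the __mmc_switch() signature shown in the diff:

	bool send_status = !(host->caps & MMC_CAP_WAIT_WHILE_BUSY);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time,
			   true, send_status, true);
	if (err)
		return err;

	mmc_set_timing(host, MMC_TIMING_MMC_HS);	/* host follows the card */

	if (!send_status) {
		err = mmc_switch_status(card);	/* one explicit CMD13 */
		if (err)
			return err;
	}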
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5fda3b..1dee533634c9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -473,6 +473,7 @@ config MMC_DAVINCI
473 473
474config MMC_GOLDFISH 474config MMC_GOLDFISH
475 tristate "goldfish qemu Multimedia Card Interface support" 475 tristate "goldfish qemu Multimedia Card Interface support"
476 depends on HAS_DMA
476 depends on GOLDFISH || COMPILE_TEST 477 depends on GOLDFISH || COMPILE_TEST
477 help 478 help
478 This selects the Goldfish Multimedia card Interface emulation 479 This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc29a2a..33dfd7e72516 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
1276 int start = 0, len = 0; 1276 int start = 0, len = 0;
1277 int start_final = 0, len_final = 0; 1277 int start_final = 0, len_final = 0;
1278 u8 final_phase = 0xff; 1278 u8 final_phase = 0xff;
1279 struct msdc_delay_phase delay_phase;
1279 struct msdc_delay_phase delay_phase = { 0, };
1280 1280
1281 if (delay == 0) { 1281 if (delay == 0) {
1282 dev_err(host->dev, "phase error: [map:%x]\n", delay); 1282 dev_err(host->dev, "phase error: [map:%x]\n", delay);
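Note: the `= { 0, }` initializer matters because of the error path right below it: when delay == 0 the function returns delay_phase immediately, which previously meant returning uninitialized stack contents. A minimal illustration of the hazard (field names are placeholders, not the driver's exact struct):

	struct delay_phase_example {
		int start;
		int len;
		u8 final_phase;
	};

	static struct delay_phase_example get_best_delay_sketch(u32 delay)
	{
		struct delay_phase_example dp = { 0, };	/* zero-init all fields */

		if (delay == 0)
			return dp;	/* now well-defined, was stack garbage */
		/* ... otherwise scan the delay bitmap and fill dp ... */
		return dp;
	}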
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74e8407..ce08896b9d69 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev)
805 goto out; 805 goto out;
806 } else { 806 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 807 mmc->caps |= host->pdata->gpio_card_ro_invert ?
808 MMC_CAP2_RO_ACTIVE_HIGH : 0;
808 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 809 }
810 810
811 if (gpio_is_valid(gpio_cd)) 811 if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index dc4e8446f1ff..5a99a93ed025 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27 27
28#include <asm/mach-jz4740/gpio.h>
28#include <asm/mach-jz4740/jz4740_nand.h> 29#include <asm/mach-jz4740/jz4740_nand.h>
29 30
30#define JZ_REG_NAND_CTRL 0x50 31#define JZ_REG_NAND_CTRL 0x50
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index cc74142938b0..ece544efccc3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
3110 */ 3110 */
3111static void nand_shutdown(struct mtd_info *mtd) 3111static void nand_shutdown(struct mtd_info *mtd)
3112{ 3112{
3113 nand_get_device(mtd, FL_SHUTDOWN);
3113 nand_get_device(mtd, FL_PM_SUSPENDED);
3114} 3114}
3115 3115
3116/* Set default functions */ 3116/* Set default functions */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577755f6..0527f485c3dc 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/phy.h> 16#include <linux/phy.h>
17#include <net/dsa.h> 17#include <net/dsa.h>
18 18#include "mv88e6060.h"
19#define REG_PORT(p) (8 + (p))
20#define REG_GLOBAL 0x0f
21 19
22static int reg_read(struct dsa_switch *ds, int addr, int reg) 20static int reg_read(struct dsa_switch *ds, int addr, int reg)
23{ 21{
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
67 if (bus == NULL) 65 if (bus == NULL)
68 return NULL; 66 return NULL;
69 67
70 ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
68 ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
71 if (ret >= 0) { 69 if (ret >= 0) {
72 if (ret == 0x0600)
70 if (ret == PORT_SWITCH_ID_6060)
73 return "Marvell 88E6060 (A0)"; 71 return "Marvell 88E6060 (A0)";
74 if (ret == 0x0601 || ret == 0x0602)
72 if (ret == PORT_SWITCH_ID_6060_R1 ||
73 ret == PORT_SWITCH_ID_6060_R2)
75 return "Marvell 88E6060 (B0)"; 74 return "Marvell 88E6060 (B0)";
76 if ((ret & 0xfff0) == 0x0600)
75 if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
77 return "Marvell 88E6060"; 76 return "Marvell 88E6060";
78 } 77 }
79 78
@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
87 unsigned long timeout; 86 unsigned long timeout;
88 87
89 /* Set all ports to the disabled state. */ 88 /* Set all ports to the disabled state. */
90 for (i = 0; i < 6; i++) {
91 ret = REG_READ(REG_PORT(i), 0x04);
92 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
89 for (i = 0; i < MV88E6060_PORTS; i++) {
90 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
91 REG_WRITE(REG_PORT(i), PORT_CONTROL,
92 ret & ~PORT_CONTROL_STATE_MASK);
93 } 93 }
94 94
95 /* Wait for transmit queues to drain. */ 95 /* Wait for transmit queues to drain. */
96 usleep_range(2000, 4000); 96 usleep_range(2000, 4000);
97 97
98 /* Reset the switch. */ 98 /* Reset the switch. */
99 REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
99 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
100 GLOBAL_ATU_CONTROL_SWRESET |
101 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
102 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
100 103
101 /* Wait up to one second for reset to complete. */ 104 /* Wait up to one second for reset to complete. */
102 timeout = jiffies + 1 * HZ; 105 timeout = jiffies + 1 * HZ;
103 while (time_before(jiffies, timeout)) { 106 while (time_before(jiffies, timeout)) {
104 ret = REG_READ(REG_GLOBAL, 0x00);
105 if ((ret & 0x8000) == 0x0000)
107 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
108 if (ret & GLOBAL_STATUS_INIT_READY)
106 break; 109 break;
107 110
108 usleep_range(1000, 2000); 111 usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
119 * set the maximum frame size to 1536 bytes, and mask all 122 * set the maximum frame size to 1536 bytes, and mask all
120 * interrupt sources. 123 * interrupt sources.
121 */ 124 */
122 REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
125 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
123 126
124 /* Enable automatic address learning, set the address 127 /* Enable automatic address learning, set the address
125 * database size to 1024 entries, and set the default aging 128 * database size to 1024 entries, and set the default aging
126 * time to 5 minutes. 129 * time to 5 minutes.
127 */ 130 */
128 REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
131 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
132 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
133 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
129 134
130 return 0; 135 return 0;
131} 136}
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
139 * state to Forwarding. Additionally, if this is the CPU 144 * state to Forwarding. Additionally, if this is the CPU
140 * port, enable Ingress and Egress Trailer tagging mode. 145 * port, enable Ingress and Egress Trailer tagging mode.
141 */ 146 */
142 REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
147 REG_WRITE(addr, PORT_CONTROL,
148 dsa_is_cpu_port(ds, p) ?
149 PORT_CONTROL_TRAILER |
150 PORT_CONTROL_INGRESS_MODE |
151 PORT_CONTROL_STATE_FORWARDING :
152 PORT_CONTROL_STATE_FORWARDING);
143 153
144 /* Port based VLAN map: give each port its own address 154 /* Port based VLAN map: give each port its own address
145 * database, allow the CPU port to talk to each of the 'real' 155 * database, allow the CPU port to talk to each of the 'real'
146 * ports, and allow each of the 'real' ports to only talk to 156 * ports, and allow each of the 'real' ports to only talk to
147 * the CPU port. 157 * the CPU port.
148 */ 158 */
149 REG_WRITE(addr, 0x06,
150 ((p & 0xf) << 12) |
159 REG_WRITE(addr, PORT_VLAN_MAP,
160 ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
151 (dsa_is_cpu_port(ds, p) ? 161 (dsa_is_cpu_port(ds, p) ?
152 ds->phys_port_mask : 162 ds->phys_port_mask :
153 (1 << ds->dst->cpu_port)));
163 BIT(ds->dst->cpu_port)));
154 164
155 /* Port Association Vector: when learning source addresses 165 /* Port Association Vector: when learning source addresses
156 * of packets, add the address to the address database using 166 * of packets, add the address to the address database using
157 * a port bitmap that has only the bit for this port set and 167 * a port bitmap that has only the bit for this port set and
158 * the other bits clear. 168 * the other bits clear.
159 */ 169 */
160 REG_WRITE(addr, 0x0b, 1 << p);
170 REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
161 171
162 return 0; 172 return 0;
163} 173}
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
177 if (ret < 0) 187 if (ret < 0)
178 return ret; 188 return ret;
179 189
180 for (i = 0; i < 6; i++) {
190 for (i = 0; i < MV88E6060_PORTS; i++) {
181 ret = mv88e6060_setup_port(ds, i); 191 ret = mv88e6060_setup_port(ds, i);
182 if (ret < 0) 192 if (ret < 0)
183 return ret; 193 return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)
188 198
189static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) 199static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
190{ 200{
191 REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
192 REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
193 REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
201 /* Use the same MAC Address as FD Pause frames for all ports */
202 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
203 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
204 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
194 205
195 return 0; 206 return 0;
196} 207}
197 208
198static int mv88e6060_port_to_phy_addr(int port) 209static int mv88e6060_port_to_phy_addr(int port)
199{ 210{
200 if (port >= 0 && port <= 5)
211 if (port >= 0 && port < MV88E6060_PORTS)
201 return port; 212 return port;
202 return -1; 213 return -1;
203} 214}
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
225 return reg_write(ds, addr, regnum, val); 236 return reg_write(ds, addr, regnum, val);
226} 237}
227 238
228static void mv88e6060_poll_link(struct dsa_switch *ds)
229{
230 int i;
231
232 for (i = 0; i < DSA_MAX_PORTS; i++) {
233 struct net_device *dev;
234 int uninitialized_var(port_status);
235 int link;
236 int speed;
237 int duplex;
238 int fc;
239
240 dev = ds->ports[i];
241 if (dev == NULL)
242 continue;
243
244 link = 0;
245 if (dev->flags & IFF_UP) {
246 port_status = reg_read(ds, REG_PORT(i), 0x00);
247 if (port_status < 0)
248 continue;
249
250 link = !!(port_status & 0x1000);
251 }
252
253 if (!link) {
254 if (netif_carrier_ok(dev)) {
255 netdev_info(dev, "link down\n");
256 netif_carrier_off(dev);
257 }
258 continue;
259 }
260
261 speed = (port_status & 0x0100) ? 100 : 10;
262 duplex = (port_status & 0x0200) ? 1 : 0;
263 fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
264
265 if (!netif_carrier_ok(dev)) {
266 netdev_info(dev,
267 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
268 speed,
269 duplex ? "full" : "half",
270 fc ? "en" : "dis");
271 netif_carrier_on(dev);
272 }
273 }
274}
275
276static struct dsa_switch_driver mv88e6060_switch_driver = { 239static struct dsa_switch_driver mv88e6060_switch_driver = {
277 .tag_protocol = DSA_TAG_PROTO_TRAILER, 240 .tag_protocol = DSA_TAG_PROTO_TRAILER,
278 .probe = mv88e6060_probe, 241 .probe = mv88e6060_probe,
@@ -280,7 +243,6 @@ static struct dsa_switch_driver mv88e6060_switch_driver = {
280 .set_addr = mv88e6060_set_addr, 243 .set_addr = mv88e6060_set_addr,
281 .phy_read = mv88e6060_phy_read, 244 .phy_read = mv88e6060_phy_read,
282 .phy_write = mv88e6060_phy_write, 245 .phy_write = mv88e6060_phy_write,
283 .poll_link = mv88e6060_poll_link,
284}; 246};
285 247
286static int __init mv88e6060_init(void) 248static int __init mv88e6060_init(void)
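Note: the magic constants removed above decompose exactly into the named bits introduced by the new mv88e6060.h below; compile-time checks of the two ATU control values (illustrative only, using C11 _Static_assert):

	_Static_assert((GLOBAL_ATU_CONTROL_SWRESET |	  /* BIT(15)   = 0x8000 */
			GLOBAL_ATU_CONTROL_ATUSIZE_1024 | /* 0x2 << 12 = 0x2000 */
			GLOBAL_ATU_CONTROL_ATE_AGE_5MIN)  /* 0x13 << 4 = 0x0130 */
		       == 0xa130, "old reset value");
	_Static_assert((GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
			GLOBAL_ATU_CONTROL_ATE_AGE_5MIN)
		       == 0x2130, "old setup value");

One replacement is not bit-identical, though: the old GLOBAL_CONTROL write used 0x0800 (bit 11), while GLOBAL_CONTROL_MAX_FRAME_1536 is defined as BIT(10) = 0x0400 in the new header.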
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000000..cc9b2ed4aff4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
3 * Copyright (c) 2015 Neil Armstrong
4 *
5 * Based on mv88e6xxx.h
6 * Copyright (c) 2008 Marvell Semiconductor
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __MV88E6060_H
15#define __MV88E6060_H
16
17#define MV88E6060_PORTS 6
18
19#define REG_PORT(p) (0x8 + (p))
20#define PORT_STATUS 0x00
21#define PORT_STATUS_PAUSE_EN BIT(15)
22#define PORT_STATUS_MY_PAUSE BIT(14)
23#define PORT_STATUS_FC (PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
24#define PORT_STATUS_RESOLVED BIT(13)
25#define PORT_STATUS_LINK BIT(12)
26#define PORT_STATUS_PORTMODE BIT(11)
27#define PORT_STATUS_PHYMODE BIT(10)
28#define PORT_STATUS_DUPLEX BIT(9)
29#define PORT_STATUS_SPEED BIT(8)
30#define PORT_SWITCH_ID 0x03
31#define PORT_SWITCH_ID_6060 0x0600
32#define PORT_SWITCH_ID_6060_MASK 0xfff0
33#define PORT_SWITCH_ID_6060_R1 0x0601
34#define PORT_SWITCH_ID_6060_R2 0x0602
35#define PORT_CONTROL 0x04
36#define PORT_CONTROL_FORCE_FLOW_CTRL BIT(15)
37#define PORT_CONTROL_TRAILER BIT(14)
38#define PORT_CONTROL_HEADER BIT(11)
39#define PORT_CONTROL_INGRESS_MODE BIT(8)
40#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
41#define PORT_CONTROL_STATE_MASK 0x03
42#define PORT_CONTROL_STATE_DISABLED 0x00
43#define PORT_CONTROL_STATE_BLOCKING 0x01
44#define PORT_CONTROL_STATE_LEARNING 0x02
45#define PORT_CONTROL_STATE_FORWARDING 0x03
46#define PORT_VLAN_MAP 0x06
47#define PORT_VLAN_MAP_DBNUM_SHIFT 12
48#define PORT_VLAN_MAP_TABLE_MASK 0x1f
49#define PORT_ASSOC_VECTOR 0x0b
50#define PORT_ASSOC_VECTOR_MONITOR BIT(15)
51#define PORT_ASSOC_VECTOR_PAV_MASK 0x1f
52#define PORT_RX_CNTR 0x10
53#define PORT_TX_CNTR 0x11
54
55#define REG_GLOBAL 0x0f
56#define GLOBAL_STATUS 0x00
57#define GLOBAL_STATUS_SW_MODE_MASK (0x3 << 12)
58#define GLOBAL_STATUS_SW_MODE_0 (0x0 << 12)
59#define GLOBAL_STATUS_SW_MODE_1 (0x1 << 12)
60#define GLOBAL_STATUS_SW_MODE_2 (0x2 << 12)
61#define GLOBAL_STATUS_SW_MODE_3 (0x3 << 12)
62#define GLOBAL_STATUS_INIT_READY BIT(11)
63#define GLOBAL_STATUS_ATU_FULL BIT(3)
64#define GLOBAL_STATUS_ATU_DONE BIT(2)
65#define GLOBAL_STATUS_PHY_INT BIT(1)
66#define GLOBAL_STATUS_EEINT BIT(0)
67#define GLOBAL_MAC_01 0x01
68#define GLOBAL_MAC_01_DIFF_ADDR BIT(8)
69#define GLOBAL_MAC_23 0x02
70#define GLOBAL_MAC_45 0x03
71#define GLOBAL_CONTROL 0x04
72#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13)
73#define GLOBAL_CONTROL_MAX_FRAME_1536 BIT(10)
74#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9)
75#define GLOBAL_CONTROL_CTRMODE BIT(8)
76#define GLOBAL_CONTROL_ATU_FULL_EN BIT(3)
77#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
78#define GLOBAL_CONTROL_PHYINT_EN BIT(1)
79#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
80#define GLOBAL_ATU_CONTROL 0x0a
81#define GLOBAL_ATU_CONTROL_SWRESET BIT(15)
82#define GLOBAL_ATU_CONTROL_LEARNDIS BIT(14)
83#define GLOBAL_ATU_CONTROL_ATUSIZE_256 (0x0 << 12)
84#define GLOBAL_ATU_CONTROL_ATUSIZE_512 (0x1 << 12)
85#define GLOBAL_ATU_CONTROL_ATUSIZE_1024 (0x2 << 12)
86#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT 4
87#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK (0xff << 4)
88#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN (0x13 << 4)
89#define GLOBAL_ATU_OP 0x0b
90#define GLOBAL_ATU_OP_BUSY BIT(15)
91#define GLOBAL_ATU_OP_NOP (0 << 12)
92#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
93#define GLOBAL_ATU_OP_FLUSH_UNLOCKED ((2 << 12) | GLOBAL_ATU_OP_BUSY)
94#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
95#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
96#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
97#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
98#define GLOBAL_ATU_DATA 0x0c
99#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3f0
100#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
101#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
102#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
103#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
104#define GLOBAL_ATU_DATA_STATE_UC_LOCKED 0x0f
105#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
106#define GLOBAL_ATU_DATA_STATE_MC_LOCKED 0x0e
107#define GLOBAL_ATU_MAC_01 0x0d
108#define GLOBAL_ATU_MAC_23 0x0e
109#define GLOBAL_ATU_MAC_45 0x0f
110
111#endif
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..955d06b9cdba 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
78source "drivers/net/ethernet/intel/Kconfig" 78source "drivers/net/ethernet/intel/Kconfig"
79source "drivers/net/ethernet/i825xx/Kconfig" 79source "drivers/net/ethernet/i825xx/Kconfig"
80source "drivers/net/ethernet/xscale/Kconfig" 80source "drivers/net/ethernet/xscale/Kconfig"
81source "drivers/net/ethernet/icplus/Kconfig"
82 81
83config JME 82config JME
84 tristate "JMicron(R) PCI-Express Gigabit Ethernet support" 83 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..4a2ee98738f0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
41obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ 41obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
42obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ 42obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
43obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ 43obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
44obj-$(CONFIG_IP1000) += icplus/
45obj-$(CONFIG_JME) += jme.o 44obj-$(CONFIG_JME) += jme.o
46obj-$(CONFIG_KORINA) += korina.o 45obj-$(CONFIG_KORINA) += korina.o
47obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o 46obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2afabf3a465..7ccebae9cb48 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1500 return -ENODEV; 1500 return -ENODEV;
1501 } 1501 }
1502 1502
1503 if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) {
1503 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1504 if (err) {
1504 if (pcnet32_debug & NETIF_MSG_PROBE) 1505 if (pcnet32_debug & NETIF_MSG_PROBE)
1505 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1506 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1506 return -ENODEV;
1507 return err;
1507 } 1508 }
1508 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1509 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1509 if (pcnet32_debug & NETIF_MSG_PROBE) 1510 if (pcnet32_debug & NETIF_MSG_PROBE)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5dbaff..c9b036789184 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13207 13207
13208 /* VF with OLD Hypervisor or old PF do not support filtering */ 13208 /* VF with OLD Hypervisor or old PF do not support filtering */
13209 if (IS_PF(bp)) { 13209 if (IS_PF(bp)) {
13210 if (CHIP_IS_E1x(bp))
13210 if (chip_is_e1x)
13211 bp->accept_any_vlan = true; 13211 bp->accept_any_vlan = true;
13212 else 13212 else
13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 13213 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97d7614..b89504405b72 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
560#endif 560#endif
561 561
562/* For PCI-E Advanced Error Recovery (AER) Interface */ 562/* For PCI-E Advanced Error Recovery (AER) Interface */
563static struct pci_error_handlers liquidio_err_handler = {
563static const struct pci_error_handlers liquidio_err_handler = {
564 .error_detected = liquidio_pcie_error_detected, 564 .error_detected = liquidio_pcie_error_detected,
565 .mmio_enabled = liquidio_pcie_mmio_enabled, 565 .mmio_enabled = liquidio_pcie_mmio_enabled,
566 .slot_reset = liquidio_pcie_slot_reset, 566 .slot_reset = liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..7f709cbdcd87 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1583,8 +1583,14 @@ err_disable_device:
1583static void nicvf_remove(struct pci_dev *pdev) 1583static void nicvf_remove(struct pci_dev *pdev)
1584{ 1584{
1585 struct net_device *netdev = pci_get_drvdata(pdev); 1585 struct net_device *netdev = pci_get_drvdata(pdev);
1586 struct nicvf *nic = netdev_priv(netdev);
1587 struct net_device *pnetdev = nic->pnicvf->netdev;
1586 struct nicvf *nic;
1587 struct net_device *pnetdev;
1588
1589 if (!netdev)
1590 return;
1591
1592 nic = netdev_priv(netdev);
1593 pnetdev = nic->pnicvf->netdev;
1588 1594
1589 /* Check if this Qset is assigned to different VF. 1595 /* Check if this Qset is assigned to different VF.
1590 * If yes, clean primary and all secondary Qsets. 1596 * If yes, clean primary and all secondary Qsets.
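Note: the added NULL check covers remove() running when probe never set the driver data (for example after a probe failure): pci_get_drvdata() then yields NULL, and the old code computed netdev_priv() on that NULL pointer and dereferenced it through nic->pnicvf. Sketch of the guarded pattern:

	static void remove_sketch(struct pci_dev *pdev)
	{
		struct net_device *netdev = pci_get_drvdata(pdev);
		struct nicvf *nic;

		if (!netdev)	/* probe failed before drvdata was set */
			return;

		nic = netdev_priv(netdev);
		/* ... proceed with teardown using nic ... */
	}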
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d0b9d4..ebdc83247bb6 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
17if NET_VENDOR_DLINK 17if NET_VENDOR_DLINK
18 18
19config DL2K 19config DL2K
20 tristate "DL2000/TC902x-based Gigabit Ethernet support" 20 tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
21 depends on PCI 21 depends on PCI
22 select CRC32 22 select CRC32
23 ---help--- 23 ---help---
24 This driver supports DL2000/TC902x-based Gigabit ethernet cards,
24 This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
25 which includes 25 which includes
26 D-Link DGE-550T Gigabit Ethernet Adapter. 26 D-Link DGE-550T Gigabit Ethernet Adapter.
27 D-Link DL2000-based Gigabit Ethernet Adapter. 27 D-Link DL2000-based Gigabit Ethernet Adapter.
28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter. 28 Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
29 ICPlus IP1000A-based cards
29 30
30 To compile this driver as a module, choose M here: the 31 To compile this driver as a module, choose M here: the
31 module will be called dl2k. 32 module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fcdaaaf..ccca4799c27b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
253 if (err) 253 if (err)
254 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
255 255
256 if (np->chip_id == CHIP_IP1000A &&
257 (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
258 /* PHY magic taken from ipg driver, undocumented registers */
259 mii_write(dev, np->phy_addr, 31, 0x0001);
260 mii_write(dev, np->phy_addr, 27, 0x01e0);
261 mii_write(dev, np->phy_addr, 31, 0x0002);
262 mii_write(dev, np->phy_addr, 27, 0xeb8e);
263 mii_write(dev, np->phy_addr, 31, 0x0000);
264 mii_write(dev, np->phy_addr, 30, 0x005e);
265 /* advertise 1000BASE-T half & full duplex, prefer MASTER */
266 mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
267 }
268
256 /* Fiber device? */ 269 /* Fiber device? */
257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; 270 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
258 np->link_status = 0; 271 np->link_status = 0;
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
361 for (i = 0; i < 6; i++) 374 for (i = 0; i < 6; i++)
362 dev->dev_addr[i] = psrom->mac_addr[i]; 375 dev->dev_addr[i] = psrom->mac_addr[i];
363 376
377 if (np->chip_id == CHIP_IP1000A) {
378 np->led_mode = psrom->led_mode;
379 return 0;
380 }
381
364 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { 382 if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
365 return 0; 383 return 0;
366 } 384 }
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
406 return 0; 424 return 0;
407} 425}
408 426
427static void rio_set_led_mode(struct net_device *dev)
428{
429 struct netdev_private *np = netdev_priv(dev);
430 void __iomem *ioaddr = np->ioaddr;
431 u32 mode;
432
433 if (np->chip_id != CHIP_IP1000A)
434 return;
435
436 mode = dr32(ASICCtrl);
437 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
438
439 if (np->led_mode & 0x01)
440 mode |= IPG_AC_LED_MODE;
441 if (np->led_mode & 0x02)
442 mode |= IPG_AC_LED_MODE_BIT_1;
443 if (np->led_mode & 0x08)
444 mode |= IPG_AC_LED_SPEED;
445
446 dw32(ASICCtrl, mode);
447}
448
409static int 449static int
410rio_open (struct net_device *dev) 450rio_open (struct net_device *dev)
411{ 451{
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); 464 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
425 mdelay(10); 465 mdelay(10);
426 466
467 rio_set_led_mode(dev);
468
427 /* DebugCtrl bit 4, 5, 9 must set */ 469 /* DebugCtrl bit 4, 5, 9 must set */
428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); 470 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
429 471
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)
433 475
434 alloc_list (dev); 476 alloc_list (dev);
435 477
436 /* Get station address */
437 for (i = 0; i < 6; i++)
438 dw8(StationAddr0 + i, dev->dev_addr[i]);
478 /* Set station address */
479 /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
480 * too. However, it doesn't work on IP1000A so we use 16-bit access.
481 */
482 for (i = 0; i < 3; i++)
483 dw16(StationAddr0 + 2 * i,
484 cpu_to_le16(((u16 *)dev->dev_addr)[i]));
439 485
440 set_multicast (dev); 486 set_multicast (dev);
441 if (np->coalesce) { 487 if (np->coalesce) {
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
780 break; 826 break;
781 mdelay (1); 827 mdelay (1);
782 } 828 }
829 rio_set_led_mode(dev);
783 rio_free_tx (dev, 1); 830 rio_free_tx (dev, 1);
784 /* Reset TFDListPtr */ 831 /* Reset TFDListPtr */
785 dw32(TFDListPtr0, np->tx_ring_dma + 832 dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
799 break; 846 break;
800 mdelay (1); 847 mdelay (1);
801 } 848 }
849 rio_set_led_mode(dev);
802 /* Let TxStartThresh stay default value */ 850 /* Let TxStartThresh stay default value */
803 } 851 }
804 /* Maximum Collisions */ 852 /* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
965 dev->name, int_status); 1013 dev->name, int_status);
966 dw16(ASICCtrl + 2, GlobalReset | HostReset); 1014 dw16(ASICCtrl + 2, GlobalReset | HostReset);
967 mdelay (500); 1015 mdelay (500);
1016 rio_set_led_mode(dev);
968 } 1017 }
969} 1018}
970 1019
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b007069..8f4f61262d5c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
211 ResetBusy = 0x0400, 211 ResetBusy = 0x0400,
212}; 212};
213 213
214#define IPG_AC_LED_MODE BIT(14)
215#define IPG_AC_LED_SPEED BIT(27)
216#define IPG_AC_LED_MODE_BIT_1 BIT(29)
217
214/* Transmit Frame Control bits */ 218/* Transmit Frame Control bits */
215enum TFC_bits { 219enum TFC_bits {
216 DwordAlign = 0x00000000, 220 DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
332 u16 asic_ctrl; /* 0x02 */ 336 u16 asic_ctrl; /* 0x02 */
333 u16 sub_vendor_id; /* 0x04 */ 337 u16 sub_vendor_id; /* 0x04 */
334 u16 sub_system_id; /* 0x06 */ 338 u16 sub_system_id; /* 0x06 */
335 u16 reserved1[12]; /* 0x08-0x1f */
339 u16 pci_base_1; /* 0x08 (IP1000A only) */
340 u16 pci_base_2; /* 0x0a (IP1000A only) */
341 u16 led_mode; /* 0x0c (IP1000A only) */
342 u16 reserved1[9]; /* 0x0e-0x1f */
336 u8 mac_addr[6]; /* 0x20-0x25 */ 343 u8 mac_addr[6]; /* 0x20-0x25 */
337 u8 reserved2[10]; /* 0x26-0x2f */ 344 u8 reserved2[10]; /* 0x26-0x2f */
338 u8 sib[204]; /* 0x30-0xfb */ 345 u8 sib[204]; /* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
397 u16 advertising; /* NWay media advertisement */ 404 u16 advertising; /* NWay media advertisement */
398 u16 negotiate; /* Negotiated media */ 405 u16 negotiate; /* Negotiated media */
399 int phy_addr; /* PHY addresses. */ 406 int phy_addr; /* PHY addresses. */
407 u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
400}; 408};
401 409
402/* The station address location in the EEPROM. */ 410/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
407 class_mask of the class are honored during the comparison. 415 class_mask of the class are honored during the comparison.
408 driver_data Data private to the driver. 416 driver_data Data private to the driver.
409*/ 417*/
418#define CHIP_IP1000A 1
410 419
411static const struct pci_device_id rio_pci_tbl[] = { 420static const struct pci_device_id rio_pci_tbl[] = {
412 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 421 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
413 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 422 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
423 { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
424 { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
425 { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
426 { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
414 { } 427 { }
415}; 428};
416MODULE_DEVICE_TABLE (pci, rio_pci_tbl); 429MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
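Note: the four added IDs carry CHIP_IP1000A in driver_data, while the two original DL2000/TC902x entries leave it 0; the probe path stores that value as np->chip_id, which gates the PHY writes, LED handling, and 16-bit station-address access in the dl2k.c hunks above. Sketched lookup (hypothetical excerpt, not the full probe):

	static int probe_sketch(struct pci_dev *pdev,
				const struct pci_device_id *ent)
	{
		unsigned long chip_id = ent->driver_data; /* 0 or CHIP_IP1000A */

		if (chip_id == CHIP_IP1000A) {
			/* apply the IP1000A-specific quirks shown above */
		}
		return 0;
	}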
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e425853..734f655c99c1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1062static int be_set_rss_hash_opts(struct be_adapter *adapter, 1062static int be_set_rss_hash_opts(struct be_adapter *adapter,
1063 struct ethtool_rxnfc *cmd) 1063 struct ethtool_rxnfc *cmd)
1064{ 1064{
1065 struct be_rx_obj *rxo; 1065 int status;
1066 int status = 0, i, j;
1067 u8 rsstable[128];
1068 u32 rss_flags = adapter->rss_info.rss_flags; 1066 u32 rss_flags = adapter->rss_info.rss_flags;
1069 1067
1070 if (cmd->data != L3_RSS_FLAGS && 1068 if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
1113 } 1111 }
1114 1112
1115 if (rss_flags == adapter->rss_info.rss_flags) 1113 if (rss_flags == adapter->rss_info.rss_flags)
1116 return status;
1114 return 0;
1117
1118 if (be_multi_rxq(adapter)) {
1119 for (j = 0; j < 128; j += adapter->num_rss_qs) {
1120 for_all_rss_queues(adapter, rxo, i) {
1121 if ((j + i) >= 128)
1122 break;
1123 rsstable[j + i] = rxo->rss_id;
1124 }
1125 }
1126 }
1127 1115
1128 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, 1116 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1129 rss_flags, 128, adapter->rss_info.rss_hkey);
1117 rss_flags, RSS_INDIR_TABLE_LEN,
1118 adapter->rss_info.rss_hkey);
1130 if (!status) 1119 if (!status)
1131 adapter->rss_info.rss_flags = rss_flags; 1120 adapter->rss_info.rss_flags = rss_flags;
1132 1121
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a977f8da..b6ad02909d6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3518 3518
3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); 3519 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, 3520 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3521 128, rss_key);
3521 RSS_INDIR_TABLE_LEN, rss_key);
3522 if (rc) { 3522 if (rc) {
3523 rss->rss_flags = RSS_ENABLE_NONE; 3523 rss->rss_flags = RSS_ENABLE_NONE;
3524 return rc; 3524 return rc;
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
1#
2# IC Plus device configuration
3#
4
5config IP1000
6 tristate "IP1000 Gigabit Ethernet support"
7 depends on PCI
8 select MII
9 ---help---
10 This driver supports IP1000 gigabit Ethernet cards.
11
12 To compile this driver as a module, choose M here: the module
13 will be called ipg. This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the IC Plus device drivers
3#
4
5obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
1/*
2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
3 *
4 * Copyright (C) 2003, 2007 IC Plus Corp
5 *
6 * Original Author:
7 *
8 * Craig Rich
9 * Sundance Technology, Inc.
10 * www.sundanceti.com
11 * craig_rich@sundanceti.com
12 *
13 * Current Maintainer:
14 *
15 * Sorbica Shieh.
16 * http://www.icplus.com.tw
17 * sorbica@icplus.com.tw
18 *
19 * Jesse Huang
20 * http://www.icplus.com.tw
21 * jesse@icplus.com.tw
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/crc32.h>
27#include <linux/ethtool.h>
28#include <linux/interrupt.h>
29#include <linux/gfp.h>
30#include <linux/mii.h>
31#include <linux/mutex.h>
32
33#include <asm/div64.h>
34
35#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37#define IPG_RESET_MASK \
38 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
40 IPG_AC_AUTO_INIT)
41
42#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
45
46#define ipg_r32(reg) ioread32(ioaddr + (reg))
47#define ipg_r16(reg) ioread16(ioaddr + (reg))
48#define ipg_r8(reg) ioread8(ioaddr + (reg))
49
50enum {
51 netdev_io_size = 128
52};
53
54#include "ipg.h"
55#define DRV_NAME "ipg"
56
57MODULE_AUTHOR("IC Plus Corp. 2003");
58MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59MODULE_LICENSE("GPL");
60
61/*
62 * Defaults
63 */
64#define IPG_MAX_RXFRAME_SIZE 0x0600
65#define IPG_RXFRAG_SIZE 0x0600
66#define IPG_RXSUPPORT_SIZE 0x0600
67#define IPG_IS_JUMBO false
68
69/*
70 * Variable record -- index by leading revision/length
71 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
72 */
73static const unsigned short DefaultPhyParam[] = {
74 /* 11/12/03 IP1000A v1-3 rev=0x40 */
75 /*--------------------------------------------------------------------------
76 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79 --------------------------------------------------------------------------*/
80 /* 12/17/03 IP1000A v1-4 rev=0x40 */
81 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
82 0x0000,
83 30, 0x005e, 9, 0x0700,
84 /* 01/09/04 IP1000A v1-5 rev=0x41 */
85 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
86 0x0000,
87 30, 0x005e, 9, 0x0700,
88 0x0000
89};
90
91static const char * const ipg_brand_name[] = {
92 "IC PLUS IP1000 1000/100/10 based NIC",
93 "Sundance Technology ST2021 based NIC",
94 "Tamarack Microelectronics TC9020/9021 based NIC",
95 "D-Link NIC IP1000A"
96};
97
98static const struct pci_device_id ipg_pci_tbl[] = {
99 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101 { PCI_VDEVICE(DLINK, 0x9021), 2 },
102 { PCI_VDEVICE(DLINK, 0x4020), 3 },
103 { 0, }
104};
105
106MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107
108static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109{
110 struct ipg_nic_private *sp = netdev_priv(dev);
111 return sp->ioaddr;
112}
113
114#ifdef IPG_DEBUG
115static void ipg_dump_rfdlist(struct net_device *dev)
116{
117 struct ipg_nic_private *sp = netdev_priv(dev);
118 void __iomem *ioaddr = sp->ioaddr;
119 unsigned int i;
120 u32 offset;
121
122 IPG_DEBUG_MSG("_dump_rfdlist\n");
123
124 netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125 netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126 netdev_info(dev, "RFDList start address = %016lx\n",
127 (unsigned long)sp->rxd_map);
128 netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130
131 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133 netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134 i, offset, (unsigned long)sp->rxd[i].next_desc);
135 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136 netdev_info(dev, "%02x %04x RFS = %016lx\n",
137 i, offset, (unsigned long)sp->rxd[i].rfs);
138 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140 i, offset, (unsigned long)sp->rxd[i].frag_info);
141 }
142}
143
144static void ipg_dump_tfdlist(struct net_device *dev)
145{
146 struct ipg_nic_private *sp = netdev_priv(dev);
147 void __iomem *ioaddr = sp->ioaddr;
148 unsigned int i;
149 u32 offset;
150
151 IPG_DEBUG_MSG("_dump_tfdlist\n");
152
153 netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154 netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155 netdev_info(dev, "TFDList start address = %016lx\n",
156 (unsigned long) sp->txd_map);
157 netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159
160 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162 netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163 i, offset, (unsigned long)sp->txd[i].next_desc);
164
165 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166 netdev_info(dev, "%02x %04x TFC = %016lx\n",
167 i, offset, (unsigned long) sp->txd[i].tfc);
168 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170 i, offset, (unsigned long) sp->txd[i].frag_info);
171 }
172}
173#endif
174
175static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176{
177 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178 ndelay(IPG_PC_PHYCTRLWAIT_NS);
179}
180
181static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182{
183 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
185}
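/* One bit-banged management-clock cycle: the data and direction bits
 * in 'data' are held steady while MgmtClk is driven low and then
 * high, so the PHY samples MgmtData on the rising clock edge, per
 * the usual MDIO convention.
 */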
186
187static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188{
189 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190
191 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
192}
193
194static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195{
196 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
198}
199
200static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201{
202 u16 bit_data;
203
204 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205
206 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207
208 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209
210 return bit_data;
211}
212
213/*
214 * Read a register from the Physical Layer device located
215 * on the IPG NIC, using the IPG PHYCTRL register.
216 */
217static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218{
219 void __iomem *ioaddr = ipg_ioaddr(dev);
220 /*
221 * The GMII management frame structure for a read is as follows:
222 *
223 * |Preamble|st|op|phyad|regad|ta| data |idle|
224 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225 *
226 * <32 1s> = 32 consecutive logic 1 values
227 * A = bit of Physical Layer device address (MSB first)
228 * R = bit of register address (MSB first)
229 * z = High impedance state
230 * D = bit of read data (MSB first)
231 *
232 * Transmission order is 'Preamble' field first, bits transmitted
233 * left to right (first to last).
234 */
235 struct {
236 u32 field;
237 unsigned int len;
238 } p[] = {
239 { GMII_PREAMBLE, 32 }, /* Preamble */
240 { GMII_ST, 2 }, /* ST */
241 { GMII_READ, 2 }, /* OP */
242 { phy_id, 5 }, /* PHYAD */
243 { phy_reg, 5 }, /* REGAD */
244 { 0x0000, 2 }, /* TA */
245 { 0x0000, 16 }, /* DATA */
246 { 0x0000, 1 } /* IDLE */
247 };
248 unsigned int i, j;
249 u8 polarity, data;
250
251 polarity = ipg_r8(PHY_CTRL);
252 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253
254 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
255 for (j = 0; j < 5; j++) {
256 for (i = 0; i < p[j].len; i++) {
257 /* For each variable length field, the MSB must be
258 * transmitted first. Rotate through the field bits,
259 * starting with the MSB, and move each bit into the
260 * 1st (2^1) bit position (this is the bit position
261 * corresponding to the MgmtData bit of the PhyCtrl
262 * register for the IPG).
263 *
264 * Example: ST = 01;
265 *
266 * First write a '0' to bit 1 of the PhyCtrl
267 * register, then write a '1' to bit 1 of the
268 * PhyCtrl register.
269 *
270 * To do this, right shift the MSB of ST by the value:
271 * [field length - 1 - #ST bits already written]
272 * then left shift this result by 1.
273 */
274 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275 data &= IPG_PC_MGMTDATA;
276 data |= polarity | IPG_PC_MGMTDIR;
277
278 ipg_drive_phy_ctl_low_high(ioaddr, data);
279 }
280 }
281
282 send_three_state(ioaddr, polarity);
283
284 read_phy_bit(ioaddr, polarity);
285
286 /*
287 * For a read cycle, the bits for the next two fields (TA and
288 * DATA) are driven by the PHY (the IPG reads these bits).
289 */
290 for (i = 0; i < p[6].len; i++) {
291 p[6].field |=
292 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
293 }
294
295 send_three_state(ioaddr, polarity);
296 send_three_state(ioaddr, polarity);
297 send_three_state(ioaddr, polarity);
298 send_end(ioaddr, polarity);
299
300 /* Return the value of the DATA field. */
301 return p[6].field;
302}
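/* Typical use, as in ipg_find_phyaddr() below:
 *
 *	status = mdio_read(dev, phyaddr, MII_BMSR);
 *
 * The return value is the 16-bit register contents; reads of 0x0000
 * or 0xffff indicate that no PHY responded at 'phyaddr'.
 */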
303
304/*
305 * Write to a register of the Physical Layer device located
306 * on the IPG NIC, using the IPG PHYCTRL register.
307 */
308static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309{
310 void __iomem *ioaddr = ipg_ioaddr(dev);
311 /*
312 * The GMII management frame structure for a write is as follows:
313 *
314 * |Preamble|st|op|phyad|regad|ta| data |idle|
315 * |< 32 1s>|01|01|AAAAA|RRRRR|10|DDDDDDDDDDDDDDDD|z |
316 *
317 * <32 1s> = 32 consecutive logic 1 values
318 * A = bit of Physical Layer device address (MSB first)
319 * R = bit of register address (MSB first)
320 * z = High impedance state
321 * D = bit of write data (MSB first)
322 *
323 * Transmission order is 'Preamble' field first, bits transmitted
324 * left to right (first to last).
325 */
326 struct {
327 u32 field;
328 unsigned int len;
329 } p[] = {
330 { GMII_PREAMBLE, 32 }, /* Preamble */
331 { GMII_ST, 2 }, /* ST */
332 { GMII_WRITE, 2 }, /* OP */
333 { phy_id, 5 }, /* PHYAD */
334 { phy_reg, 5 }, /* REGAD */
335 { 0x0002, 2 }, /* TA */
336 { val & 0xffff, 16 }, /* DATA */
337 { 0x0000, 1 } /* IDLE */
338 };
339 unsigned int i, j;
340 u8 polarity, data;
341
342 polarity = ipg_r8(PHY_CTRL);
343 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344
345 /* Create the Preamble, ST, OP, PHYAD, REGAD, TA and DATA fields. */
346 for (j = 0; j < 7; j++) {
347 for (i = 0; i < p[j].len; i++) {
348 /* For each variable length field, the MSB must be
349 * transmitted first. Rotate through the field bits,
350 * starting with the MSB, and move each bit into the
351 * 1st (2^1) bit position (this is the bit position
352 * corresponding to the MgmtData bit of the PhyCtrl
353 * register for the IPG).
354 *
355 * Example: ST = 01;
356 *
357 * First write a '0' to bit 1 of the PhyCtrl
358 * register, then write a '1' to bit 1 of the
359 * PhyCtrl register.
360 *
361 * To do this, right shift the MSB of ST by the value:
362 * [field length - 1 - #ST bits already written]
363 * then left shift this result by 1.
364 */
365 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366 data &= IPG_PC_MGMTDATA;
367 data |= polarity | IPG_PC_MGMTDIR;
368
369 ipg_drive_phy_ctl_low_high(ioaddr, data);
370 }
371 }
372
373 /* The last cycle is a tri-state, so read from the PHY. */
374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
375 ipg_r8(PHY_CTRL);
376 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
377}
378
379static void ipg_set_led_mode(struct net_device *dev)
380{
381 struct ipg_nic_private *sp = netdev_priv(dev);
382 void __iomem *ioaddr = sp->ioaddr;
383 u32 mode;
384
385 mode = ipg_r32(ASIC_CTRL);
386 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
387
388 if ((sp->led_mode & 0x03) > 1)
389 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
390
391 if ((sp->led_mode & 0x01) == 1)
392 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
393
394 if ((sp->led_mode & 0x08) == 8)
395 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
396
397 ipg_w32(mode, ASIC_CTRL);
398}
399
400static void ipg_set_phy_set(struct net_device *dev)
401{
402 struct ipg_nic_private *sp = netdev_priv(dev);
403 void __iomem *ioaddr = sp->ioaddr;
404 int physet;
405
406 physet = ipg_r8(PHY_SET);
407 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
408 physet |= ((sp->led_mode & 0x70) >> 4);
409 ipg_w8(physet, PHY_SET);
410}
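/* Taken together, ipg_set_led_mode() and ipg_set_phy_set() decode the
 * EEPROM led_mode word as follows: bit 0 selects the LED mode
 * (AsicCtrl bit 14), a value above 1 in bits 0-1 sets AsicCtrl
 * bit 29, bit 3 enables the LED speed indication (AsicCtrl bit 27),
 * and bits 4-6 are copied into the PhySet register.
 */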
411
412static int ipg_reset(struct net_device *dev, u32 resetflags)
413{
414 /* Assert functional resets via the IPG AsicCtrl
415 * register as specified by the 'resetflags' input
416 * parameter.
417 */
418 void __iomem *ioaddr = ipg_ioaddr(dev);
419 unsigned int timeout_count = 0;
420
421 IPG_DEBUG_MSG("_reset\n");
422
423 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
424
425 /* Delay added to account for problem with 10Mbps reset. */
426 mdelay(IPG_AC_RESETWAIT);
427
428 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
429 mdelay(IPG_AC_RESETWAIT);
430 if (++timeout_count > IPG_AC_RESET_TIMEOUT)
431 return -ETIME;
432 }
433 /* Set LED Mode in Asic Control */
434 ipg_set_led_mode(dev);
435
436 /* Set PHYSet Register Value */
437 ipg_set_phy_set(dev);
438 return 0;
439}
440
441/* Find the GMII PHY address. */
442static int ipg_find_phyaddr(struct net_device *dev)
443{
444 unsigned int phyaddr, i;
445
446 for (i = 0; i < 32; i++) {
447 u32 status;
448
449 /* Search for the correct PHY address among 32 possible. */
450 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
451
452 /* Probe the basic status register: a PHY responds at this
453 * address if the read returns neither 0x0000 nor 0xffff.
454 */
455
456 status = mdio_read(dev, phyaddr, MII_BMSR);
457
458 if ((status != 0xFFFF) && (status != 0))
459 return phyaddr;
460 }
461
462 return 0x1f;
463}
464
465/*
466 * Configure IPG based on result of IEEE 802.3 PHY
467 * auto-negotiation.
468 */
469static int ipg_config_autoneg(struct net_device *dev)
470{
471 struct ipg_nic_private *sp = netdev_priv(dev);
472 void __iomem *ioaddr = sp->ioaddr;
473 unsigned int txflowcontrol;
474 unsigned int rxflowcontrol;
475 unsigned int fullduplex;
476 u32 mac_ctrl_val;
477 u32 asicctrl;
478 u8 phyctrl;
479 const char *speed;
480 const char *duplex;
481 const char *tx_desc;
482 const char *rx_desc;
483
484 IPG_DEBUG_MSG("_config_autoneg\n");
485
486 asicctrl = ipg_r32(ASIC_CTRL);
487 phyctrl = ipg_r8(PHY_CTRL);
488 mac_ctrl_val = ipg_r32(MAC_CTRL);
489
490 /* Set flags for use in resolving auto-negotiation, assuming
491 * non-1000Mbps, half duplex, no flow control.
492 */
493 fullduplex = 0;
494 txflowcontrol = 0;
495 rxflowcontrol = 0;
496
497 /* To accommodate a problem in 10Mbps operation,
498 * set a global flag if PHY running in 10Mbps mode.
499 */
500 sp->tenmbpsmode = 0;
501
502 /* Determine actual speed of operation. */
503 switch (phyctrl & IPG_PC_LINK_SPEED) {
504 case IPG_PC_LINK_SPEED_10MBPS:
505 speed = "10Mbps";
506 sp->tenmbpsmode = 1;
507 break;
508 case IPG_PC_LINK_SPEED_100MBPS:
509 speed = "100Mbps";
510 break;
511 case IPG_PC_LINK_SPEED_1000MBPS:
512 speed = "1000Mbps";
513 break;
514 default:
515 speed = "undefined!";
516 return 0;
517 }
518
519 netdev_info(dev, "Link speed = %s\n", speed);
520 if (sp->tenmbpsmode == 1)
521 netdev_info(dev, "10Mbps operational mode enabled\n");
522
523 if (phyctrl & IPG_PC_DUPLEX_STATUS) {
524 fullduplex = 1;
525 txflowcontrol = 1;
526 rxflowcontrol = 1;
527 }
528
529 /* Configure full duplex, and flow control. */
530 if (fullduplex == 1) {
531
532 /* Configure IPG for full duplex operation. */
533
534 duplex = "full";
535
536 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
537
538 if (txflowcontrol == 1) {
539 tx_desc = "";
540 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541 } else {
542 tx_desc = "no ";
543 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
544 }
545
546 if (rxflowcontrol == 1) {
547 rx_desc = "";
548 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549 } else {
550 rx_desc = "no ";
551 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
552 }
553 } else {
554 duplex = "half";
555 tx_desc = "no ";
556 rx_desc = "no ";
557 mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
558 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
559 ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
560 }
561
562 netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
563 duplex, tx_desc, rx_desc);
564 ipg_w32(mac_ctrl_val, MAC_CTRL);
565
566 return 0;
567}
568
569/* Determine and configure multicast operation and set
570 * receive mode for IPG.
571 */
572static void ipg_nic_set_multicast_list(struct net_device *dev)
573{
574 void __iomem *ioaddr = ipg_ioaddr(dev);
575 struct netdev_hw_addr *ha;
576 unsigned int hashindex;
577 u32 hashtable[2];
578 u8 receivemode;
579
580 IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581
582 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583
584 if (dev->flags & IFF_PROMISC) {
585 /* NIC to be configured in promiscuous mode. */
586 receivemode = IPG_RM_RECEIVEALLFRAMES;
587 } else if ((dev->flags & IFF_ALLMULTI) ||
588 ((dev->flags & IFF_MULTICAST) &&
589 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
590 /* NIC to be configured to receive all multicast
591 * frames. */
592 receivemode |= IPG_RM_RECEIVEMULTICAST;
593 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
594 /* NIC to be configured to receive selected
595 * multicast addresses. */
596 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
597 }
598
599 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
600 * The IPG applies a cyclic-redundancy-check (the same CRC
601 * used to calculate the frame data FCS) to the destination
602 * address of all incoming multicast frames whose destination
603 * address has the multicast bit set. The least significant
604 * 6 bits of the CRC result are used as an addressing index
605 * into the hash table. If the value of the bit addressed by
606 * this index is a 1, the frame is passed to the host system.
607 */
608
609 /* Clear hashtable. */
610 hashtable[0] = 0x00000000;
611 hashtable[1] = 0x00000000;
612
613 /* Cycle through all multicast addresses to filter. */
614 netdev_for_each_mc_addr(ha, dev) {
615 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, ha->addr,
617 ETH_ALEN);
618
619 /* Use only the least significant 6 bits. */
620 hashindex = hashindex & 0x3F;
621
622 /* Within "hashtable", set bit number "hashindex"
623 * to a logic 1.
624 */
625 set_bit(hashindex, (void *)hashtable);
626 }
627
628 /* Write the value of the hashtable to the two 32-bit
629 * HASHTABLE IPG registers.
630 */
631 ipg_w32(hashtable[0], HASHTABLE_0);
632 ipg_w32(hashtable[1], HASHTABLE_1);
633
634 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
635
636 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
637}
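/* Worked example of the hash addressing above: if the CRC of a
 * multicast address yields hashindex 35, bit 35 of the 64-bit table
 * is set -- on a little-endian host that is bit 3 of hashtable[1] --
 * and a frame hashing to 35 is then accepted whenever the
 * ReceiveMulticastHash mode is active.
 */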
638
639static int ipg_io_config(struct net_device *dev)
640{
641 struct ipg_nic_private *sp = netdev_priv(dev);
642 void __iomem *ioaddr = ipg_ioaddr(dev);
643 u32 origmacctrl;
644 u32 restoremacctrl;
645
646 IPG_DEBUG_MSG("_io_config\n");
647
648 origmacctrl = ipg_r32(MAC_CTRL);
649
650 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
651
652 /* Based on compilation option, determine if FCS is to be
653 * stripped on receive frames by IPG.
654 */
655 if (!IPG_STRIP_FCS_ON_RX)
656 restoremacctrl |= IPG_MC_RCV_FCS;
657
658 /* Determine if transmitter and/or receiver are
659 * enabled so we may restore MACCTRL correctly.
660 */
661 if (origmacctrl & IPG_MC_TX_ENABLED)
662 restoremacctrl |= IPG_MC_TX_ENABLE;
663
664 if (origmacctrl & IPG_MC_RX_ENABLED)
665 restoremacctrl |= IPG_MC_RX_ENABLE;
666
667 /* Transmitter and receiver must be disabled before setting
668 * IFSSelect.
669 */
670 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671 IPG_MC_RSVD_MASK, MAC_CTRL);
672
673 /* Now that transmitter and receiver are disabled, write
674 * to IFSSelect.
675 */
676 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
677
678 /* Set RECEIVEMODE register. */
679 ipg_nic_set_multicast_list(dev);
680
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
682
683 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
684 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
685 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
686 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
687 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
688 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
689 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
690 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
691 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
692 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
693 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
694 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
695
696 /* IPG multi-frag frame bug workaround.
697 * Per silicon revision B3 errata.
698 */
699 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
700
701 /* IPG TX poll now bug workaround.
702 * Per silicon revision B3 errata.
703 */
704 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
705
706 /* IPG RX poll now bug workaround.
707 * Per silicon revision B3 errata.
708 */
709 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
710
711 /* Now restore MACCTRL to original setting. */
712 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
713
714 /* Disable unused RMON statistics. */
715 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
716
717 /* Disable unused MIB statistics. */
718 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
719 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
720 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
721 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
722 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
723 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
724
725 return 0;
726}
727
728/*
729 * Create a receive buffer within system memory and update
730 * NIC private structure appropriately.
731 */
732static int ipg_get_rxbuff(struct net_device *dev, int entry)
733{
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
736 struct sk_buff *skb;
737 u64 rxfragsize;
738
739 IPG_DEBUG_MSG("_get_rxbuff\n");
740
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) {
743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM;
745 }
746
747 /* Save the address of the sk_buff structure. */
748 sp->rx_buff[entry] = skb;
749
750 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752
753 /* Set the RFD fragment length. */
754 rxfragsize = sp->rxfrag_size;
755 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
756
757 return 0;
758}
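/* The 64-bit frag_info field assembled above packs the DMA bus
 * address of the receive buffer into its low 48 bits and the
 * fragment length into bits 48-63 (the IPG_RFI_FRAGLEN mask).
 */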
759
760static int init_rfdlist(struct net_device *dev)
761{
762 struct ipg_nic_private *sp = netdev_priv(dev);
763 void __iomem *ioaddr = sp->ioaddr;
764 unsigned int i;
765
766 IPG_DEBUG_MSG("_init_rfdlist\n");
767
768 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
769 struct ipg_rx *rxfd = sp->rxd + i;
770
771 if (sp->rx_buff[i]) {
772 pci_unmap_single(sp->pdev,
773 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
774 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775 dev_kfree_skb_irq(sp->rx_buff[i]);
776 sp->rx_buff[i] = NULL;
777 }
778
779 /* Clear out the RFS field. */
780 rxfd->rfs = 0x0000000000000000;
781
782 if (ipg_get_rxbuff(dev, i) < 0) {
783 /*
784 * A receive buffer was not ready, break the
785 * RFD list here.
786 */
787 IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
788
789 /* Just in case we cannot allocate a single RFD.
790 * Should not occur.
791 */
792 if (i == 0) {
793 netdev_err(dev, "No memory available for RFD list\n");
794 return -ENOMEM;
795 }
796 }
797
798 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
799 sizeof(struct ipg_rx)*(i + 1));
800 }
801 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
802
803 sp->rx_current = 0;
804 sp->rx_dirty = 0;
805
806 /* Write the location of the RFDList to the IPG. */
807 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
808 ipg_w32(0x00000000, RFD_LIST_PTR_1);
809
810 return 0;
811}
812
813static void init_tfdlist(struct net_device *dev)
814{
815 struct ipg_nic_private *sp = netdev_priv(dev);
816 void __iomem *ioaddr = sp->ioaddr;
817 unsigned int i;
818
819 IPG_DEBUG_MSG("_init_tfdlist\n");
820
821 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
822 struct ipg_tx *txfd = sp->txd + i;
823
824 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
825
826 if (sp->tx_buff[i]) {
827 dev_kfree_skb_irq(sp->tx_buff[i]);
828 sp->tx_buff[i] = NULL;
829 }
830
831 txfd->next_desc = cpu_to_le64(sp->txd_map +
832 sizeof(struct ipg_tx)*(i + 1));
833 }
834 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
835
836 sp->tx_current = 0;
837 sp->tx_dirty = 0;
838
839 /* Write the location of the TFDList to the IPG. */
840 IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
841 (u32) sp->txd_map);
842 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
843 ipg_w32(0x00000000, TFD_LIST_PTR_1);
844
845 sp->reset_current_tfd = 1;
846}
847
848/*
849 * Free all transmit buffers which have already been transferred
850 * via DMA to the IPG.
851 */
852static void ipg_nic_txfree(struct net_device *dev)
853{
854 struct ipg_nic_private *sp = netdev_priv(dev);
855 unsigned int released, pending, dirty;
856
857 IPG_DEBUG_MSG("_nic_txfree\n");
858
859 pending = sp->tx_current - sp->tx_dirty;
860 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
861
862 for (released = 0; released < pending; released++) {
863 struct sk_buff *skb = sp->tx_buff[dirty];
864 struct ipg_tx *txfd = sp->txd + dirty;
865
866 IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
867
868 /* Look at each TFD's TFC field beginning
869 * at the last freed TFD up to the current TFD.
870 * If the TFDDone bit is set, free the associated
871 * buffer.
872 */
873 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
874 break;
875
876 /* Free the transmit buffer. */
877 if (skb) {
878 pci_unmap_single(sp->pdev,
879 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
880 skb->len, PCI_DMA_TODEVICE);
881
882 dev_kfree_skb_irq(skb);
883
884 sp->tx_buff[dirty] = NULL;
885 }
886 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
887 }
888
889 sp->tx_dirty += released;
890
891 if (netif_queue_stopped(dev) &&
892 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
893 netif_wake_queue(dev);
894 }
895}
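/* tx_current and tx_dirty are free-running counters: their difference
 * is the number of outstanding TFDs, and each is taken modulo
 * IPG_TFDLIST_LENGTH to index the descriptor ring.  The queue is
 * woken above as soon as the ring is no longer completely full.
 */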
896
897static void ipg_tx_timeout(struct net_device *dev)
898{
899 struct ipg_nic_private *sp = netdev_priv(dev);
900 void __iomem *ioaddr = sp->ioaddr;
901
902 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
903 IPG_AC_FIFO);
904
905 spin_lock_irq(&sp->lock);
906
907 /* Re-configure after DMA reset. */
908 if (ipg_io_config(dev) < 0)
909 netdev_info(dev, "Error during re-configuration\n");
910
911 init_tfdlist(dev);
912
913 spin_unlock_irq(&sp->lock);
914
915 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
916 MAC_CTRL);
917}
918
919/*
920 * For TxComplete interrupts, free all transmit
921 * buffers which have already been transferred via DMA
922 * to the IPG.
923 */
924static void ipg_nic_txcleanup(struct net_device *dev)
925{
926 struct ipg_nic_private *sp = netdev_priv(dev);
927 void __iomem *ioaddr = sp->ioaddr;
928 unsigned int i;
929
930 IPG_DEBUG_MSG("_nic_txcleanup\n");
931
932 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
933 /* Reading the TXSTATUS register clears the
934 * TX_COMPLETE interrupt.
935 */
936 u32 txstatusdword = ipg_r32(TX_STATUS);
937
938 IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
939
940 /* Check for Transmit errors. Error bits only valid if
941 * TX_COMPLETE bit in the TXSTATUS register is a 1.
942 */
943 if (!(txstatusdword & IPG_TS_TX_COMPLETE))
944 break;
945
946 /* If in 10Mbps mode, indicate transmit is ready. */
947 if (sp->tenmbpsmode) {
948 netif_wake_queue(dev);
949 }
950
951 /* Transmit error, increment stat counters. */
952 if (txstatusdword & IPG_TS_TX_ERROR) {
953 IPG_DEBUG_MSG("Transmit error\n");
954 sp->stats.tx_errors++;
955 }
956
957 /* Late collision, re-enable transmitter. */
958 if (txstatusdword & IPG_TS_LATE_COLLISION) {
959 IPG_DEBUG_MSG("Late collision on transmit\n");
960 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
961 IPG_MC_RSVD_MASK, MAC_CTRL);
962 }
963
964 /* Maximum collisions, re-enable transmitter. */
965 if (txstatusdword & IPG_TS_TX_MAX_COLL) {
966 IPG_DEBUG_MSG("Maximum collisions on transmit\n");
967 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
968 IPG_MC_RSVD_MASK, MAC_CTRL);
969 }
970
971 /* Transmit underrun, reset and re-enable
972 * transmitter.
973 */
974 if (txstatusdword & IPG_TS_TX_UNDERRUN) {
975 IPG_DEBUG_MSG("Transmitter underrun\n");
976 sp->stats.tx_fifo_errors++;
977 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
978 IPG_AC_NETWORK | IPG_AC_FIFO);
979
980 /* Re-configure after DMA reset. */
981 if (ipg_io_config(dev) < 0) {
982 netdev_info(dev, "Error during re-configuration\n");
983 }
984 init_tfdlist(dev);
985
986 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
987 IPG_MC_RSVD_MASK, MAC_CTRL);
988 }
989 }
990
991 ipg_nic_txfree(dev);
992}
993
994/* Provides statistical information about the IPG NIC. */
995static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
996{
997 struct ipg_nic_private *sp = netdev_priv(dev);
998 void __iomem *ioaddr = sp->ioaddr;
999 u16 temp1;
1000 u16 temp2;
1001
1002 IPG_DEBUG_MSG("_nic_get_stats\n");
1003
1004 /* Check to see if the NIC has been initialized via nic_open,
1005 * before trying to read statistic registers.
1006 */
1007 if (!netif_running(dev))
1008 return &sp->stats;
1009
1010 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1014 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1015 sp->stats.rx_errors += temp1;
1016 sp->stats.rx_missed_errors += temp1;
1017 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1018 ipg_r32(IPG_LATECOLLISIONS);
1019 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1020 sp->stats.collisions += temp1;
1021 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1023 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1024 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1025
1026 /* detailed tx_errors */
1027 sp->stats.tx_carrier_errors += temp2;
1028
1029 /* detailed rx_errors */
1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1031 ipg_r16(IPG_FRAMETOOLONGERRORS);
1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1033
1034 /* Unutilized IPG statistic registers. */
1035 ipg_r32(IPG_MCSTFRAMESRCVDOK);
1036
1037 return &sp->stats;
1038}
1039
1040/* Restore used receive buffers. */
1041static int ipg_nic_rxrestore(struct net_device *dev)
1042{
1043 struct ipg_nic_private *sp = netdev_priv(dev);
1044 const unsigned int curr = sp->rx_current;
1045 unsigned int dirty = sp->rx_dirty;
1046
1047 IPG_DEBUG_MSG("_nic_rxrestore\n");
1048
1049 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1050 unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1051
1052 /* rx_copybreak may poke holes here and there. */
1053 if (sp->rx_buff[entry])
1054 continue;
1055
1056 /* Generate a new receive buffer to replace the
1057 * current buffer (which will be released by the
1058 * Linux system).
1059 */
1060 if (ipg_get_rxbuff(dev, entry) < 0) {
1061 IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1062
1063 break;
1064 }
1065
1066 /* Reset the RFS field. */
1067 sp->rxd[entry].rfs = 0x0000000000000000;
1068 }
1069 sp->rx_dirty = dirty;
1070
1071 return 0;
1072}
1073
1074/* Use jumboindex and jumbosize to track jumbo frame status.
1075 * The initial state is jumboindex = -1 and jumbosize = 0.
1076 * 1. jumboindex == -1 and jumbosize == 0: the previous jumbo frame is complete.
1077 * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is being received and is not oversized.
1078 * 3. jumboindex == -1 and jumbosize != 0: the jumbo frame is oversized; the part received so
1079 * far has been dropped and the remaining fragments must be discarded as well.
1080 */
1081enum {
1082 NORMAL_PACKET,
1083 ERROR_PACKET
1084};
1085
1086enum {
1087 FRAME_NO_START_NO_END = 0,
1088 FRAME_WITH_START = 1,
1089 FRAME_WITH_END = 10,
1090 FRAME_WITH_START_WITH_END = 11
1091};
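/* The values are chosen so that ipg_nic_rx_check_frame_type() can
 * simply add them: a descriptor with both the start and end bits set
 * yields 1 + 10 = 11, i.e. FRAME_WITH_START_WITH_END.
 */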
1092
1093static void ipg_nic_rx_free_skb(struct net_device *dev)
1094{
1095 struct ipg_nic_private *sp = netdev_priv(dev);
1096 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1097
1098 if (sp->rx_buff[entry]) {
1099 struct ipg_rx *rxfd = sp->rxd + entry;
1100
1101 pci_unmap_single(sp->pdev,
1102 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1103 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_irq(sp->rx_buff[entry]);
1105 sp->rx_buff[entry] = NULL;
1106 }
1107}
1108
1109static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1110{
1111 struct ipg_nic_private *sp = netdev_priv(dev);
1112 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1113 int type = FRAME_NO_START_NO_END;
1114
1115 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1116 type += FRAME_WITH_START;
1117 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1118 type += FRAME_WITH_END;
1119 return type;
1120}
1121
1122static int ipg_nic_rx_check_error(struct net_device *dev)
1123{
1124 struct ipg_nic_private *sp = netdev_priv(dev);
1125 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126 struct ipg_rx *rxfd = sp->rxd + entry;
1127
1128 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1129 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1130 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1131 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1132 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1133 (unsigned long) rxfd->rfs);
1134
1135 /* Increment general receive error statistic. */
1136 sp->stats.rx_errors++;
1137
1138 /* Increment detailed receive error statistics. */
1139 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1140 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1141
1142 sp->stats.rx_fifo_errors++;
1143 }
1144
1145 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1146 IPG_DEBUG_MSG("RX runt occurred\n");
1147 sp->stats.rx_length_errors++;
1148 }
1149
1150 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1151 * error count handled by an IPG statistic register.
1152 */
1153
1154 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1155 IPG_DEBUG_MSG("RX alignment error occurred\n");
1156 sp->stats.rx_frame_errors++;
1157 }
1158
1159 /* Do nothing for IPG_RFS_RXFCSERROR, error count
1160 * handled by an IPG statistic register.
1161 */
1162
1163 /* Free the memory associated with the RX
1164 * buffer since it is erroneous and we will
1165 * not pass it to higher layer processes.
1166 */
1167 if (sp->rx_buff[entry]) {
1168 pci_unmap_single(sp->pdev,
1169 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1170 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1171
1172 dev_kfree_skb_irq(sp->rx_buff[entry]);
1173 sp->rx_buff[entry] = NULL;
1174 }
1175 return ERROR_PACKET;
1176 }
1177 return NORMAL_PACKET;
1178}
1179
1180static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1181 struct ipg_nic_private *sp,
1182 struct ipg_rx *rxfd, unsigned entry)
1183{
1184 struct ipg_jumbo *jumbo = &sp->jumbo;
1185 struct sk_buff *skb;
1186 int framelen;
1187
1188 if (jumbo->found_start) {
1189 dev_kfree_skb_irq(jumbo->skb);
1190 jumbo->found_start = 0;
1191 jumbo->current_size = 0;
1192 jumbo->skb = NULL;
1193 }
1194
1195 /* Drop the frame if a receive error was flagged. */
1196 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1197 return;
1198
1199 skb = sp->rx_buff[entry];
1200 if (!skb)
1201 return;
1202
1203 /* accept this frame and send to upper layer */
1204 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1205 if (framelen > sp->rxfrag_size)
1206 framelen = sp->rxfrag_size;
1207
1208 skb_put(skb, framelen);
1209 skb->protocol = eth_type_trans(skb, dev);
1210 skb_checksum_none_assert(skb);
1211 netif_rx(skb);
1212 sp->rx_buff[entry] = NULL;
1213}
1214
1215static void ipg_nic_rx_with_start(struct net_device *dev,
1216 struct ipg_nic_private *sp,
1217 struct ipg_rx *rxfd, unsigned entry)
1218{
1219 struct ipg_jumbo *jumbo = &sp->jumbo;
1220 struct pci_dev *pdev = sp->pdev;
1221 struct sk_buff *skb;
1222
1223 /* Drop the frame if a receive error was flagged. */
1224 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1225 return;
1226
1227 /* accept this frame and send to upper layer */
1228 skb = sp->rx_buff[entry];
1229 if (!skb)
1230 return;
1231
1232 if (jumbo->found_start)
1233 dev_kfree_skb_irq(jumbo->skb);
1234
1235 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1236 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1237
1238 skb_put(skb, sp->rxfrag_size);
1239
1240 jumbo->found_start = 1;
1241 jumbo->current_size = sp->rxfrag_size;
1242 jumbo->skb = skb;
1243
1244 sp->rx_buff[entry] = NULL;
1245}
1246
1247static void ipg_nic_rx_with_end(struct net_device *dev,
1248 struct ipg_nic_private *sp,
1249 struct ipg_rx *rxfd, unsigned entry)
1250{
1251 struct ipg_jumbo *jumbo = &sp->jumbo;
1252
1253 /* Process the frame only if no receive error was flagged. */
1254 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1255 struct sk_buff *skb = sp->rx_buff[entry];
1256
1257 if (!skb)
1258 return;
1259
1260 if (jumbo->found_start) {
1261 int framelen, endframelen;
1262
1263 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1264
1265 endframelen = framelen - jumbo->current_size;
1266 if (framelen > sp->rxsupport_size)
1267 dev_kfree_skb_irq(jumbo->skb);
1268 else {
1269 memcpy(skb_put(jumbo->skb, endframelen),
1270 skb->data, endframelen);
1271
1272 jumbo->skb->protocol =
1273 eth_type_trans(jumbo->skb, dev);
1274
1275 skb_checksum_none_assert(jumbo->skb);
1276 netif_rx(jumbo->skb);
1277 }
1278 }
1279
1280 jumbo->found_start = 0;
1281 jumbo->current_size = 0;
1282 jumbo->skb = NULL;
1283
1284 ipg_nic_rx_free_skb(dev);
1285 } else {
1286 dev_kfree_skb_irq(jumbo->skb);
1287 jumbo->found_start = 0;
1288 jumbo->current_size = 0;
1289 jumbo->skb = NULL;
1290 }
1291}
1292
1293static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1294 struct ipg_nic_private *sp,
1295 struct ipg_rx *rxfd, unsigned entry)
1296{
1297 struct ipg_jumbo *jumbo = &sp->jumbo;
1298
1299 /* Process the frame only if no receive error was flagged. */
1300 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1301 struct sk_buff *skb = sp->rx_buff[entry];
1302
1303 if (skb) {
1304 if (jumbo->found_start) {
1305 jumbo->current_size += sp->rxfrag_size;
1306 if (jumbo->current_size <= sp->rxsupport_size) {
1307 memcpy(skb_put(jumbo->skb,
1308 sp->rxfrag_size),
1309 skb->data, sp->rxfrag_size);
1310 }
1311 }
1312 ipg_nic_rx_free_skb(dev);
1313 }
1314 } else {
1315 dev_kfree_skb_irq(jumbo->skb);
1316 jumbo->found_start = 0;
1317 jumbo->current_size = 0;
1318 jumbo->skb = NULL;
1319 }
1320}
1321
1322static int ipg_nic_rx_jumbo(struct net_device *dev)
1323{
1324 struct ipg_nic_private *sp = netdev_priv(dev);
1325 unsigned int curr = sp->rx_current;
1326 void __iomem *ioaddr = sp->ioaddr;
1327 unsigned int i;
1328
1329 IPG_DEBUG_MSG("_nic_rx\n");
1330
1331 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1332 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1333 struct ipg_rx *rxfd = sp->rxd + entry;
1334
1335 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1336 break;
1337
1338 switch (ipg_nic_rx_check_frame_type(dev)) {
1339 case FRAME_WITH_START_WITH_END:
1340 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1341 break;
1342 case FRAME_WITH_START:
1343 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1344 break;
1345 case FRAME_WITH_END:
1346 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1347 break;
1348 case FRAME_NO_START_NO_END:
1349 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1350 break;
1351 }
1352 }
1353
1354 sp->rx_current = curr;
1355
1356 if (i == IPG_MAXRFDPROCESS_COUNT) {
1357 /* There are more RFDs to process, however the
1358 * allocated amount of RFD processing time has
1359 * expired. Assert Interrupt Requested to make
1360 * sure we come back to process the remaining RFDs.
1361 */
1362 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1363 }
1364
1365 ipg_nic_rxrestore(dev);
1366
1367 return 0;
1368}
1369
1370static int ipg_nic_rx(struct net_device *dev)
1371{
1372 /* Transfer received Ethernet frames to higher network layers. */
1373 struct ipg_nic_private *sp = netdev_priv(dev);
1374 unsigned int curr = sp->rx_current;
1375 void __iomem *ioaddr = sp->ioaddr;
1376 struct ipg_rx *rxfd;
1377 unsigned int i;
1378
1379 IPG_DEBUG_MSG("_nic_rx\n");
1380
1381#define __RFS_MASK \
1382 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1383
1384 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1385 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1386 struct sk_buff *skb = sp->rx_buff[entry];
1387 unsigned int framelen;
1388
1389 rxfd = sp->rxd + entry;
1390
1391 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1392 break;
1393
1394 /* Get received frame length. */
1395 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1396
1397 /* Check for jumbo frame arrival with too small
1398 * RXFRAG_SIZE.
1399 */
1400 if (framelen > sp->rxfrag_size) {
1401 IPG_DEBUG_MSG
1402 ("RFS FrameLen > allocated fragment size\n");
1403
1404 framelen = sp->rxfrag_size;
1405 }
1406
1407 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1408 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1409 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1410 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1411
1412 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1413 (unsigned long int) rxfd->rfs);
1414
1415 /* Increment general receive error statistic. */
1416 sp->stats.rx_errors++;
1417
1418 /* Increment detailed receive error statistics. */
1419 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1420 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1421 sp->stats.rx_fifo_errors++;
1422 }
1423
1424 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1425 IPG_DEBUG_MSG("RX runt occurred\n");
1426 sp->stats.rx_length_errors++;
1427 }
1428
1429 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME; the
1430 * error count is handled by an IPG statistic
1431 * register.
1432 */
1433
1434 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1435 IPG_DEBUG_MSG("RX alignment error occurred\n");
1436 sp->stats.rx_frame_errors++;
1437 }
1438
1439 /* Do nothing for IPG_RFS_RXFCSERROR; the error
1440 * count is handled by an IPG statistic
1441 * register.
1442 */
1443
1444 /* Free the memory associated with the RX
1445 * buffer since it is erroneous and we will
1446 * not pass it to higher layer processes.
1447 */
1448 if (skb) {
1449 __le64 info = rxfd->frag_info;
1450
1451 pci_unmap_single(sp->pdev,
1452 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1453 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454
1455 dev_kfree_skb_irq(skb);
1456 }
1457 } else {
1458
1459 /* Adjust the new buffer length to accommodate the size
1460 * of the received frame.
1461 */
1462 skb_put(skb, framelen);
1463
1464 /* Set the buffer's protocol field to Ethernet. */
1465 skb->protocol = eth_type_trans(skb, dev);
1466
1467 /* The IPG encountered an error with (or
1468 * there were no) IP/TCP/UDP checksums.
1469 * This may or may not indicate an invalid
1470 * IP/TCP/UDP frame was received. Let the
1471 * upper layer decide.
1472 */
1473 skb_checksum_none_assert(skb);
1474
1475 /* Hand off frame for higher layer processing.
1476 * The function netif_rx() releases the sk_buff
1477 * when processing completes.
1478 */
1479 netif_rx(skb);
1480 }
1481
1482 /* Assure RX buffer is not reused by IPG. */
1483 sp->rx_buff[entry] = NULL;
1484 }
1485
1486 /*
1487 * If there are more RFDs to process and the allocated amount of RFD
1488 * processing time has expired, assert Interrupt Requested to make
1489 * sure we come back to process the remaining RFDs.
1490 */
1491 if (i == IPG_MAXRFDPROCESS_COUNT)
1492 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1493
1494#ifdef IPG_DEBUG
1495 /* Check if the RFD list contained no receive frame data. */
1496 if (!i)
1497 sp->EmptyRFDListCount++;
1498#endif
1499 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1500 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1501 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1502 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1503
1504 rxfd = sp->rxd + entry;
1505
1506 IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1507
1508 /* This is an unexpected event; handling it properly
1509 * would require additional code, so for the time being
1510 * just disregard the frame.
1511 */
1512
1513 /* Free the memory associated with the RX
1514 * buffer since it is erroneous and we will
1515 * not pass it to higher layer processes.
1516 */
1517 if (sp->rx_buff[entry]) {
1518 pci_unmap_single(sp->pdev,
1519 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1520 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521 dev_kfree_skb_irq(sp->rx_buff[entry]);
1522 }
1523
1524 /* Assure RX buffer is not reused by IPG. */
1525 sp->rx_buff[entry] = NULL;
1526 }
1527
1528 sp->rx_current = curr;
1529
1530 /* Check to see if there are a minimum number of used
1531 * RFDs before restoring any (should improve performance.)
1532 */
1533 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1534 ipg_nic_rxrestore(dev);
1535
1536 return 0;
1537}
1538
1539static void ipg_reset_after_host_error(struct work_struct *work)
1540{
1541 struct ipg_nic_private *sp =
1542 container_of(work, struct ipg_nic_private, task.work);
1543 struct net_device *dev = sp->dev;
1544
1545 /*
1546 * Acknowledge HostError interrupt by resetting
1547 * IPG DMA and HOST.
1548 */
1549 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1550
1551 init_rfdlist(dev);
1552 init_tfdlist(dev);
1553
1554 if (ipg_io_config(dev) < 0) {
1555 netdev_info(dev, "Cannot recover from PCI error\n");
1556 schedule_delayed_work(&sp->task, HZ);
1557 }
1558}
1559
1560static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1561{
1562 struct net_device *dev = dev_inst;
1563 struct ipg_nic_private *sp = netdev_priv(dev);
1564 void __iomem *ioaddr = sp->ioaddr;
1565 unsigned int handled = 0;
1566 u16 status;
1567
1568 IPG_DEBUG_MSG("_interrupt_handler\n");
1569
1570 if (sp->is_jumbo)
1571 ipg_nic_rxrestore(dev);
1572
1573 spin_lock(&sp->lock);
1574
1575 /* Get interrupt source information, and acknowledge
1576 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1577 * IntRequested, MacControlFrame, LinkEvent) interrupts
1578 * if issued. Also, all IPG interrupts are disabled by
1579 * reading IntStatusAck.
1580 */
1581 status = ipg_r16(INT_STATUS_ACK);
1582
1583 IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1584
1585 /* Not our interrupt (shared IRQ line) -- nothing to do. */
1586 if (!(status & IPG_IS_RSVD_MASK))
1587 goto out_enable;
1588
1589 handled = 1;
1590
1591 if (unlikely(!netif_running(dev)))
1592 goto out_unlock;
1593
1594 /* If RFDListEnd interrupt, restore all used RFDs. */
1595 if (status & IPG_IS_RFD_LIST_END) {
1596 IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1597
1598 /* The RFD list end indicates an RFD was encountered
1599 * with a 0 NextPtr, or with an RFDDone bit set to 1
1600 * (indicating the RFD is not ready for use by the
1601 * IPG.) Try to restore all RFDs.
1602 */
1603 ipg_nic_rxrestore(dev);
1604
1605#ifdef IPG_DEBUG
1606 /* Increment the RFDlistendCount counter. */
1607 sp->RFDlistendCount++;
1608#endif
1609 }
1610
1611 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1612 * IntRequested interrupt, process received frames. */
1613 if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1614 (status & IPG_IS_RFD_LIST_END) ||
1615 (status & IPG_IS_RX_DMA_COMPLETE) ||
1616 (status & IPG_IS_INT_REQUESTED)) {
1617#ifdef IPG_DEBUG
1618 /* Increment the RFD list checked counter if interrupted
1619 * only to check the RFD list. */
1620 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1621 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1622 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1623 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1624 IPG_IS_UPDATE_STATS)))
1625 sp->RFDListCheckedCount++;
1626#endif
1627
1628 if (sp->is_jumbo)
1629 ipg_nic_rx_jumbo(dev);
1630 else
1631 ipg_nic_rx(dev);
1632 }
1633
1634 /* If TxDMAComplete interrupt, free used TFDs. */
1635 if (status & IPG_IS_TX_DMA_COMPLETE)
1636 ipg_nic_txfree(dev);
1637
1638 /* TxComplete interrupts indicate one of numerous actions.
1639 * Determine what action to take based on TXSTATUS register.
1640 */
1641 if (status & IPG_IS_TX_COMPLETE)
1642 ipg_nic_txcleanup(dev);
1643
1644 /* If UpdateStats interrupt, update Linux Ethernet statistics */
1645 if (status & IPG_IS_UPDATE_STATS)
1646 ipg_nic_get_stats(dev);
1647
1648 /* If HostError interrupt, reset IPG. */
1649 if (status & IPG_IS_HOST_ERROR) {
1650 IPG_DDEBUG_MSG("HostError Interrupt\n");
1651
1652 schedule_delayed_work(&sp->task, 0);
1653 }
1654
1655 /* If LinkEvent interrupt, resolve autonegotiation. */
1656 if (status & IPG_IS_LINK_EVENT) {
1657 if (ipg_config_autoneg(dev) < 0)
1658 netdev_info(dev, "Auto-negotiation error\n");
1659 }
1660
1661 /* If MACCtrlFrame interrupt, do nothing. */
1662 if (status & IPG_IS_MAC_CTRL_FRAME)
1663 IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1664
1665 /* If RxComplete interrupt, do nothing. */
1666 if (status & IPG_IS_RX_COMPLETE)
1667 IPG_DEBUG_MSG("RxComplete interrupt\n");
1668
1669 /* If RxEarly interrupt, do nothing. */
1670 if (status & IPG_IS_RX_EARLY)
1671 IPG_DEBUG_MSG("RxEarly interrupt\n");
1672
1673out_enable:
1674 /* Re-enable IPG interrupts. */
1675 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1676 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1677 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1678out_unlock:
1679 spin_unlock(&sp->lock);
1680
1681 return IRQ_RETVAL(handled);
1682}
1683
1684static void ipg_rx_clear(struct ipg_nic_private *sp)
1685{
1686 unsigned int i;
1687
1688 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1689 if (sp->rx_buff[i]) {
1690 struct ipg_rx *rxfd = sp->rxd + i;
1691
1692 dev_kfree_skb_irq(sp->rx_buff[i]);
1693 sp->rx_buff[i] = NULL;
1694 pci_unmap_single(sp->pdev,
1695 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1696 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1697 }
1698 }
1699}
1700
1701static void ipg_tx_clear(struct ipg_nic_private *sp)
1702{
1703 unsigned int i;
1704
1705 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1706 if (sp->tx_buff[i]) {
1707 struct ipg_tx *txfd = sp->txd + i;
1708
1709 pci_unmap_single(sp->pdev,
1710 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1711 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1712
1713 dev_kfree_skb_irq(sp->tx_buff[i]);
1714
1715 sp->tx_buff[i] = NULL;
1716 }
1717 }
1718}
1719
1720static int ipg_nic_open(struct net_device *dev)
1721{
1722 struct ipg_nic_private *sp = netdev_priv(dev);
1723 void __iomem *ioaddr = sp->ioaddr;
1724 struct pci_dev *pdev = sp->pdev;
1725 int rc;
1726
1727 IPG_DEBUG_MSG("_nic_open\n");
1728
1729 sp->rx_buf_sz = sp->rxsupport_size;
1730
1731 /* Check for interrupt line conflicts, and request interrupt
1732 * line for IPG.
1733 *
1734 * IMPORTANT: Disable IPG interrupts prior to registering
1735 * IRQ.
1736 */
1737 ipg_w16(0x0000, INT_ENABLE);
1738
1739 /* Register the interrupt line to be used by the IPG within
1740 * the Linux system.
1741 */
1742 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1743 dev->name, dev);
1744 if (rc < 0) {
1745 netdev_info(dev, "Error when requesting interrupt\n");
1746 goto out;
1747 }
1748
1749 dev->irq = pdev->irq;
1750
1751 rc = -ENOMEM;
1752
1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754 &sp->rxd_map, GFP_KERNEL);
1755 if (!sp->rxd)
1756 goto err_free_irq_0;
1757
1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759 &sp->txd_map, GFP_KERNEL);
1760 if (!sp->txd)
1761 goto err_free_rx_1;
1762
1763 rc = init_rfdlist(dev);
1764 if (rc < 0) {
1765 netdev_info(dev, "Error during configuration\n");
1766 goto err_free_tx_2;
1767 }
1768
1769 init_tfdlist(dev);
1770
1771 rc = ipg_io_config(dev);
1772 if (rc < 0) {
1773 netdev_info(dev, "Error during configuration\n");
1774 goto err_release_tfdlist_3;
1775 }
1776
1777 /* Resolve autonegotiation. */
1778 if (ipg_config_autoneg(dev) < 0)
1779 netdev_info(dev, "Auto-negotiation error\n");
1780
1781 /* Initialize the jumbo frame control variables. */
1782 sp->jumbo.found_start = 0;
1783 sp->jumbo.current_size = 0;
1784 sp->jumbo.skb = NULL;
1785
1786 /* Enable transmit and receive operation of the IPG. */
1787 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1788 IPG_MC_RSVD_MASK, MAC_CTRL);
1789
1790 netif_start_queue(dev);
1791out:
1792 return rc;
1793
1794err_release_tfdlist_3:
1795 ipg_tx_clear(sp);
1796 ipg_rx_clear(sp);
1797err_free_tx_2:
1798 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1799err_free_rx_1:
1800 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1801err_free_irq_0:
1802 free_irq(pdev->irq, dev);
1803 goto out;
1804}
1805
1806static int ipg_nic_stop(struct net_device *dev)
1807{
1808 struct ipg_nic_private *sp = netdev_priv(dev);
1809 void __iomem *ioaddr = sp->ioaddr;
1810 struct pci_dev *pdev = sp->pdev;
1811
1812 IPG_DEBUG_MSG("_nic_stop\n");
1813
1814 netif_stop_queue(dev);
1815
1816 IPG_DUMPTFDLIST(dev);
1817
1818 do {
1819 (void) ipg_r16(INT_STATUS_ACK);
1820
1821 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1822
1823 synchronize_irq(pdev->irq);
1824 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1825
1826 ipg_rx_clear(sp);
1827
1828 ipg_tx_clear(sp);
1829
1830 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1832
1833 free_irq(pdev->irq, dev);
1834
1835 return 0;
1836}
1837
1838static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1839 struct net_device *dev)
1840{
1841 struct ipg_nic_private *sp = netdev_priv(dev);
1842 void __iomem *ioaddr = sp->ioaddr;
1843 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1844 unsigned long flags;
1845 struct ipg_tx *txfd;
1846
1847 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1848
1849 /* If in 10Mbps mode, stop the transmit queue so
1850 * no more transmit frames are accepted.
1851 */
1852 if (sp->tenmbpsmode)
1853 netif_stop_queue(dev);
1854
1855 if (sp->reset_current_tfd) {
1856 sp->reset_current_tfd = 0;
1857 entry = 0;
1858 }
1859
1860 txfd = sp->txd + entry;
1861
1862 sp->tx_buff[entry] = skb;
1863
1864 /* Clear all TFC fields, except TFDDONE. */
1865 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1866
1867 /* Specify the TFC field within the TFD. */
1868 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1869 (IPG_TFC_FRAMEID & sp->tx_current) |
1870 (IPG_TFC_FRAGCOUNT & (1 << 24)));
1871 /*
1872 * 16--17 (WordAlign) <- 3 (disable),
1873 * 0--15 (FrameId) <- sp->tx_current,
1874 * 24--27 (FragCount) <- 1
1875 */
1876
1877 /* Request TxComplete interrupts at an interval defined
1878 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1879 * Request TxComplete interrupt for every frame
1880 * if in 10Mbps mode to accommodate problem with 10Mbps
1881 * processing.
1882 */
1883 if (sp->tenmbpsmode)
1884 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1885 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1886 /* Based on compilation option, determine if FCS is to be
1887 * appended to transmit frame by IPG.
1888 */
1889 if (!(IPG_APPEND_FCS_ON_TX))
1890 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1891
1892 /* Based on compilation option, determine if IP, TCP and/or
1893 * UDP checksums are to be added to transmit frame by IPG.
1894 */
1895 if (IPG_ADD_IPCHECKSUM_ON_TX)
1896 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1897
1898 if (IPG_ADD_TCPCHECKSUM_ON_TX)
1899 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1900
1901 if (IPG_ADD_UDPCHECKSUM_ON_TX)
1902 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1903
1904 /* Based on compilation option, determine if VLAN tag info is to be
1905 * inserted into transmit frame by IPG.
1906 */
1907 if (IPG_INSERT_MANUAL_VLAN_TAG) {
1908 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1909 ((u64) IPG_MANUAL_VLAN_VID << 32) |
1910 ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1911 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1912 }
1913
1914 /* The fragment start location within system memory is defined
1915 * by the sk_buff structure's data field. The bus address the
1916 * IPG must DMA from is obtained by mapping that location
1917 * with pci_map_single().
1918 */
1919 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1920 skb->len, PCI_DMA_TODEVICE));
1921
1922 /* The length of the fragment within system memory is defined by
1923 * the sk_buff structure's len field.
1924 */
1925 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1926 ((u64) (skb->len & 0xffff) << 48));
1927
1928 /* Clear the TFDDone bit last to indicate the TFD is ready
1929 * for transfer to the IPG.
1930 */
1931 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1932
1933 spin_lock_irqsave(&sp->lock, flags);
1934
1935 sp->tx_current++;
1936
1937 mmiowb();
1938
1939 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1940
1941 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1942 netif_stop_queue(dev);
1943
1944 spin_unlock_irqrestore(&sp->lock, flags);
1945
1946 return NETDEV_TX_OK;
1947}
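/* Descriptor ownership works as follows: the TFD is built with
 * TFDDone set, TFDDone is cleared only once every other field is
 * valid, and the TxDMAPollNow write then tells the IPG to fetch it.
 * The hardware sets TFDDone again on completion, which is the
 * condition ipg_nic_txfree() polls for before releasing the skb.
 */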
1948
1949static void ipg_set_phy_default_param(unsigned char rev,
1950 struct net_device *dev, int phy_address)
1951{
1952 unsigned short length;
1953 unsigned char revision;
1954 const unsigned short *phy_param;
1955 unsigned short address, value;
1956
1957 phy_param = &DefaultPhyParam[0];
1958 length = *phy_param & 0x00FF;
1959 revision = (unsigned char)((*phy_param) >> 8);
1960 phy_param++;
1961 while (length != 0) {
1962 if (rev == revision) {
1963 while (length > 1) {
1964 address = *phy_param;
1965 value = *(phy_param + 1);
1966 phy_param += 2;
1967 mdio_write(dev, phy_address, address, value);
1968 length -= 4;
1969 }
1970 break;
1971 } else {
1972 phy_param += length / 2;
1973 length = *phy_param & 0x00FF;
1974 revision = (unsigned char)((*phy_param) >> 8);
1975 phy_param++;
1976 }
1977 }
1978}
1979
1980static int read_eeprom(struct net_device *dev, int eep_addr)
1981{
1982 void __iomem *ioaddr = ipg_ioaddr(dev);
1983 unsigned int i;
1984 int ret = 0;
1985 u16 value;
1986
1987 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1988 ipg_w16(value, EEPROM_CTRL);
1989
1990 for (i = 0; i < 1000; i++) {
1991 u16 data;
1992
1993 mdelay(10);
1994 data = ipg_r16(EEPROM_CTRL);
1995 if (!(data & IPG_EC_EEPROM_BUSY)) {
1996 ret = ipg_r16(EEPROM_DATA);
1997 break;
1998 }
1999 }
2000 return ret;
2001}
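/* EEPROM layout used by this driver: word 6 holds the LED mode and
 * words 16-18 hold the station MAC address (see ipg_hw_init() below).
 */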
2002
2003static void ipg_init_mii(struct net_device *dev)
2004{
2005 struct ipg_nic_private *sp = netdev_priv(dev);
2006 struct mii_if_info *mii_if = &sp->mii_if;
2007 int phyaddr;
2008
2009 mii_if->dev = dev;
2010 mii_if->mdio_read = mdio_read;
2011 mii_if->mdio_write = mdio_write;
2012 mii_if->phy_id_mask = 0x1f;
2013 mii_if->reg_num_mask = 0x1f;
2014
2015 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2016
2017 if (phyaddr != 0x1f) {
2018 u16 mii_phyctrl, mii_1000cr;
2019
2020 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2021 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2022 GMII_PHY_1000BASETCONTROL_PreferMaster;
2023 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2024
2025 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2026
2027 /* Set default phyparam */
2028 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2029
2030 /* Reset PHY */
2031 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2032 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2033
2034 }
2035}
2036
2037static int ipg_hw_init(struct net_device *dev)
2038{
2039 struct ipg_nic_private *sp = netdev_priv(dev);
2040 void __iomem *ioaddr = sp->ioaddr;
2041 unsigned int i;
2042 int rc;
2043
2044 /* Read/Write and Reset EEPROM Value */
2045 /* Read LED Mode Configuration from EEPROM */
2046 sp->led_mode = read_eeprom(dev, 6);
2047
2048 /* Reset all functions within the IPG. Do not assert
2049 * RST_OUT as not compatible with some PHYs.
2050 */
2051 rc = ipg_reset(dev, IPG_RESET_MASK);
2052 if (rc < 0)
2053 goto out;
2054
2055 ipg_init_mii(dev);
2056
2057 /* Read MAC Address from EEPROM */
2058 for (i = 0; i < 3; i++)
2059 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2060
2061 for (i = 0; i < 3; i++)
2062 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2063
2064 /* Set station address in ethernet_device structure. */
2065 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2066 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2067 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2068 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2069 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2070 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2071out:
2072 return rc;
2073}
2074
2075static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2076{
2077 struct ipg_nic_private *sp = netdev_priv(dev);
2078 int rc;
2079
2080 mutex_lock(&sp->mii_mutex);
2081 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082 mutex_unlock(&sp->mii_mutex);
2083
2084 return rc;
2085}
2086
2087static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2088{
2089 struct ipg_nic_private *sp = netdev_priv(dev);
2090 int err;
2091
2092	/* Function to accommodate changes to the Maximum Transmission Unit
2093	 * (MTU) of the IPG NIC. Cannot use the default function since
2094 * the default will not allow for MTU > 1500 bytes.
2095 */
2096
2097 IPG_DEBUG_MSG("_nic_change_mtu\n");
2098
2099 /*
2100	 * Check that the new MTU value is between 68, the minimum MTU required
2101	 * by IPv4, and 10 KB (10240 bytes), the largest MTU the hardware supports.
2102 */
2103 if (new_mtu < 68 || new_mtu > 10240)
2104 return -EINVAL;
2105
2106 err = ipg_nic_stop(dev);
2107 if (err)
2108 return err;
2109
2110 dev->mtu = new_mtu;
2111
2112 sp->max_rxframe_size = new_mtu;
2113
2114 sp->rxfrag_size = new_mtu;
2115 if (sp->rxfrag_size > 4088)
2116 sp->rxfrag_size = 4088;
2117
2118 sp->rxsupport_size = sp->max_rxframe_size;
2119
2120 if (new_mtu > 0x0600)
2121 sp->is_jumbo = true;
2122 else
2123 sp->is_jumbo = false;
2124
2125 return ipg_nic_open(dev);
2126}
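
A worked example of the sizing above, for a hypothetical new_mtu of 9000:

/* new_mtu = 9000 (hypothetical):
 *   max_rxframe_size = 9000
 *   rxsupport_size   = 9000
 *   rxfrag_size      = 4088   (capped at one RFD fragment buffer)
 *   is_jumbo         = true   (9000 > 0x600 == 1536)
 * so each jumbo frame is reassembled from several receive fragments.
 */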
2127
2128static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2129{
2130 struct ipg_nic_private *sp = netdev_priv(dev);
2131 int rc;
2132
2133 mutex_lock(&sp->mii_mutex);
2134 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135 mutex_unlock(&sp->mii_mutex);
2136
2137 return rc;
2138}
2139
2140static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2141{
2142 struct ipg_nic_private *sp = netdev_priv(dev);
2143 int rc;
2144
2145 mutex_lock(&sp->mii_mutex);
2146 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147 mutex_unlock(&sp->mii_mutex);
2148
2149 return rc;
2150}
2151
2152static int ipg_nway_reset(struct net_device *dev)
2153{
2154 struct ipg_nic_private *sp = netdev_priv(dev);
2155 int rc;
2156
2157 mutex_lock(&sp->mii_mutex);
2158 rc = mii_nway_restart(&sp->mii_if);
2159 mutex_unlock(&sp->mii_mutex);
2160
2161 return rc;
2162}
2163
2164static const struct ethtool_ops ipg_ethtool_ops = {
2165 .get_settings = ipg_get_settings,
2166 .set_settings = ipg_set_settings,
2167 .nway_reset = ipg_nway_reset,
2168};
2169
2170static void ipg_remove(struct pci_dev *pdev)
2171{
2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct ipg_nic_private *sp = netdev_priv(dev);
2174
2175 IPG_DEBUG_MSG("_remove\n");
2176
2177 /* Un-register Ethernet device. */
2178 unregister_netdev(dev);
2179
2180 pci_iounmap(pdev, sp->ioaddr);
2181
2182 pci_release_regions(pdev);
2183
2184 free_netdev(dev);
2185 pci_disable_device(pdev);
2186}
2187
2188static const struct net_device_ops ipg_netdev_ops = {
2189 .ndo_open = ipg_nic_open,
2190 .ndo_stop = ipg_nic_stop,
2191 .ndo_start_xmit = ipg_nic_hard_start_xmit,
2192 .ndo_get_stats = ipg_nic_get_stats,
2193 .ndo_set_rx_mode = ipg_nic_set_multicast_list,
2194 .ndo_do_ioctl = ipg_ioctl,
2195 .ndo_tx_timeout = ipg_tx_timeout,
2196 .ndo_change_mtu = ipg_nic_change_mtu,
2197 .ndo_set_mac_address = eth_mac_addr,
2198 .ndo_validate_addr = eth_validate_addr,
2199};
2200
2201static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2202{
2203 unsigned int i = id->driver_data;
2204 struct ipg_nic_private *sp;
2205 struct net_device *dev;
2206 void __iomem *ioaddr;
2207 int rc;
2208
2209 rc = pci_enable_device(pdev);
2210 if (rc < 0)
2211 goto out;
2212
2213 pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2214
2215 pci_set_master(pdev);
2216
2217 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2218 if (rc < 0) {
2219 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2220 if (rc < 0) {
2221 pr_err("%s: DMA config failed\n", pci_name(pdev));
2222 goto err_disable_0;
2223 }
2224 }
2225
2226 /*
2227 * Initialize net device.
2228 */
2229 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2230 if (!dev) {
2231 rc = -ENOMEM;
2232 goto err_disable_0;
2233 }
2234
2235 sp = netdev_priv(dev);
2236 spin_lock_init(&sp->lock);
2237 mutex_init(&sp->mii_mutex);
2238
2239 sp->is_jumbo = IPG_IS_JUMBO;
2240 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2241 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2242 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2243
2244 /* Declare IPG NIC functions for Ethernet device methods.
2245 */
2246 dev->netdev_ops = &ipg_netdev_ops;
2247 SET_NETDEV_DEV(dev, &pdev->dev);
2248 dev->ethtool_ops = &ipg_ethtool_ops;
2249
2250 rc = pci_request_regions(pdev, DRV_NAME);
2251 if (rc)
2252 goto err_free_dev_1;
2253
2254 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2255 if (!ioaddr) {
2256 pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2257 rc = -EIO;
2258 goto err_release_regions_2;
2259 }
2260
2261 /* Save the pointer to the PCI device information. */
2262 sp->ioaddr = ioaddr;
2263 sp->pdev = pdev;
2264 sp->dev = dev;
2265
2266 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2267
2268 pci_set_drvdata(pdev, dev);
2269
2270 rc = ipg_hw_init(dev);
2271 if (rc < 0)
2272 goto err_unmap_3;
2273
2274 rc = register_netdev(dev);
2275 if (rc < 0)
2276 goto err_unmap_3;
2277
2278 netdev_info(dev, "Ethernet device registered\n");
2279out:
2280 return rc;
2281
2282err_unmap_3:
2283 pci_iounmap(pdev, ioaddr);
2284err_release_regions_2:
2285 pci_release_regions(pdev);
2286err_free_dev_1:
2287 free_netdev(dev);
2288err_disable_0:
2289 pci_disable_device(pdev);
2290 goto out;
2291}
2292
2293static struct pci_driver ipg_pci_driver = {
2294 .name = IPG_DRIVER_NAME,
2295 .id_table = ipg_pci_tbl,
2296 .probe = ipg_probe,
2297 .remove = ipg_remove,
2298};
2299
2300module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de606281f97b..000000000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
1/*
2 * Include file for Gigabit Ethernet device driver for Network
3 * Interface Cards (NICs) utilizing the Tamarack Microelectronics
4 * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
5 * Controller.
6 */
7#ifndef __LINUX_IPG_H
8#define __LINUX_IPG_H
9
10#include <linux/module.h>
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/ioport.h>
15#include <linux/errno.h>
16#include <asm/io.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/skbuff.h>
22#include <asm/bitops.h>
23
24/*
25 * Constants
26 */
27
28/* GMII based PHY IDs */
29#define NS 0x2000
30#define MARVELL 0x0141
31#define ICPLUS_PHY 0x243
32
33/* NIC Physical Layer Device MII register fields. */
34#define MII_PHY_SELECTOR_IEEE8023 0x0001
35#define MII_PHY_TECHABILITYFIELD 0x1FE0
36
37/* GMII_PHY_1000 needs to be set to prefer master */
38#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
39
40/* NIC Physical Layer Device GMII constants. */
41#define GMII_PREAMBLE 0xFFFFFFFF
42#define GMII_ST 0x1
43#define GMII_READ 0x2
44#define GMII_WRITE 0x1
45#define GMII_TA_READ_MASK 0x1
46#define GMII_TA_WRITE 0x2
47
48/* I/O register offsets. */
49enum ipg_regs {
50 DMA_CTRL = 0x00,
51 RX_DMA_STATUS = 0x08, /* Unused + reserved */
52 TFD_LIST_PTR_0 = 0x10,
53 TFD_LIST_PTR_1 = 0x14,
54 TX_DMA_BURST_THRESH = 0x18,
55 TX_DMA_URGENT_THRESH = 0x19,
56 TX_DMA_POLL_PERIOD = 0x1a,
57 RFD_LIST_PTR_0 = 0x1c,
58 RFD_LIST_PTR_1 = 0x20,
59 RX_DMA_BURST_THRESH = 0x24,
60 RX_DMA_URGENT_THRESH = 0x25,
61 RX_DMA_POLL_PERIOD = 0x26,
62 DEBUG_CTRL = 0x2c,
63 ASIC_CTRL = 0x30,
64 FIFO_CTRL = 0x38, /* Unused */
65 FLOW_OFF_THRESH = 0x3c,
66 FLOW_ON_THRESH = 0x3e,
67 EEPROM_DATA = 0x48,
68 EEPROM_CTRL = 0x4a,
69 EXPROM_ADDR = 0x4c, /* Unused */
70 EXPROM_DATA = 0x50, /* Unused */
71 WAKE_EVENT = 0x51, /* Unused */
72 COUNTDOWN = 0x54, /* Unused */
73 INT_STATUS_ACK = 0x5a,
74 INT_ENABLE = 0x5c,
75 INT_STATUS = 0x5e, /* Unused */
76 TX_STATUS = 0x60,
77 MAC_CTRL = 0x6c,
78 VLAN_TAG = 0x70, /* Unused */
79 PHY_SET = 0x75,
80 PHY_CTRL = 0x76,
81 STATION_ADDRESS_0 = 0x78,
82 STATION_ADDRESS_1 = 0x7a,
83 STATION_ADDRESS_2 = 0x7c,
84 MAX_FRAME_SIZE = 0x86,
85 RECEIVE_MODE = 0x88,
86 HASHTABLE_0 = 0x8c,
87 HASHTABLE_1 = 0x90,
88 RMON_STATISTICS_MASK = 0x98,
89 STATISTICS_MASK = 0x9c,
90 RX_JUMBO_FRAMES = 0xbc, /* Unused */
91 TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
92 IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
93 UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
94 TX_JUMBO_FRAMES = 0xf4 /* Unused */
95};
96
97/* Ethernet MIB statistic register offsets. */
98#define IPG_OCTETRCVOK 0xA8
99#define IPG_MCSTOCTETRCVDOK 0xAC
100#define IPG_BCSTOCTETRCVOK 0xB0
101#define IPG_FRAMESRCVDOK 0xB4
102#define IPG_MCSTFRAMESRCVDOK 0xB8
103#define IPG_BCSTFRAMESRCVDOK 0xBE
104#define IPG_MACCONTROLFRAMESRCVD 0xC6
105#define IPG_FRAMETOOLONGERRORS 0xC8
106#define IPG_INRANGELENGTHERRORS 0xCA
107#define IPG_FRAMECHECKSEQERRORS 0xCC
108#define IPG_FRAMESLOSTRXERRORS 0xCE
109#define IPG_OCTETXMTOK 0xD0
110#define IPG_MCSTOCTETXMTOK 0xD4
111#define IPG_BCSTOCTETXMTOK 0xD8
112#define IPG_FRAMESXMTDOK 0xDC
113#define IPG_MCSTFRAMESXMTDOK 0xE0
114#define IPG_FRAMESWDEFERREDXMT 0xE4
115#define IPG_LATECOLLISIONS 0xE8
116#define IPG_MULTICOLFRAMES 0xEC
117#define IPG_SINGLECOLFRAMES 0xF0
118#define IPG_BCSTFRAMESXMTDOK 0xF6
119#define IPG_CARRIERSENSEERRORS 0xF8
120#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
121#define IPG_FRAMESABORTXSCOLLS 0xFC
122#define IPG_FRAMESWEXDEFERRAL 0xFE
123
124/* RMON statistic register offsets. */
125#define IPG_ETHERSTATSCOLLISIONS 0x100
126#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
127#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
128#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
129#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
130#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
131#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
132#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
133#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
134#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
135#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
136#define IPG_ETHERSTATSFRAGMENTS 0x12C
137#define IPG_ETHERSTATSJABBERS 0x130
138#define IPG_ETHERSTATSOCTETS 0x134
139#define IPG_ETHERSTATSPKTS 0x138
140#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
141#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
142#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
143#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
144#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
145#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
146
147/* RMON statistic register equivalents. */
148#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
149#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
150#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
151#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
152#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
153#define IPG_ETHERSTATSDROPEVENTS 0xCE
154
155/* Serial EEPROM offsets */
156#define IPG_EEPROM_CONFIGPARAM 0x00
157#define IPG_EEPROM_ASICCTRL 0x01
158#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
159#define IPG_EEPROM_SUBSYSTEMID 0x03
160#define IPG_EEPROM_STATIONADDRESS0 0x10
161#define IPG_EEPROM_STATIONADDRESS1 0x11
162#define IPG_EEPROM_STATIONADDRESS2 0x12
163
164/* Register & data structure bit masks */
165
166/* PCI register masks. */
167
168/* IOBaseAddress */
169#define IPG_PIB_RSVD_MASK 0xFFFFFE01
170#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
171#define IPG_PIB_IOBASEADDRIND 0x00000001
172
173/* MemBaseAddress */
174#define IPG_PMB_RSVD_MASK 0xFFFFFE07
175#define IPG_PMB_MEMBASEADDRIND 0x00000001
176#define IPG_PMB_MEMMAPTYPE 0x00000006
177#define IPG_PMB_MEMMAPTYPE0 0x00000002
178#define IPG_PMB_MEMMAPTYPE1 0x00000004
179#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
180
181/* ConfigStatus */
182#define IPG_CS_RSVD_MASK 0xFFB0
183#define IPG_CS_CAPABILITIES 0x0010
184#define IPG_CS_66MHZCAPABLE 0x0020
185#define IPG_CS_FASTBACK2BACK 0x0080
186#define IPG_CS_DATAPARITYREPORTED 0x0100
187#define IPG_CS_DEVSELTIMING 0x0600
188#define IPG_CS_SIGNALEDTARGETABORT 0x0800
189#define IPG_CS_RECEIVEDTARGETABORT 0x1000
190#define IPG_CS_RECEIVEDMASTERABORT 0x2000
191#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
192#define IPG_CS_DETECTEDPARITYERROR 0x8000
193
194/* TFD data structure masks. */
195
196/* TFDList, TFC */
197#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
198#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
199#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
200#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
201#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
202#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
203#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
204#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
205#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
206#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
207#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
208#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
209#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
210#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
211#define IPG_TFC_TFDDONE 0x0000000080000000ULL
212#define IPG_TFC_VID 0x00000FFF00000000ULL
213#define IPG_TFC_CFI 0x0000100000000000ULL
214#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
215
216/* TFDList, FragInfo */
217#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
218#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
219#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
220
221/* RFD data structure masks. */
222
223/* RFDList, RFS */
224#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
225#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
226#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
227#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
228#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
229#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
230#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
231#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
232#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
233#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
234#define IPG_RFS_TCPERROR 0x0000000001000000ULL
235#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
236#define IPG_RFS_UDPERROR 0x0000000004000000ULL
237#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
238#define IPG_RFS_IPERROR 0x0000000010000000ULL
239#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
240#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
241#define IPG_RFS_RFDDONE 0x0000000080000000ULL
242#define IPG_RFS_TCI 0x0000FFFF00000000ULL
243
244/* RFDList, FragInfo */
245#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
246#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
247#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
248
249/* I/O Register masks. */
250
251/* RMON Statistics Mask */
252#define IPG_RZ_ALL 0x0FFFFFFF
253
254/* Statistics Mask */
255#define IPG_SM_ALL 0x0FFFFFFF
256#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
257#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
258#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
259#define IPG_SM_RXJUMBOFRAMES 0x00000008
260#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
261#define IPG_SM_IPCHECKSUMERRORS 0x00000020
262#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
263#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
264#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
265#define IPG_SM_INRANGELENGTHERRORS 0x00000200
266#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
267#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
268#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
269#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
270#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
271#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
272#define IPG_SM_LATECOLLISIONS 0x00010000
273#define IPG_SM_MULTICOLFRAMES 0x00020000
274#define IPG_SM_SINGLECOLFRAMES 0x00040000
275#define IPG_SM_TXJUMBOFRAMES 0x00080000
276#define IPG_SM_CARRIERSENSEERRORS 0x00100000
277#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
278#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
279#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
280
281/* Countdown */
282#define IPG_CD_RSVD_MASK 0x0700FFFF
283#define IPG_CD_COUNT 0x0000FFFF
284#define IPG_CD_COUNTDOWNSPEED 0x01000000
285#define IPG_CD_COUNTDOWNMODE 0x02000000
286#define IPG_CD_COUNTINTENABLED 0x04000000
287
288/* TxDMABurstThresh */
289#define IPG_TB_RSVD_MASK 0xFF
290
291/* TxDMAUrgentThresh */
292#define IPG_TU_RSVD_MASK 0xFF
293
294/* TxDMAPollPeriod */
295#define IPG_TP_RSVD_MASK 0xFF
296
297/* RxDMAUrgentThresh */
298#define IPG_RU_RSVD_MASK 0xFF
299
300/* RxDMAPollPeriod */
301#define IPG_RP_RSVD_MASK 0xFF
302
303/* ReceiveMode */
304#define IPG_RM_RSVD_MASK 0x3F
305#define IPG_RM_RECEIVEUNICAST 0x01
306#define IPG_RM_RECEIVEMULTICAST 0x02
307#define IPG_RM_RECEIVEBROADCAST 0x04
308#define IPG_RM_RECEIVEALLFRAMES 0x08
309#define IPG_RM_RECEIVEMULTICASTHASH 0x10
310#define IPG_RM_RECEIVEIPMULTICAST 0x20
311
312/* PhySet */
313#define IPG_PS_MEM_LENB9B 0x01
314#define IPG_PS_MEM_LEN9 0x02
315#define IPG_PS_NON_COMPDET 0x04
316
317/* PhyCtrl */
318#define IPG_PC_RSVD_MASK 0xFF
319#define IPG_PC_MGMTCLK_LO 0x00
320#define IPG_PC_MGMTCLK_HI 0x01
321#define IPG_PC_MGMTCLK 0x01
322#define IPG_PC_MGMTDATA 0x02
323#define IPG_PC_MGMTDIR 0x04
324#define IPG_PC_DUPLEX_POLARITY 0x08
325#define IPG_PC_DUPLEX_STATUS 0x10
326#define IPG_PC_LINK_POLARITY 0x20
327#define IPG_PC_LINK_SPEED 0xC0
328#define IPG_PC_LINK_SPEED_10MBPS 0x40
329#define IPG_PC_LINK_SPEED_100MBPS 0x80
330#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
331
332/* DMACtrl */
333#define IPG_DC_RSVD_MASK 0xC07D9818
334#define IPG_DC_RX_DMA_COMPLETE 0x00000008
335#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
336#define IPG_DC_TX_DMA_COMPLETE 0x00000800
337#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
338#define IPG_DC_TX_DMA_IN_PROG 0x00008000
339#define IPG_DC_RX_EARLY_DISABLE 0x00010000
340#define IPG_DC_MWI_DISABLE 0x00040000
341#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
342#define IPG_DC_TX_BURST_LIMIT 0x00700000
343#define IPG_DC_TARGET_ABORT 0x40000000
344#define IPG_DC_MASTER_ABORT 0x80000000
345
346/* ASICCtrl */
347#define IPG_AC_RSVD_MASK 0x07FFEFF2
348#define IPG_AC_EXP_ROM_SIZE 0x00000002
349#define IPG_AC_PHY_SPEED10 0x00000010
350#define IPG_AC_PHY_SPEED100 0x00000020
351#define IPG_AC_PHY_SPEED1000 0x00000040
352#define IPG_AC_PHY_MEDIA 0x00000080
353#define IPG_AC_FORCED_CFG 0x00000700
354#define IPG_AC_D3RESETDISABLE 0x00000800
355#define IPG_AC_SPEED_UP_MODE 0x00002000
356#define IPG_AC_LED_MODE 0x00004000
357#define IPG_AC_RST_OUT_POLARITY 0x00008000
358#define IPG_AC_GLOBAL_RESET 0x00010000
359#define IPG_AC_RX_RESET 0x00020000
360#define IPG_AC_TX_RESET 0x00040000
361#define IPG_AC_DMA 0x00080000
362#define IPG_AC_FIFO 0x00100000
363#define IPG_AC_NETWORK 0x00200000
364#define IPG_AC_HOST 0x00400000
365#define IPG_AC_AUTO_INIT 0x00800000
366#define IPG_AC_RST_OUT 0x01000000
367#define IPG_AC_INT_REQUEST 0x02000000
368#define IPG_AC_RESET_BUSY 0x04000000
369#define IPG_AC_LED_SPEED 0x08000000
370#define IPG_AC_LED_MODE_BIT_1 0x20000000
371
372/* EepromCtrl */
373#define IPG_EC_RSVD_MASK 0x83FF
374#define IPG_EC_EEPROM_ADDR 0x00FF
375#define IPG_EC_EEPROM_OPCODE 0x0300
376#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
377#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
378#define IPG_EC_EEPROM_READOPCODE 0x0200
379#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
380#define IPG_EC_EEPROM_BUSY 0x8000
381
382/* FIFOCtrl */
383#define IPG_FC_RSVD_MASK 0xC001
384#define IPG_FC_RAM_TEST_MODE 0x0001
385#define IPG_FC_TRANSMITTING 0x4000
386#define IPG_FC_RECEIVING 0x8000
387
388/* TxStatus */
389#define IPG_TS_RSVD_MASK 0xFFFF00DD
390#define IPG_TS_TX_ERROR 0x00000001
391#define IPG_TS_LATE_COLLISION 0x00000004
392#define IPG_TS_TX_MAX_COLL 0x00000008
393#define IPG_TS_TX_UNDERRUN 0x00000010
394#define IPG_TS_TX_IND_REQD 0x00000040
395#define IPG_TS_TX_COMPLETE 0x00000080
396#define IPG_TS_TX_FRAMEID 0xFFFF0000
397
398/* WakeEvent */
399#define IPG_WE_WAKE_PKT_ENABLE 0x01
400#define IPG_WE_MAGIC_PKT_ENABLE 0x02
401#define IPG_WE_LINK_EVT_ENABLE 0x04
402#define IPG_WE_WAKE_POLARITY 0x08
403#define IPG_WE_WAKE_PKT_EVT 0x10
404#define IPG_WE_MAGIC_PKT_EVT 0x20
405#define IPG_WE_LINK_EVT 0x40
406#define IPG_WE_WOL_ENABLE 0x80
407
408/* IntEnable */
409#define IPG_IE_RSVD_MASK 0x1FFE
410#define IPG_IE_HOST_ERROR 0x0002
411#define IPG_IE_TX_COMPLETE 0x0004
412#define IPG_IE_MAC_CTRL_FRAME 0x0008
413#define IPG_IE_RX_COMPLETE 0x0010
414#define IPG_IE_RX_EARLY 0x0020
415#define IPG_IE_INT_REQUESTED 0x0040
416#define IPG_IE_UPDATE_STATS 0x0080
417#define IPG_IE_LINK_EVENT 0x0100
418#define IPG_IE_TX_DMA_COMPLETE 0x0200
419#define IPG_IE_RX_DMA_COMPLETE 0x0400
420#define IPG_IE_RFD_LIST_END 0x0800
421#define IPG_IE_RX_DMA_PRIORITY 0x1000
422
423/* IntStatus */
424#define IPG_IS_RSVD_MASK 0x1FFF
425#define IPG_IS_INTERRUPT_STATUS 0x0001
426#define IPG_IS_HOST_ERROR 0x0002
427#define IPG_IS_TX_COMPLETE 0x0004
428#define IPG_IS_MAC_CTRL_FRAME 0x0008
429#define IPG_IS_RX_COMPLETE 0x0010
430#define IPG_IS_RX_EARLY 0x0020
431#define IPG_IS_INT_REQUESTED 0x0040
432#define IPG_IS_UPDATE_STATS 0x0080
433#define IPG_IS_LINK_EVENT 0x0100
434#define IPG_IS_TX_DMA_COMPLETE 0x0200
435#define IPG_IS_RX_DMA_COMPLETE 0x0400
436#define IPG_IS_RFD_LIST_END 0x0800
437#define IPG_IS_RX_DMA_PRIORITY 0x1000
438
439/* MACCtrl */
440#define IPG_MC_RSVD_MASK 0x7FE33FA3
441#define IPG_MC_IFS_SELECT 0x00000003
442#define IPG_MC_IFS_4352BIT 0x00000003
443#define IPG_MC_IFS_1792BIT 0x00000002
444#define IPG_MC_IFS_1024BIT 0x00000001
445#define IPG_MC_IFS_96BIT 0x00000000
446#define IPG_MC_DUPLEX_SELECT 0x00000020
447#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
448#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
449#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
450#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
451#define IPG_MC_RCV_FCS 0x00000200
452#define IPG_MC_FIFO_LOOPBACK 0x00000400
453#define IPG_MC_MAC_LOOPBACK 0x00000800
454#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
455#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
456#define IPG_MC_COLLISION_DETECT 0x00010000
457#define IPG_MC_CARRIER_SENSE 0x00020000
458#define IPG_MC_STATISTICS_ENABLE 0x00200000
459#define IPG_MC_STATISTICS_DISABLE 0x00400000
460#define IPG_MC_STATISTICS_ENABLED 0x00800000
461#define IPG_MC_TX_ENABLE 0x01000000
462#define IPG_MC_TX_DISABLE 0x02000000
463#define IPG_MC_TX_ENABLED 0x04000000
464#define IPG_MC_RX_ENABLE 0x08000000
465#define IPG_MC_RX_DISABLE 0x10000000
466#define IPG_MC_RX_ENABLED 0x20000000
467#define IPG_MC_PAUSED 0x40000000
468
469/*
470 * Tune
471 */
472
473/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
474#define IPG_APPEND_FCS_ON_TX 1
475
476/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
477#define IPG_STRIP_FCS_ON_RX 1
478
479/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
480 * Ethernet errors.
481 */
482#define IPG_DROP_ON_RX_ETH_ERRORS 1
483
484/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
485 * (via TFC).
486 */
487#define IPG_INSERT_MANUAL_VLAN_TAG 0
488
489/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
490#define IPG_ADD_IPCHECKSUM_ON_TX 0
491
492/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
493 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
494 */
495#define IPG_ADD_TCPCHECKSUM_ON_TX 0
496
497/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
498 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
499 */
500#define IPG_ADD_UDPCHECKSUM_ON_TX 0
501
502/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
503 * constants as desired.
504 */
505#define IPG_MANUAL_VLAN_VID 0xABC
506#define IPG_MANUAL_VLAN_CFI 0x1
507#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
508
509#define IPG_IO_REG_RANGE 0xFF
510#define IPG_MEM_REG_RANGE 0x154
511#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
512#define IPG_NIC_PHY_ADDRESS 0x01
513#define IPG_DMALIST_ALIGN_PAD 0x07
514#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
515
516/* Number of milliseconds to wait after issuing a software reset.
517 * IPG_AC_RESETWAIT must be >= 0x05 to allow for proper 10Mbps operation.
518 */
519#define IPG_AC_RESETWAIT 0x05
520
521/* Number of IPG_AC_RESETWAIT time periods before declaring a timeout. */
522#define IPG_AC_RESET_TIMEOUT 0x0A
523
524/* Minimum number of nanoseconds used to toggle MDC clock during
525 * MII/GMII register access.
526 */
527#define IPG_PC_PHYCTRLWAIT_NS 200
528
529#define IPG_TFDLIST_LENGTH 0x100
530
531/* Number of frames between TxDMAComplete interrupts.
532 * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
533 */
534#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
535
536#define IPG_RFDLIST_LENGTH 0x100
537
538/* Maximum number of RFDs to process per interrupt.
539 * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
540 */
541#define IPG_MAXRFDPROCESS_COUNT 0x80
542
543/* Minimum margin between the last freed RFD and the current RFD.
544 * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
545 */
546#define IPG_MINUSEDRFDSTOFREE 0x80
547
548/* Specify the jumbo frame maximum size in units of 0x600 bytes
549 * (the rx_buffer size that one RFD can carry).
550 */
551#define MAX_JUMBOSIZE 0x8 /* max is 12K */
552
553/* Key register values loaded at driver start up. */
554
555/* TXDMAPollPeriod is specified in 320ns increments.
556 *
557 * Value Time
558 * ---------------------
559 * 0x00-0x01 320ns
560 * 0x03 ~1us
561 * 0x1F ~10us
562 * 0xFF ~82us
563 */
564#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
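A quick worked check of the value chosen here, under the 320ns-per-count encoding the table describes:

/* 0x26 = 38 counts, and 38 * 320 ns ~= 12.2 us between transmit DMA polls. */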
565
566/* TxDMAUrgentThresh specifies the minimum amount of
567 * data in the transmit FIFO before asserting an
568 * urgent transmit DMA request.
569 *
570 * Value Min TxFIFO occupied space before urgent TX request
571 * ---------------------------------------------------------------
572 * 0x00-0x04 128 bytes (1024 bits)
573 * 0x27 1248 bytes (~10000 bits)
574 * 0x30 1536 bytes (12288 bits)
575 * 0xFF 8192 bytes (65535 bits)
576 */
577#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
578
579/* TxDMABurstThresh specifies the minimum amount of
580 * free space in the transmit FIFO before asserting a
581 * transmit DMA request.
582 *
583 * Value Min TxFIFO free space before TX request
584 * ----------------------------------------------------
585 * 0x00-0x08 256 bytes
586 * 0x30 1536 bytes
587 * 0xFF 8192 bytes
588 */
589#define IPG_TXDMABURSTTHRESH_VALUE 0x30
590
591/* RXDMAPollPeriod is specified in 320ns increments.
592 *
593 * Value Time
594 * ---------------------
595 * 0x00-0x01 320ns
596 * 0x03 ~1us
597 * 0x1F ~10us
598 * 0xFF ~82us
599 */
600#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
601
602/* RxDMAUrgentThresh specifies the minimum amount of
603 * free space within the receive FIFO before asserting
604 * an urgent receive DMA request.
605 *
606 * Value Min RxFIFO free space before urgent RX request
607 * ---------------------------------------------------------------
608 * 0x00-0x04 128 bytes (1024 bits)
609 * 0x27 1248 bytes (~10000 bits)
610 * 0x30 1536 bytes (12288 bits)
611 * 0xFF 8192 bytes (65536 bits)
612 */
613#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
614
615/* RxDMABurstThresh specifies the minimum amount of
616 * occupied space within the receive FIFO before asserting
617 * a receive DMA request.
618 *
619 * Value Min RxFIFO occupied space before RX request
620 * ----------------------------------------------------
621 * 0x00-0x08 256 bytes
622 * 0x30 1536 bytes
623 * 0xFF 8192 bytes
624 */
625#define IPG_RXDMABURSTTHRESH_VALUE 0x30
626
627/* FlowOnThresh specifies the maximum amount of occupied
628 * space in the receive FIFO before a PAUSE frame with
629 * maximum pause time is transmitted.
630 *
631 * Value Max RxFIFO occupied space before PAUSE
632 * ---------------------------------------------------
633 * 0x0000 0 bytes
634 * 0x0740 29,696 bytes
635 * 0x07FF 32,752 bytes
636 */
637#define IPG_FLOWONTHRESH_VALUE 0x0740
638
639/* FlowOffThresh specifies the minimum amount of occupied
640 * space in the receive FIFO before a PAUSE frame with
641 * zero pause time is transmitted.
642 *
643 * Value Min RxFIFO occupied space before PAUSE
644 * ---------------------------------------------------
645 * 0x0000 0 bytes
646 * 0x00BF 3056 bytes
647 * 0x07FF 32,752 bytes
648 */
649#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
650
651/*
652 * Miscellaneous macros.
653 */
654
655/* Macros for printing debug statements. */
656#ifdef IPG_DEBUG
657# define IPG_DEBUG_MSG(fmt, args...) \
658do { \
659 if (0) \
660 printk(KERN_DEBUG "IPG: " fmt, ##args); \
661} while (0)
662# define IPG_DDEBUG_MSG(fmt, args...) \
663 printk(KERN_DEBUG "IPG: " fmt, ##args)
664# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
665# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
666#else
667# define IPG_DEBUG_MSG(fmt, args...) \
668do { \
669 if (0) \
670 printk(KERN_DEBUG "IPG: " fmt, ##args); \
671} while (0)
672# define IPG_DDEBUG_MSG(fmt, args...) \
673do { \
674 if (0) \
675 printk(KERN_DEBUG "IPG: " fmt, ##args); \
676} while (0)
677# define IPG_DUMPRFDLIST(args)
678# define IPG_DUMPTFDLIST(args)
679#endif
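
The if (0) wrapper in the disabled variants above is deliberate: the printk call is still compiled, so the format string stays type-checked against its arguments, and the dead call is then optimized away. A stand-alone sketch of the idiom:

#include <stdio.h>

#define DBG_MSG(fmt, ...) \
do { \
	if (0) \
		printf(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	/* No output and no runtime cost, yet a mismatched format string
	 * (e.g. DBG_MSG("%s\n", 42)) still draws a compiler warning. */
	DBG_MSG("value = %d\n", 42);
	return 0;
}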
680
681/*
682 * End miscellaneous macros.
683 */
684
685/* Transmit Frame Descriptor. The IPG supports 15 fragments,
686 * however, Linux requires only a single fragment. Note that each
687 * TFD field is 64 bits wide.
688 */
689struct ipg_tx {
690 __le64 next_desc;
691 __le64 tfc;
692 __le64 frag_info;
693};
694
695/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
696 */
697struct ipg_rx {
698 __le64 next_desc;
699 __le64 rfs;
700 __le64 frag_info;
701};
702
703struct ipg_jumbo {
704 int found_start;
705 int current_size;
706 struct sk_buff *skb;
707};
708
709/* Structure of IPG NIC specific data. */
710struct ipg_nic_private {
711 void __iomem *ioaddr;
712 struct ipg_tx *txd;
713 struct ipg_rx *rxd;
714 dma_addr_t txd_map;
715 dma_addr_t rxd_map;
716 struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
717 struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
718 unsigned int tx_current;
719 unsigned int tx_dirty;
720 unsigned int rx_current;
721 unsigned int rx_dirty;
722 bool is_jumbo;
723 struct ipg_jumbo jumbo;
724 unsigned long rxfrag_size;
725 unsigned long rxsupport_size;
726 unsigned long max_rxframe_size;
727 unsigned int rx_buf_sz;
728 struct pci_dev *pdev;
729 struct net_device *dev;
730 struct net_device_stats stats;
731 spinlock_t lock;
732 int tenmbpsmode;
733
734 u16 led_mode;
735 u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
736
737 struct mutex mii_mutex;
738 struct mii_if_info mii_if;
739 int reset_current_tfd;
740#ifdef IPG_DEBUG
741 int RFDlistendCount;
742 int RFDListCheckedCount;
743 int EmptyRFDListCount;
744#endif
745 struct delayed_work task;
746};
747
748#endif /* __LINUX_IPG_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e7e505..31c491e02e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
-		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
-				    &dev->caps.gid_table_len[i],
-				    &dev->caps.pkey_table_len[i]))
+		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+						      &dev->caps.gid_table_len[i],
+						      &dev->caps.pkey_table_len[i]);
+		if (err)
 			goto err_mem;
 	}
 
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long)
 			 pci_resource_len(dev->persist->pdev, 2));
+		err = -ENOMEM;
 		goto err_mem;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..6fec3e993d02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 	struct res_counter *counter;
 	struct res_counter *tmp;
 	int err;
-	int index;
+	int *counters_arr = NULL;
+	int i, j;
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
 			  slave);
 
-	spin_lock_irq(mlx4_tlock(dev));
-	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
-		if (counter->com.owner == slave) {
-			index = counter->com.res_id;
-			rb_erase(&counter->com.node,
-				 &tracker->res_tree[RES_COUNTER]);
-			list_del(&counter->com.list);
-			kfree(counter);
-			__mlx4_counter_free(dev, index);
-			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return;
+
+	do {
+		i = 0;
+		j = 0;
+		spin_lock_irq(mlx4_tlock(dev));
+		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+			if (counter->com.owner == slave) {
+				counters_arr[i++] = counter->com.res_id;
+				rb_erase(&counter->com.node,
+					 &tracker->res_tree[RES_COUNTER]);
+				list_del(&counter->com.list);
+				kfree(counter);
+			}
 		}
-	}
-	spin_unlock_irq(mlx4_tlock(dev));
+		spin_unlock_irq(mlx4_tlock(dev));
+
+		while (j < i) {
+			__mlx4_counter_free(dev, counters_arr[j++]);
+			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+		}
+	} while (i);
+
+	kfree(counters_arr);
 }
 
 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
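
The rework above is the standard two-phase shape for freeing resources whose release path cannot run under a spinlock: unlink and record the IDs with the lock held, then free them with the lock dropped, looping in case new entries appear. A self-contained sketch of the pattern, with a pthread mutex and printf standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tracker_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[8] = { 3, 7, 9 };	/* IDs still owned; 0 = empty slot */

/* Unlink up to max IDs while holding the lock; returns how many. */
static int collect_ids(int *ids, int max)
{
	int n = 0, i;

	for (i = 0; i < 8 && n < max; i++)
		if (pending[i]) {
			ids[n++] = pending[i];
			pending[i] = 0;
		}
	return n;
}

static void drain_ids(void)
{
	int ids[8], n, i;

	do {
		pthread_mutex_lock(&tracker_lock);
		n = collect_ids(ids, 8);	/* phase 1: unlink under lock */
		pthread_mutex_unlock(&tracker_lock);

		for (i = 0; i < n; i++)		/* phase 2: free, may sleep */
			printf("freeing id %d\n", ids[i]);
	} while (n);
}

int main(void)
{
	drain_ids();
	return 0;
}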
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c09..22e72bf1ae48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
 	dma_addr_t addr;
 	u32 size;
+	enum mlx5e_dma_map_type type;
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d78cdf..1e52db32c73d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 	return err;
 }
 
+static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
+						  u32 tirn)
+{
+	void *in;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
+							     priv->tirn[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_clear_state_opened_flag;
 	}
 
+	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
 	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
 
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(priv);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
+	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
 	if (new_mtu > max_mtu) {
 		netdev_err(netdev,
 			   "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 			      "Not creating net device, some required device capabilities are missing\n");
 		return -ENOTSUPP;
 	}
+	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a251d7..1341b1d3c421 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 	}
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-				      u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+				      struct mlx5e_sq_dma *dma)
 {
-	sq->dma_fifo_pc--;
-	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-	dma_addr_t addr;
-	u32 size;
-	int i;
-
-	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+	switch (dma->type) {
+	case MLX5E_DMA_MAP_SINGLE:
+		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	case MLX5E_DMA_MAP_PAGE:
+		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	default:
+		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
 	}
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-				  u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+				  dma_addr_t addr,
+				  u32 size,
+				  enum mlx5e_dma_map_type map_type)
 {
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
 	sq->dma_fifo_pc++;
 }
 
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-				 u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 {
-	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+	return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+		struct mlx5e_sq_dma *last_pushed_dma =
+			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+	}
 }
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
  */
 #define MLX5E_MIN_INLINE ETH_HLEN
 
-	if (bf && (skb_headlen(skb) <= sq->max_inline))
-		return skb_headlen(skb);
+	if (bf) {
+		u16 ihs = skb_headlen(skb);
+
+		if (skb_vlan_tag_present(skb))
+			ihs += VLAN_HLEN;
+
+		if (ihs <= sq->max_inline)
+			return skb_headlen(skb);
+	}
 
 	return MLX5E_MIN_INLINE;
 }
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey       = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(headlen);
 
-		mlx5e_dma_push(sq, dma_addr, headlen);
+		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey       = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(fsz);
 
-		mlx5e_dma_push(sq, dma_addr, fsz);
+		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 		}
 
 		for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-			dma_addr_t addr;
-			u32 size;
+			struct mlx5e_sq_dma *dma =
+				mlx5e_dma_get(sq, dma_fifo_cc++);
 
-			mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-			dma_fifo_cc++;
-			dma_unmap_single(sq->pdev, addr, size,
-					 DMA_TO_DEVICE);
+			mlx5e_tx_dma_unmap(sq->pdev, dma);
 		}
 
 		npkts++;
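
The thread running through these en_tx.c hunks: a region mapped with dma_map_single() must be released with dma_unmap_single(), and one mapped as a page with dma_unmap_page(), so the FIFO entry now records which call produced each address and a single helper dispatches the matching unmap. A reduced, user-space sketch of the tagged-entry idea (names and values hypothetical):

#include <stdint.h>
#include <stdio.h>

enum dma_map_type { DMA_MAP_SINGLE, DMA_MAP_PAGE };

struct sq_dma {
	uint64_t addr;
	uint32_t size;
	enum dma_map_type type;	/* remembers which map call produced addr */
};

static void tx_dma_unmap(const struct sq_dma *dma)
{
	switch (dma->type) {
	case DMA_MAP_SINGLE:
		printf("dma_unmap_single(0x%llx, %u)\n",
		       (unsigned long long)dma->addr, dma->size);
		break;
	case DMA_MAP_PAGE:
		printf("dma_unmap_page(0x%llx, %u)\n",
		       (unsigned long long)dma->addr, dma->size);
		break;
	}
}

int main(void)
{
	struct sq_dma fifo[2] = {
		{ 0x1000, 64,   DMA_MAP_SINGLE },	/* linear headers */
		{ 0x2000, 4096, DMA_MAP_PAGE },		/* skb page fragment */
	};
	int i;

	for (i = 0; i < 2; i++)
		tx_dma_unmap(&fifo[i]);
	return 0;
}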
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f21232019a..79ef799f88ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@ process_pkt:
 
 			rtl8169_rx_vlan_tag(desc, skb);
 
+			if (skb->pkt_type == PACKET_MULTICAST)
+				dev->stats.multicast++;
+
 			napi_gro_receive(&tp->napi, skb);
 
 			u64_stats_update_begin(&tp->rx_stats.syncp);
 			tp->rx_stats.packets++;
 			tp->rx_stats.bytes += pkt_size;
 			u64_stats_update_end(&tp->rx_stats.syncp);
-
-			if (skb->pkt_type == PACKET_MULTICAST)
-				dev->stats.multicast++;
 		}
 release_descriptor:
 		desc->opts2 = 0;
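
The r8169 reordering is a use-after-free fix: napi_gro_receive() hands the skb up the stack, which may free it, so skb->pkt_type has to be sampled for the multicast counter before the hand-off, not after.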
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b2083cb53..ee8d1ec61fab 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	/* Interrupt enable: */
 	/* Frame receive */
 	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
-	/* Receive FIFO full warning */
-	ravb_write(ndev, RIC1_RFWE, RIC1);
 	/* Receive FIFO full error, descriptor empty */
 	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 	/* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
 		    ((tis & tic) & BIT(q))) {
 			if (napi_schedule_prep(&priv->napi[q])) {
 				/* Mask RX and TX interrupts */
-				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
-				ravb_write(ndev, tic & ~BIT(q), TIC);
+				ric0 &= ~BIT(q);
+				tic &= ~BIT(q);
+				ravb_write(ndev, ric0, RIC0);
+				ravb_write(ndev, tic, TIC);
 				__napi_schedule(&priv->napi[q]);
 			} else {
 				netdev_warn(ndev,
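
The ravb change matters because the handler loops over both queues with cached ric0/tic copies: writing ric0 & ~BIT(q) without updating the local variable would let the next iteration's write restore the bit just masked. Updating the copies before writing them back keeps the cached state and the registers in agreement.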
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c928de..a3c42a376741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@ out:
  * with our request for slot reset the mmio_enabled callback will never be
  * called, and the link_reset callback is not used by AER or EEH mechanisms.
  */
-static struct pci_error_handlers efx_err_handlers = {
+static const struct pci_error_handlers efx_err_handlers = {
 	.error_detected = efx_io_error_detected,
 	.slot_reset	= efx_io_slot_reset,
 	.resume		= efx_io_resume,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c9007e49..219a99b7a631 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
 
 static int smsc911x_phy_reset(struct smsc911x_data *pdata)
 {
-	struct phy_device *phy_dev = pdata->phy_dev;
 	unsigned int temp;
 	unsigned int i = 100000;
 
-	BUG_ON(!phy_dev);
-	BUG_ON(!phy_dev->bus);
-
-	SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
-	smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
+	temp = smsc911x_reg_read(pdata, PMT_CTRL);
+	smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
 	do {
 		msleep(1);
-		temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
-					 MII_BMCR);
-	} while ((i--) && (temp & BMCR_RESET));
+		temp = smsc911x_reg_read(pdata, PMT_CTRL);
+	} while ((i--) && (temp & PMT_CTRL_PHY_RST_));
 
-	if (temp & BMCR_RESET) {
+	if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
 		SMSC_WARN(pdata, hw, "PHY reset failed to complete");
 		return -EIO;
 	}
@@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
 	}
 
 	/* Reset the LAN911x */
-	if (smsc911x_soft_reset(pdata))
+	if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
 		return -ENODEV;
 
 	dev->flags |= IFF_MULTICAST;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdbf029f..82de68b1a452 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
 			     QSGMII_PHY_RX_SIGNAL_DETECT_EN |
 			     QSGMII_PHY_TX_DRIVER_EN |
 			     QSGMII_PHY_QSGMII_EN |
-			     0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
-			     0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
-			     0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
-			     0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
-			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+			     0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+			     0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+			     0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+			     0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+			     0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
 	}
 
 	plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd50a15..f38696ceee74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
 */
 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
 
-#define VAL_PKT_LEN_DEF     0
-/* ValPktLen[] is used for setting the checksum offload ability of NIC.
-   0: Receive frame with invalid layer 2 length (Default)
-   1: Drop frame with invalid layer 2 length
-*/
-VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
-
 #define WOL_OPT_DEF     0
 #define WOL_OPT_MIN     0
 #define WOL_OPT_MAX     7
@@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
 
 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
-	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 	velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
 	opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
 
-	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
-		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
+	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
+		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
+			VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
 		stats->rx_length_errors++;
 		return -EINVAL;
 	}
@@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
 				vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
-	/*
-	 * Drop frame not meeting IEEE 802.3
-	 */
-
-	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
-		if (rd->rdesc0.RSR & RSR_RL) {
-			stats->rx_length_errors++;
-			return -EINVAL;
-		}
-	}
-
 	velocity_rx_csum(rd, skb);
 
 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b5304d851..b103adb8d62e 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
 		    FJES_CMD_REQ_RES_CODE_BUSY) &&
 	       (timeout > 0)) {
 		msleep(200 + hw->my_epid * 20);
-			timeout -= (200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
 
 		res_buf->unshare_buffer.length = 0;
 		res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e3df6d..8c48bb2a94ea 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@ acct:
 	}
 }
 
-static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 			    bool local)
 {
 	struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
 	unsigned int len;
 	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
 	bool success = false;
+	struct sk_buff *skb = *pskb;
 
 	len = skb->len + ETH_HLEN;
 	if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
 	if (!skb)
 		goto out;
 
+	*pskb = skb;
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
@@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
 
 	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
 	if (addr)
-		return ipvlan_rcv_frame(addr, skb, true);
+		return ipvlan_rcv_frame(addr, &skb, true);
 
 out:
 	skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 	if (lyr3h) {
 		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
 		if (addr)
-			return ipvlan_rcv_frame(addr, skb, true);
+			return ipvlan_rcv_frame(addr, &skb, true);
 	}
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
@@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
 
 	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 	if (addr)
-		ret = ipvlan_rcv_frame(addr, skb, false);
+		ret = ipvlan_rcv_frame(addr, pskb, false);
 
 out:
 	return ret;
@@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 
 		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 		if (addr)
-			ret = ipvlan_rcv_frame(addr, skb, false);
+			ret = ipvlan_rcv_frame(addr, pskb, false);
 	}
 
 	return ret;
@@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
 	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
 		  port->mode);
 	kfree_skb(skb);
-	return NET_RX_DROP;
+	return RX_HANDLER_CONSUMED;
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c6292c27..06c8bfeaccd6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
 		if (!skb)
 			return RX_HANDLER_CONSUMED;
+		*pskb = skb;
 		eth = eth_hdr(skb);
 		macvlan_forward_source(skb, port, eth->h_source);
 		src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 		goto out;
 	}
 
+	*pskb = skb;
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
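Both the ipvlan and macvlan changes above enforce the same contract: a netdev rx_handler is handed a struct sk_buff ** precisely because helpers such as skb_share_check() and ip_check_defrag() are allowed to free the original skb and hand back a different one, while the core receive path keeps dereferencing *pskb after the handler returns. A handler that swaps the skb locally without writing the pointer back leaves the caller touching freed memory. A minimal sketch of the rule (the handler name is made up; the helpers and return codes are the real ones):

	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		/* May free 'skb' and return a private copy (or NULL). */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return RX_HANDLER_CONSUMED;	/* nothing left to process */

		*pskb = skb;	/* caller dereferences *pskb after we return */
		skb->pkt_type = PACKET_HOST;

		return RX_HANDLER_ANOTHER;	/* let the stack see the new skb */
	}

The ipvlan_handle_frame() return-value fix at the end of that file follows from the same contract: rx_handlers must return an rx_handler_result_t such as RX_HANDLER_CONSUMED, not the unrelated NET_RX_DROP value.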
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d32d27..2d020a3ec0b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
 	.flags			= PHY_HAS_INTERRUPT,
 	.config_aneg		= genphy_config_aneg,
 	.read_status		= genphy_read_status,
+	.ack_interrupt		= at803x_ack_interrupt,
+	.config_intr		= at803x_config_intr,
 	.driver			= {
 		.owner = THIS_MODULE,
 	},
@@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
 	.flags			= PHY_HAS_INTERRUPT,
 	.config_aneg		= genphy_config_aneg,
 	.read_status		= genphy_read_status,
+	.ack_interrupt		= at803x_ack_interrupt,
+	.config_intr		= at803x_config_intr,
 	.driver			= {
 		.owner = THIS_MODULE,
 	},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d5827536..0240552b50f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = {
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
+		.phy_id = MARVELL_PHY_ID_88E1540,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E1540",
+		.features = PHY_GBIT_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_aneg = &m88e1510_config_aneg,
+		.read_status = &marvell_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
+		.driver = { .owner = THIS_MODULE },
+	},
+	{
 		.phy_id = MARVELL_PHY_ID_88E3016,
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
 	{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
 	{ }
 };
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48abafc87..48ce6ef400fe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -863,6 +863,9 @@ void phy_state_machine(struct work_struct *work)
 			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
+		if (phy_interrupt_is_valid(phydev))
+			break;
+
 		err = phy_read_status(phydev);
 		if (err)
 			break;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad712ddb2..dd295dbaa074 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
 #define PHY_ID_VSC8244			0x000fc6c0
 #define PHY_ID_VSC8514			0x00070670
 #define PHY_ID_VSC8574			0x000704a0
+#define PHY_ID_VSC8601			0x00070420
 #define PHY_ID_VSC8662			0x00070660
 #define PHY_ID_VSC8221			0x000fc550
 #define PHY_ID_VSC8211			0x000fc4b0
@@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
 			(phydev->drv->phy_id == PHY_ID_VSC8234 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8244 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8514 ||
-			 phydev->drv->phy_id == PHY_ID_VSC8574) ?
+			 phydev->drv->phy_id == PHY_ID_VSC8574 ||
+			 phydev->drv->phy_id == PHY_ID_VSC8601) ?
 			MII_VSC8244_IMASK_MASK :
 			MII_VSC8221_IMASK_MASK);
 	else {
@@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = {
 	.config_intr    = &vsc82xx_config_intr,
 	.driver         = { .owner = THIS_MODULE,},
 }, {
+	.phy_id         = PHY_ID_VSC8601,
+	.name           = "Vitesse VSC8601",
+	.phy_id_mask    = 0x000ffff0,
+	.features       = PHY_GBIT_FEATURES,
+	.flags          = PHY_HAS_INTERRUPT,
+	.config_init    = &genphy_config_init,
+	.config_aneg    = &genphy_config_aneg,
+	.read_status    = &genphy_read_status,
+	.ack_interrupt  = &vsc824x_ack_interrupt,
+	.config_intr    = &vsc82xx_config_intr,
+	.driver         = { .owner = THIS_MODULE,},
+}, {
 	.phy_id         = PHY_ID_VSC8662,
 	.name           = "Vitesse VSC8662",
 	.phy_id_mask    = 0x000ffff0,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb1b464..3da70bf9936a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@ static const struct usb_device_id products[] = {
 		USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
 	.driver_info = (kernel_ulong_t) &wwan_info,
 }, {
+	/* Dell DW5580 modules */
+	USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = (kernel_ulong_t)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4caddccbe..899ea4288197 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2157,12 +2157,13 @@ vmxnet3_set_mc(struct net_device *netdev)
 	if (!netdev_mc_empty(netdev)) {
 		new_table = vmxnet3_copy_mc(netdev);
 		if (new_table) {
-			rxConf->mfTableLen = cpu_to_le16(
-				netdev_mc_count(netdev) * ETH_ALEN);
+			size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
+
+			rxConf->mfTableLen = cpu_to_le16(sz);
 			new_table_pa = dma_map_single(
 						&adapter->pdev->dev,
 						new_table,
-						rxConf->mfTableLen,
+						sz,
 						PCI_DMA_TODEVICE);
 		}
 
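The vmxnet3 change is an endianness fix, not just a cleanup: mfTableLen is a little-endian (__le16) field in the structure the device reads, so feeding it back into dma_map_single() maps a byte-swapped length on big-endian hosts. The pattern is to keep the size in a CPU-order variable and convert only the device-visible copy; a condensed sketch (shared, table, dma_dev and pa are illustrative names, not the driver's):

	size_t sz = netdev_mc_count(netdev) * ETH_ALEN;	/* CPU byte order */

	shared->mfTableLen = cpu_to_le16(sz);		/* device-visible copy */
	pa = dma_map_single(dma_dev, table, sz,		/* never pass the __le16 back */
			    PCI_DMA_TODEVICE);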
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a55c035..4c58c83dc225 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.3.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.4.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040300
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040400
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e735c728e3b3..edb1984201e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
 {
 	struct device *bridge = pci_get_host_bridge_device(dev);
 
-	if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-		if (bridge->parent)
+	if (IS_ENABLED(CONFIG_OF) &&
+	    bridge->parent && bridge->parent->of_node) {
 		of_dma_configure(&dev->dev, bridge->parent->of_node);
 	} else if (has_acpi_companion(bridge)) {
 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a18916a31..a831d18596a5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void)
 	free_page((unsigned long)sei_page);
 }
 
-int chsc_enable_facility(int operation_code)
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
 {
-	unsigned long flags;
 	int ret;
-	struct {
-		struct chsc_header request;
-		u8 reserved1:4;
-		u8 format:4;
-		u8 reserved2;
-		u16 operation_code;
-		u32 reserved3;
-		u32 reserved4;
-		u32 operation_data_area[252];
-		struct chsc_header response;
-		u32 reserved5:4;
-		u32 format2:4;
-		u32 reserved6:24;
-	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	memset(chsc_page, 0, PAGE_SIZE);
-	sda_area = chsc_page;
 	sda_area->request.length = 0x0400;
 	sda_area->request.code = 0x0031;
 	sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code)
 	default:
 		ret = chsc_error_from_response(sda_area->response.code);
 	}
+out:
+	return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+	struct chsc_sda_area *sda_area;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+
+	ret = __chsc_enable_facility(sda_area, operation_code);
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
 			      operation_code, sda_area->response.code);
-out:
+
 	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50700b2..0de134c3a204 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@ struct chsc_scpd {
 	u8 data[PAGE_SIZE - 20];
 } __attribute__ ((packed));
 
+struct chsc_sda_area {
+	struct chsc_header request;
+	u8 :4;
+	u8 format:4;
+	u8 :8;
+	u16 operation_code;
+	u32 :32;
+	u32 :32;
+	u32 operation_data_area[252];
+	struct chsc_header response;
+	u32 :4;
+	u32 format2:4;
+	u32 :24;
+} __packed __aligned(PAGE_SIZE);
 
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void);
 extern int chsc_init(void);
 extern void chsc_init_cleanup(void);
 
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e818d6b..690b8547e828 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
 
 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 {
+	static struct chsc_sda_area sda_area __initdata;
 	struct subchannel_id schid;
 	struct schib schib;
 
 	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
 	if (!schid.one)
 		return -ENODEV;
+
+	if (schid.ssid) {
+		/*
+		 * Firmware should have already enabled MSS but whoever started
+		 * the kernel might have initiated a channel subsystem reset.
+		 * Ensure that MSS is enabled.
+		 */
+		memset(&sda_area, 0, sizeof(sda_area));
+		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
+			return -ENODEV;
+	}
 	if (stsch_err(schid, &schib))
 		return -ENODEV;
 	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
 		return -ENODEV;
 	if (!schib.pmcw.dnv)
 		return -ENODEV;
+
+	iplinfo->ssid = schid.ssid;
 	iplinfo->devno = schib.pmcw.dev;
 	iplinfo->is_qdio = schib.pmcw.qf;
 	return 0;
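The three s390 cio hunks above are one refactoring: chsc.c splits the actual CHSC command out of chsc_enable_facility() into __chsc_enable_facility(), chsc.h turns the anonymous on-stack command block into the named, page-aligned struct chsc_sda_area, and cio.c can then re-enable MSS during IPL detection using a static __initdata buffer, presumably because this runs before chsc_init() has set up chsc_page and its lock. The shape is the familiar bare-helper/locked-wrapper split; a generic sketch (struct cmd_area and all names here are hypothetical, not the s390 code):

	static DEFINE_SPINLOCK(cmd_lock);
	static struct cmd_area *cmd_page;	/* one shared request page */

	/* No locking; the caller owns 'buf' (early boot passes its own). */
	int __do_cmd(struct cmd_area *buf, int opcode);

	int do_cmd(int opcode)			/* normal post-init entry point */
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&cmd_lock, flags);
		memset(cmd_page, 0, PAGE_SIZE);
		ret = __do_cmd(cmd_page, opcode);
		spin_unlock_irqrestore(&cmd_lock, flags);
		return ret;
	}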
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053bdc12..489e703dc82d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
 	} else {
-#ifdef CONFIG_SMP
 		css->global_pgid.pgid_high.cpu_addr = stap();
-#else
-		css->global_pgid.pgid_high.cpu_addr = 0;
-#endif
 	}
 	get_cpu_id(&cpu_id);
 	css->global_pgid.cpu_id = cpu_id.ident;
 	css->global_pgid.cpu_model = cpu_id.machine;
 	css->global_pgid.tod_high = tod_high;
-
 }
 
 static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b3c8a4..b8ab18676e69 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
 #
 
 ap-objs := ap_bus.o
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o
+# zcrypt_api depends on ap
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
+# msgtype* depend on zcrypt_api
 obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+# adapter drivers depend on ap, zcrypt_api and msgtype*
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfbcaddb..61f768518a34 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -74,6 +74,7 @@ static struct device *ap_root_device = NULL;
 static struct ap_config_info *ap_configuration;
 static DEFINE_SPINLOCK(ap_device_list_lock);
 static LIST_HEAD(ap_device_list);
+static bool initialised;
 
 /*
  * Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 {
 	struct device_driver *drv = &ap_drv->driver;
 
+	if (!initialised)
+		return -ENODEV;
+
 	drv->bus = &ap_bus_type;
 	drv->probe = ap_device_probe;
 	drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@ int __init ap_module_init(void)
 		goto out_pm;
 
 	queue_work(system_long_wq, &ap_scan_work);
+	initialised = true;
 
 	return 0;
 
@@ -1837,6 +1842,7 @@ void ap_module_exit(void)
 {
 	int i;
 
+	initialised = false;
 	ap_reset_domain();
 	ap_poll_thread_stop();
 	del_timer_sync(&ap_config_timer);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603ebbc1f8..9f8fa42c062c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister);
 
 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
 {
-	if (zops->owner) {
-		spin_lock_bh(&zcrypt_ops_list_lock);
-		list_add_tail(&zops->list, &zcrypt_ops_list);
-		spin_unlock_bh(&zcrypt_ops_list_lock);
-	}
+	spin_lock_bh(&zcrypt_ops_list_lock);
+	list_add_tail(&zops->list, &zcrypt_ops_list);
+	spin_unlock_bh(&zcrypt_ops_list_lock);
 }
 EXPORT_SYMBOL(zcrypt_msgtype_register);
 
@@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
 	spin_lock_bh(&zcrypt_ops_list_lock);
 	list_for_each_entry(zops, &zcrypt_ops_list, list) {
 		if ((zops->variant == variant) &&
-		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
+		    (!strncmp(zops->name, name, sizeof(zops->name)))) {
 			found = 1;
 			break;
 		}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 750876891931..38618f05ad92 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@ struct zcrypt_ops {
 	struct list_head list;		/* zcrypt ops list. */
 	struct module *owner;
 	int variant;
+	char name[128];
 };
 
 struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9137a8..74edf2934e7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
 	.rsa_modexpo = zcrypt_cex2a_modexpo,
 	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
 	.owner = THIS_MODULE,
+	.name = MSGTYPE50_NAME,
 	.variant = MSGTYPE50_VARIANT_DEFAULT,
 };
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 74762214193b..9a2dd472c1cc 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
  */
 static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_NORNG,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
 
 static struct zcrypt_ops zcrypt_msgtype6_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_DEFAULT,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
 
 static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_EP11,
 	.rsa_modexpo = NULL,
 	.rsa_modexpo_crt = NULL,
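The name field threaded through the zcrypt hunks is what allows zcrypt_msgtype_register() to drop its if (zops->owner) guard: when zcrypt is built into the kernel rather than loaded as modules, THIS_MODULE evaluates to NULL, so the old lookup via zops->owner->name would dereference a NULL pointer, and registration had to be skipped entirely, leaving built-in message types unusable. Storing the name in the ops structure makes lookup independent of module state; schematically (example_ops is illustrative, the real tables are the zcrypt_msgtype*_ops above):

	static struct zcrypt_ops example_ops = {
		.owner   = THIS_MODULE,		/* NULL when built in */
		.name    = MSGTYPE50_NAME,	/* valid either way */
		.variant = MSGTYPE50_VARIANT_DEFAULT,
	};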
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4eb7d10..91a003011acf 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
 		if (!of_find_compatible_node(NULL, NULL,
 					     "renesas,cpg-mstp-clocks"))
 			return 0;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 6d5b38d69578..9d7f0004d2d7 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig"
 source "drivers/staging/iio/trigger/Kconfig"
 
 config IIO_DUMMY_EVGEN
 	tristate
+	select IRQ_WORK
 
 config IIO_SIMPLE_DUMMY
 	tristate "An example driver with no hardware requirements"
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index d11c54b72186..b51f237cd817 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 
 	if (mask == IIO_CHAN_INFO_RAW) {
 		mutex_lock(&indio_dev->mlock);
-		clk_enable(info->clk);
+		clk_prepare_enable(info->clk);
 		/* Measurement setup */
 		__raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
 			LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 		__raw_writel(AD_PDN_CTRL | AD_STROBE,
 			LPC32XX_ADC_CTRL(info->adc_base));
 		wait_for_completion(&info->completion); /* set by ISR */
-		clk_disable(info->clk);
+		clk_disable_unprepare(info->clk);
 		*val = info->value;
 		mutex_unlock(&indio_dev->mlock);
 
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index e10c6ffa698a..9568bdb6319b 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -13,12 +13,8 @@
 #include "wilc_wlan.h"
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/etherdevice.h>
 #define TAG_PARAM_OFFSET	(MAC_HDR_LEN + TIME_STAMP_LEN + \
 				 BEACON_INTERVAL_LEN + CAP_INFO_LEN)
-#define ADDR1 4
-#define ADDR2 10
-#define ADDR3 16
 
 /* Basic Frame Type Codes (2-bit) */
 enum basic_frame_type {
@@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header)
 	return ((header[1] & 0x02) >> 1);
 }
 
+/* This function extracts the MAC Address in 'address1' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address1(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 4, 6);
+}
+
+/* This function extracts the MAC Address in 'address2' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address2(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 10, 6);
+}
+
+/* This function extracts the MAC Address in 'address3' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address3(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 16, 6);
+}
+
 /* This function extracts the BSSID from the incoming WLAN packet based on */
-/* the 'from ds' bit, and updates the MAC Address in the allocated 'data'  */
+/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr'  */
 /* variable.                                                               */
 static inline void get_BSSID(u8 *data, u8 *bssid)
 {
 	if (get_from_ds(data) == 1)
-		/*
-		 * Extract the MAC Address in 'address2' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR2);
+		get_address2(data, bssid);
 	else if (get_to_ds(data) == 1)
-		/*
-		 * Extract the MAC Address in 'address1' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR1);
+		get_address1(data, bssid);
 	else
-		/*
-		 * Extract the MAC Address in 'address3' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR3);
+		get_address3(data, bssid);
 }
 
 /* This function extracts the SSID from a beacon/probe response frame */
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 13844261cd5f..ed776149261e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
 {
 	struct n_tty_data *ldata = tty->disc_data;
 
-	tty_audit_add_data(tty, to, n, ldata->icanon);
+	tty_audit_add_data(tty, from, n, ldata->icanon);
 	return copy_to_user(to, from, n);
 }
 
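In tty_copy_to_user(), to is the userspace destination and from is the kernel-side buffer; the audit hook runs before copy_to_user(), so auditing to meant reading through an unverified user pointer from kernel context, and recording the wrong bytes besides. Auditing from records what the tty actually delivered. The tty_audit_add_data() hunk further down widens its parameter to const void * so the const kernel buffer can be passed without a cast. A sketch of the helper after both changes (parameter declarations inferred from the visible context, not shown in the hunk):

	static inline int tty_copy_to_user(struct tty_struct *tty,
					   void __user *to,
					   const void *from,
					   unsigned long n)
	{
		struct n_tty_data *ldata = tty->disc_data;

		/* Audit the kernel source buffer, not the user destination. */
		tty_audit_add_data(tty, from, n, ldata->icanon);
		return copy_to_user(to, from, n);
	}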
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index c0533a57ec53..910bfee5a88b 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port)
 	spin_unlock_irqrestore(&up->port.lock, flags);
 	return 1;
 }
+EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e6f5e12a2d83..6412f1455beb 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -373,6 +373,7 @@ config SERIAL_8250_MID
 	depends on SERIAL_8250 && PCI
 	select HSU_DMA if SERIAL_8250_DMA
 	select HSU_DMA_PCI if X86_INTEL_MID
+	select RATIONAL
 	help
 	  Selecting this option will enable handling of the extra features
 	  present on the UART found on Intel Medfield SOC and various other
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 1aec4404062d..f38beb28e7ae 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART
 	tristate "Freescale lpuart serial port support"
 	depends on HAS_DMA
 	select SERIAL_CORE
-	select SERIAL_EARLYCON
 	help
 	  Support for the on-chip lpuart on some Freescale SOCs.
 
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE
 	bool "Console on Freescale lpuart serial port"
 	depends on SERIAL_FSL_LPUART=y
 	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
 	help
 	  If you have enabled the lpuart serial port on the Freescale SoCs,
 	  you can make it the console by answering Y to this option.
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 681e0f3d5e0e..a1c0a89d9c7f 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port)
 
 	/* register irq and enable rx interrupts */
 	ret = request_irq(port->irq, bcm_uart_interrupt, 0,
-			  bcm_uart_type(port), port);
+			  dev_name(port->dev), port);
 	if (ret)
 		return ret;
 	bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 6813e316e9ff..2f80bc7e44fb 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
 	up->regi_ser = of_iomap(np, 0);
 	up->port.dev = &pdev->dev;
 
-	up->gpios = mctrl_gpio_init(&pdev->dev, 0);
+	up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0);
 	if (IS_ERR(up->gpios))
 		return PTR_ERR(up->gpios);
 
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 90ca082935f6..3d245cd3d8e6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
  *
  *	Audit @data of @size from @tty, if necessary.
  */
-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+void tty_audit_add_data(struct tty_struct *tty, const void *data,
 			size_t size, unsigned icanon)
 {
 	struct tty_audit_buf *buf;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0c41dbcb90b8..bcc8e1e8bb72 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
 	int	was_stopped = tty->stopped;
 
 	if (tty->ops->send_xchar) {
+		down_read(&tty->termios_rwsem);
 		tty->ops->send_xchar(tty, ch);
+		up_read(&tty->termios_rwsem);
 		return 0;
 	}
 
 	if (tty_write_lock(tty, 0) < 0)
 		return -ERESTARTSYS;
 
+	down_read(&tty->termios_rwsem);
 	if (was_stopped)
 		start_tty(tty);
 	tty->ops->write(tty, &ch, 1);
 	if (was_stopped)
 		stop_tty(tty);
+	up_read(&tty->termios_rwsem);
 	tty_write_unlock(tty);
 	return 0;
 }
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9c5aebfe7053..1445dd39aa62 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
 		spin_unlock_irq(&tty->flow_lock);
 		break;
 	case TCIOFF:
-		down_read(&tty->termios_rwsem);
 		if (STOP_CHAR(tty) != __DISABLED_CHAR)
 			retval = tty_send_xchar(tty, STOP_CHAR(tty));
-		up_read(&tty->termios_rwsem);
 		break;
 	case TCION:
-		down_read(&tty->termios_rwsem);
 		if (START_CHAR(tty) != __DISABLED_CHAR)
 			retval = tty_send_xchar(tty, START_CHAR(tty));
-		up_read(&tty->termios_rwsem);
 		break;
 	default:
 		return -EINVAL;
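The tty_io.c and tty_ioctl.c hunks are one movement: the read side of termios_rwsem migrates from the TCIOFF/TCION caller into tty_send_xchar() itself, so the lock is held across every path that reaches the driver's send_xchar() or write() method, including callers that previously took no lock at all, and the driver always sees a stable termios. Pushing a lock from callers into the callee, in miniature (this deliberately drops tty_send_xchar()'s flow-control handling to show only the locking):

	/* Before: each caller had to remember the lock. */
	down_read(&tty->termios_rwsem);
	retval = tty_send_xchar(tty, STOP_CHAR(tty));
	up_read(&tty->termios_rwsem);

	/* After: the helper brackets the driver call itself. */
	int tty_send_xchar(struct tty_struct *tty, char ch)
	{
		down_read(&tty->termios_rwsem);
		tty->ops->send_xchar(tty, ch);
		up_read(&tty->termios_rwsem);
		return 0;
	}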
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5af8f1874c1a..629e3c865072 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
 	/* Restart the work queue in case no characters kick it off. Safe if
 	   already running */
-	schedule_work(&tty->port->buf.work);
+	tty_buffer_restart_work(tty->port);
 
 	tty_unlock(tty);
 	return retval;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 6ccbf60cdd5c..5a048b7b92e8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -84,6 +84,12 @@ struct ci_hdrc_imx_data {
 	struct imx_usbmisc_data *usbmisc_data;
 	bool supports_runtime_pm;
 	bool in_lpm;
+	/* SoC before i.mx6 (except imx23/imx28) needs three clks */
+	bool need_three_clks;
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+	struct clk *clk_per;
+	/* --------------------------------- */
 };
 
 /* Common functions shared by usbmisc drivers */
@@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
 }
 
 /* End of common functions shared by usbmisc drivers*/
+static int imx_get_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+	int ret = 0;
+
+	data->clk_ipg = devm_clk_get(dev, "ipg");
+	if (IS_ERR(data->clk_ipg)) {
+		/* If the platform only needs one clocks */
+		data->clk = devm_clk_get(dev, NULL);
+		if (IS_ERR(data->clk)) {
+			ret = PTR_ERR(data->clk);
+			dev_err(dev,
+				"Failed to get clks, err=%ld,%ld\n",
+				PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
+			return ret;
+		}
+		return ret;
+	}
+
+	data->clk_ahb = devm_clk_get(dev, "ahb");
+	if (IS_ERR(data->clk_ahb)) {
+		ret = PTR_ERR(data->clk_ahb);
+		dev_err(dev,
+			"Failed to get ahb clock, err=%d\n", ret);
+		return ret;
+	}
+
+	data->clk_per = devm_clk_get(dev, "per");
+	if (IS_ERR(data->clk_per)) {
+		ret = PTR_ERR(data->clk_per);
+		dev_err(dev,
+			"Failed to get per clock, err=%d\n", ret);
+		return ret;
+	}
+
+	data->need_three_clks = true;
+	return ret;
+}
+
+static int imx_prepare_enable_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (data->need_three_clks) {
+		ret = clk_prepare_enable(data->clk_ipg);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable ipg clk, err=%d\n",
+				ret);
+			return ret;
+		}
+
+		ret = clk_prepare_enable(data->clk_ahb);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable ahb clk, err=%d\n",
+				ret);
+			clk_disable_unprepare(data->clk_ipg);
+			return ret;
+		}
+
+		ret = clk_prepare_enable(data->clk_per);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable per clk, err=%d\n",
+				ret);
+			clk_disable_unprepare(data->clk_ahb);
+			clk_disable_unprepare(data->clk_ipg);
+			return ret;
+		}
+	} else {
+		ret = clk_prepare_enable(data->clk);
+		if (ret) {
+			dev_err(dev,
+				"Failed to prepare/enable clk, err=%d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void imx_disable_unprepare_clks(struct device *dev)
+{
+	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
+
+	if (data->need_three_clks) {
+		clk_disable_unprepare(data->clk_per);
+		clk_disable_unprepare(data->clk_ahb);
+		clk_disable_unprepare(data->clk_ipg);
+	} else {
+		clk_disable_unprepare(data->clk);
+	}
+}
 
 static int ci_hdrc_imx_probe(struct platform_device *pdev)
 {
@@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 		.flags		= CI_HDRC_SET_NON_ZERO_TTHA,
 	};
 	int ret;
-	const struct of_device_id *of_id =
-			of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
-	const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data;
+	const struct of_device_id *of_id;
+	const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
+
+	of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
+	imx_platform_flag = of_id->data;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, data);
 	data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
 	if (IS_ERR(data->usbmisc_data))
 		return PTR_ERR(data->usbmisc_data);
 
-	data->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(data->clk)) {
-		dev_err(&pdev->dev,
-			"Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
-		return PTR_ERR(data->clk);
-	}
+	ret = imx_get_clks(&pdev->dev);
+	if (ret)
+		return ret;
 
-	ret = clk_prepare_enable(data->clk);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"Failed to prepare or enable clock, err=%d\n", ret);
+	ret = imx_prepare_enable_clks(&pdev->dev);
+	if (ret)
 		return ret;
-	}
 
 	data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
 	if (IS_ERR(data->phy)) {
@@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 		goto disable_device;
 	}
 
-	platform_set_drvdata(pdev, data);
-
 	if (data->supports_runtime_pm) {
 		pm_runtime_set_active(&pdev->dev);
 		pm_runtime_enable(&pdev->dev);
@@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 disable_device:
 	ci_hdrc_remove_device(data->ci_pdev);
 err_clk:
-	clk_disable_unprepare(data->clk);
+	imx_disable_unprepare_clks(&pdev->dev);
 	return ret;
 }
 
@@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
 		pm_runtime_put_noidle(&pdev->dev);
 	}
 	ci_hdrc_remove_device(data->ci_pdev);
-	clk_disable_unprepare(data->clk);
+	imx_disable_unprepare_clks(&pdev->dev);
 
 	return 0;
 }
@@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev)
 
 	dev_dbg(dev, "at %s\n", __func__);
 
-	clk_disable_unprepare(data->clk);
+	imx_disable_unprepare_clks(dev);
 	data->in_lpm = true;
 
 	return 0;
@@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev)
 		return 0;
 	}
 
-	ret = clk_prepare_enable(data->clk);
+	ret = imx_prepare_enable_clks(dev);
 	if (ret)
 		return ret;
 
@@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev)
 	return 0;
 
 clk_disable:
-	clk_disable_unprepare(data->clk);
+	imx_disable_unprepare_clks(dev);
 	return ret;
 }
 
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 080b7be3daf0..58c8485a0715 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
 		return -EINVAL;
 
 	pm_runtime_get_sync(ci->dev);
+	disable_irq(ci->irq);
 	ci_role_stop(ci);
 	ret = ci_role_start(ci, role);
+	enable_irq(ci->irq);
 	pm_runtime_put_sync(ci->dev);
 
 	return ret ? ret : count;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8223fe73ea85..391a1225b0ba 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
 	return retval;
 }
 
+static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
+{
+	if (!ci_otg_is_fsm_mode(ci))
+		return;
+
+	mutex_lock(&ci->fsm.lock);
+	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
+		ci->fsm.a_bidl_adis_tmout = 1;
+		ci_hdrc_otg_fsm_start(ci);
+	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
+		ci->fsm.protocol = PROTO_UNDEF;
+		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
+	}
+	mutex_unlock(&ci->fsm.lock);
+}
+
 /**
  * ci_udc_stop: unregister a gadget driver
  */
@@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
 	ci->driver = NULL;
 	spin_unlock_irqrestore(&ci->lock, flags);
 
+	ci_udc_stop_for_otg_fsm(ci);
 	return 0;
 }
 
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index fcea4eb36eee..ab8b027e8cc8 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct imx_usbmisc *data;
-	struct of_device_id *tmp_dev;
+	const struct of_device_id *of_id;
+
+	of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
 	if (IS_ERR(data->base))
 		return PTR_ERR(data->base);
 
-	tmp_dev = (struct of_device_id *)
-		of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
-	data->ops = (const struct usbmisc_ops *)tmp_dev->data;
+	data->ops = (const struct usbmisc_ops *)of_id->data;
 	platform_set_drvdata(pdev, data);
 
 	return 0;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 433bbc34a8a4..071964c7847f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
 
 	add_wait_queue(&usblp->wwait, &waita);
 	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
 		if (mutex_lock_interruptible(&usblp->mut)) {
 			rc = -EINTR;
 			break;
 		}
+		set_current_state(TASK_INTERRUPTIBLE);
 		rc = usblp_wtest(usblp, nonblock);
 		mutex_unlock(&usblp->mut);
 		if (rc <= 0)
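The usblp_wwait() reorder matters because mutex_lock_interruptible() can sleep, and any sleep puts the task back to TASK_RUNNING, so a TASK_INTERRUPTIBLE set before taking the mutex was silently cancelled; with sleep-state debugging enabled, blocking while already marked TASK_INTERRUPTIBLE also trips the "do not call blocking ops when !TASK_RUNNING" warning. The canonical ordering sets the state last, immediately before re-checking the condition and calling schedule(); generically (wq, wait and condition are placeholders):

	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* after anything that may sleep */
		if (condition)				/* re-test under the new state */
			break;
		if (signal_pending(current))
			break;
		schedule();	/* a wakeup between set_current_state() and here is not lost */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);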
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index a99c89e78126..dd280108758f 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB
 
 config USB_OTG_FSM
 	tristate "USB 2.0 OTG FSM implementation"
-	depends on USB
-	select USB_OTG
+	depends on USB && USB_OTG
 	select USB_PHY
 	help
 	  Implements OTG Finite State Machine as specified in On-The-Go
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e79baf73c234..571c21727ff9 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
  */
 static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
 {
-	if (hsotg->lx_state == DWC2_L2) {
+	if (hsotg->bus_suspended) {
 		hsotg->flags.b.port_suspend_change = 1;
 		usb_hcd_resume_root_hub(hsotg->priv);
-	} else {
-		hsotg->flags.b.port_l1_change = 1;
 	}
+
+	if (hsotg->lx_state == DWC2_L1)
+		hsotg->flags.b.port_l1_change = 1;
 }
 
 /**
@@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data)
 	dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
 		dwc2_readl(hsotg->regs + HPRT0));
 
-	hsotg->bus_suspended = 0;
 	dwc2_hcd_rem_wakeup(hsotg);
+	hsotg->bus_suspended = 0;
 
 	/* Change to L0 state */
 	hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 5859b0fa19ee..e61d773cf65e 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = {
 	.host_ls_low_power_phy_clk	= -1,
 	.ts_dline			= -1,
 	.reload_ctl			= -1,
-	.ahbcfg				= 0x7, /* INCR16 */
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
 	.uframe_sched			= -1,
 	.external_id_pin_ctl		= -1,
 	.hibernation			= -1,
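The rk3066 ahbcfg change is a functional fix dressed as a cleanup: in this driver's register definitions the GAHBCFG burst-length field (HBstLen) does not start at bit 0, bit 0 is the global interrupt mask, so the raw value 0x7 did not program INCR16 despite its comment; it set the mask bit and encoded a different burst length. Built from the field macros, the intended value lands in the intended bits:

	/* If HBstLen sits at bits 4:1 (GAHBCFG_HBSTLEN_SHIFT == 1), then
	 * GAHBCFG_HBSTLEN_INCR16 (7) << 1 == 0xE, whereas the old raw 0x7
	 * was bit 0 plus HBstLen = 3. */
	.ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT,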
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 77a622cb48ab..009d83048c8c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,8 @@
 #define PCI_DEVICE_ID_INTEL_BSW		0x22b7
 #define PCI_DEVICE_ID_INTEL_SPTLP	0x9d30
 #define PCI_DEVICE_ID_INTEL_SPTH	0xa130
+#define PCI_DEVICE_ID_INTEL_BXT		0x0aaa
+#define PCI_DEVICE_ID_INTEL_APL		0x5aaa
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
 static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
 	{  }	/* Terminating Entry */
 };
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 55ba447fdf8b..e24a01cc98df 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 	}
 
 	dwc->gadget.ops			= &dwc3_gadget_ops;
-	dwc->gadget.max_speed		= USB_SPEED_SUPER;
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
 	dwc->gadget.name		= "dwc3-gadget";
 
 	/*
+	 * FIXME We might be setting max_speed to <SUPER, however versions
+	 * <2.20a of dwc3 have an issue with metastability (documented
+	 * elsewhere in this driver) which tells us we can't set max speed to
+	 * anything lower than SUPER.
+	 *
+	 * Because gadget.max_speed is only used by composite.c and function
+	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
+	 * to happen so we avoid sending SuperSpeed Capability descriptor
+	 * together with our BOS descriptor as that could confuse host into
+	 * thinking we can handle super speed.
+	 *
+	 * Note that, in fact, we won't even support GetBOS requests when speed
+	 * is less than super speed because we don't have means, yet, to tell
+	 * composite.c that we are USB 2.0 + LPM ECN.
+	 */
+	if (dwc->revision < DWC3_REVISION_220A)
+		dwc3_trace(trace_dwc3_gadget,
+			   "Changing max_speed on rev %08x\n",
+			   dwc->revision);
+
+	dwc->gadget.max_speed		= dwc->maximum_speed;
+
+	/*
 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
 	 * on ep out.
 	 */
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 23933bdf2d9d..ddc3aad886b7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
 	for (i = 0; i < loop->qlen && result == 0; i++) {
 		result = -ENOMEM;
 
-		in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL);
+		in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
 		if (!in_req)
 			goto fail;
 
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f0f2b066ac08..f92f5aff0dd5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
 	spin_lock(&udc->lock);
 
 	int_enb = usba_int_enb_get(udc);
-	status = usba_readl(udc, INT_STA) & int_enb;
+	status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
 	DBG(DBG_INT, "irq, status=%#08x\n", status);
 
 	if (status & USBA_DET_SUSPEND) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5d2d7e954bd4..0230965fb78c 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 			status |= USB_PORT_STAT_SUSPEND;
 		}
 	}
-	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
-		&& (raw_port_status & PORT_POWER)
-		&& (bus_state->suspended_ports & (1 << wIndex))) {
+	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
+	    (raw_port_status & PORT_POWER)) {
+		if (bus_state->suspended_ports & (1 << wIndex)) {
 		bus_state->suspended_ports &= ~(1 << wIndex);
 		if (hcd->speed < HCD_USB3)
 			bus_state->port_c_suspend |= 1 << wIndex;
+		}
+		bus_state->resume_done[wIndex] = 0;
+		clear_bit(wIndex, &bus_state->resuming_ports);
 	}
 	if (raw_port_status & PORT_CONNECT) {
 		status |= USB_PORT_STAT_CONNECTION;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa836251ca21..6c5e8133cf87 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3896,28 +3896,6 @@ cleanup:
 	return ret;
 }
 
-static int ep_ring_is_processing(struct xhci_hcd *xhci,
-		int slot_id, unsigned int ep_index)
-{
-	struct xhci_virt_device *xdev;
-	struct xhci_ring *ep_ring;
-	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_virt_ep *xep;
-	dma_addr_t hw_deq;
-
-	xdev = xhci->devs[slot_id];
-	xep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_ring = xep->ring;
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
-		return 0;
-
-	hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
-	return (hw_deq !=
-		xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
-}
-
 /*
  * Check transfer ring to guarantee there is enough room for the urb.
  * Update ISO URB start_frame and interval.
@@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	}
 
 	/* Calculate the start frame and put it in urb->start_frame. */
-	if (HCC_CFC(xhci->hcc_params) &&
-			ep_ring_is_processing(xhci, slot_id, ep_index)) {
-		urb->start_frame = xep->next_frame_id;
-		goto skip_start_over;
+	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+				EP_STATE_RUNNING) {
+			urb->start_frame = xep->next_frame_id;
+			goto skip_start_over;
+		}
 	}
 
 	start_frame = readl(&xhci->run_regs->microframe_index);
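Note: taken together, the two xhci-ring hunks replace the ep_ring_is_processing() heuristic (comparing the hardware dequeue pointer against the software enqueue position) with a simpler gate: reuse the cached next_frame_id only while TDs are actually queued and the endpoint context reports RUNNING. A condensed sketch of the new gate (the fallback is simplified; the driver additionally aligns the frame to the endpoint interval):

	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list) &&
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_RUNNING)
		urb->start_frame = xep->next_frame_id;
	else
		urb->start_frame = readl(&xhci->run_regs->microframe_index);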
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e7dc6f93978..dfa44d3e8eee 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
 	command |= CMD_RESET;
 	writel(command, &xhci->op_regs->command);
 
+	/* Existing Intel xHCI controllers require a delay of 1 mS,
+	 * after setting the CMD_RESET bit, and before accessing any
+	 * HC registers. This allows the HC to complete the
+	 * reset operation and be ready for HC register access.
+	 * Without this delay, the subsequent HC register access,
+	 * may result in a system hang very rarely.
+	 */
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		udelay(1000);
+
 	ret = xhci_handshake(&xhci->op_regs->command,
 			CMD_RESET, 0, 10 * 1000 * 1000);
 	if (ret)
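Note: the new delay fires only when XHCI_INTEL_HOST is set, so non-Intel controllers pay nothing. Quirk bits like this are latched once at probe time so hot paths only test a flag; a sketch of how xhci-pci marks the bit (the function name here is illustrative, the vendor check mirrors the real quirk setup):

	static void example_mark_quirks(struct xhci_hcd *xhci,
					struct pci_dev *pdev)
	{
		/* decided once at probe; xhci_reset() then only tests it */
		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
			xhci->quirks |= XHCI_INTEL_HOST;
	}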
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ba13529cbd52..18cfc0a361cb 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
 /*-------------------------------------------------------------------------*/
 
 #ifndef CONFIG_BLACKFIN
-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int i = 0;
@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
 	 */
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
 
@@ -176,7 +176,7 @@ out:
 	return ret;
 }
 
-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int i = 0;
@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
 	power &= ~MUSB_POWER_SUSPENDM;
 	musb_writeb(addr, MUSB_POWER, power);
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
 
 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt);
 static bool use_dma = 1;
 
 /* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
+module_param(use_dma, bool, 0644);
 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
 
 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
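Note: the parameter renames line the two callbacks up with the usb_phy I/O contract, in which the write op takes the value before the register; the old (offset, data) naming had the write arguments documented in the wrong roles. From include/linux/usb/phy.h, the ops these functions are wired into:

	struct usb_phy_io_ops {
		int	(*read)(struct usb_phy *x, u32 reg);
		int	(*write)(struct usb_phy *x, u32 val, u32 reg);
	};

	/* musb_core.c registers the pair accordingly: */
	static struct usb_phy_io_ops musb_ulpi_access = {
		.read = musb_ulpi_read,
		.write = musb_ulpi_write,
	};

Separately, the module_param permission change (0 to 0644) exposes use_dma for runtime toggling through sysfs instead of module-load time only.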
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 26c65e66cc0f..795a45b1b25b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
 	struct musb	*musb = ep->musb;
 	void __iomem	*epio = ep->regs;
 	u16		csr;
-	u16		lastcsr = 0;
 	int		retries = 1000;
 
 	csr = musb_readw(epio, MUSB_TXCSR);
 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
-		if (csr != lastcsr)
-			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
-		lastcsr = csr;
 		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
 		musb_writew(epio, MUSB_TXCSR, csr);
 		csr = musb_readw(epio, MUSB_TXCSR);
-		if (WARN(retries-- < 1,
+
+		/*
+		 * FIXME: sometimes the tx fifo flush failed, it has been
+		 * observed during device disconnect on AM335x.
+		 *
+		 * To reproduce the issue, ensure tx urb(s) are queued when
+		 * unplug the usb device which is connected to AM335x usb
+		 * host port.
+		 *
+		 * I found using a usb-ethernet device and running iperf
+		 * (client on AM335x) has very high chance to trigger it.
+		 *
+		 * Better to turn on dev_dbg() in musb_cleanup_urb() with
+		 * CPPI enabled to see the issue when aborting the tx channel.
+		 */
+		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
 				"Could not flush host TX%d fifo: csr: %04x\n",
 				ep->epnum, csr))
 			return;
-		mdelay(1);
 	}
 }
 
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 173132416170..22e8ecb6bfbd 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,6 @@ config AB8500_USB
 config FSL_USB2_OTG
 	bool "Freescale USB OTG Transceiver Driver"
 	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
-	select USB_OTG
 	select USB_PHY
 	help
 	  Enable this to support Freescale USB OTG transceiver.
@@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY
 
 config USB_MV_OTG
 	tristate "Marvell USB OTG support"
-	depends on USB_EHCI_MV && USB_MV_UDC && PM
-	select USB_OTG
+	depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
 	select USB_PHY
 	help
 	  Say Y here if you want to build Marvell USB OTG transciever
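Note: both Kconfig hunks apply the same rule of thumb: a driver should not `select` a symbol such as USB_OTG that carries dependencies of its own, because `select` forces it on without checking those dependencies. FSL_USB2_OTG simply drops the line (USB_OTG is pulled in elsewhere in its dependency chain), while USB_MV_OTG converts it to `depends on ... && USB_OTG`, letting Kconfig enforce the constraint instead. Only leaf helpers like USB_PHY remain selected.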
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 4d863ebc117c..b7536af777ab 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev)
 	struct clk *clk;
 	struct mxs_phy *mxs_phy;
 	int ret;
-	const struct of_device_id *of_id =
-			of_match_device(mxs_phy_dt_ids, &pdev->dev);
+	const struct of_device_id *of_id;
 	struct device_node *np = pdev->dev.of_node;
 
+	of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
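Note: of_match_device() returns NULL when no table entry matches (or when the device has no OF node at all), and its ->data pointer is what probe goes on to dereference, hence the early -ENODEV. The general pattern (the consumer assignment is a hypothetical illustration, not this driver's exact field use):

	const struct of_device_id *of_id;

	of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
	if (!of_id)
		return -ENODEV;

	mxs_phy->data = of_id->data;	/* hypothetical consumer */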
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 1270906ccb95..c4bf2de6d14e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev)
 	extcon = extcon_get_extcon_dev(config->extcon);
 	if (!extcon)
 		return -EPROBE_DEFER;
-	otg_dev->extcon = extcon;
 
 	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
 	if (!otg_dev)
@@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev)
 	if (IS_ERR(otg_dev->base))
 		return PTR_ERR(otg_dev->base);
 
+	otg_dev->extcon = extcon;
 	otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
 	otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
 
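Note: the original code stored through otg_dev->extcon before otg_dev had been allocated, i.e. a write through an uninitialized pointer; the fix moves the assignment after devm_kzalloc() succeeds. The corrected ordering in sketch form:

	/* acquire the external resource first */
	extcon = extcon_get_extcon_dev(config->extcon);
	if (!extcon)
		return -EPROBE_DEFER;

	/* then allocate the private struct and check it */
	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
	if (!otg_dev)
		return -ENOMEM;

	/* only now is it safe to stash the pointer */
	otg_dev->extcon = extcon;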
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 685fef71d3d1..f2280606b73c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
 #define NOVATELWIRELESS_PRODUCT_E371		0x9011
+#define NOVATELWIRELESS_PRODUCT_U620L		0x9022
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+#define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
 
 /* iBall 3.5G connect wireless modem */
 #define IBALL_3_5G_CONNECT			0x9605
@@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
 	.sendsetup = BIT(0) | BIT(1),
 };
 
+static const struct option_blacklist_info four_g_w100_blacklist = {
+	.sendsetup = BIT(1) | BIT(2),
+	.reserved = BIT(3),
+};
+
 static const struct option_blacklist_info alcatel_x200_blacklist = {
 	.sendsetup = BIT(0) | BIT(1),
 	.reserved = BIT(4),
@@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
 	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
 	},
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+	},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
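Note: in option_blacklist_info, BIT(n) refers to bInterfaceNumber n: sendsetup marks interfaces that need the extra setup message, and reserved marks interfaces the driver must leave unclaimed (typically a network function handled by another driver). Reading the new W100 entry with comments:

	static const struct option_blacklist_info four_g_w100_blacklist = {
		.sendsetup = BIT(1) | BIT(2),	/* modem ports needing setup */
		.reserved  = BIT(3),		/* leave for another driver */
	};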
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 5022fcfa0260..9919d2a9faf2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,6 +22,8 @@
 #define DRIVER_AUTHOR "Qualcomm Inc"
 #define DRIVER_DESC "Qualcomm USB Serial driver"
 
+#define QUECTEL_EC20_PID	0x9215
+
 /* standard device layouts supported by this driver */
 enum qcserial_layouts {
 	QCSERIAL_G2K = 0,	/* Gobi 2000 */
@@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = {
 };
 MODULE_DEVICE_TABLE(usb, id_table);
 
+static int handle_quectel_ec20(struct device *dev, int ifnum)
+{
+	int altsetting = 0;
+
+	/*
+	 * Quectel EC20 Mini PCIe LTE module layout:
+	 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
+	 * 1: NMEA
+	 * 2: AT-capable modem port
+	 * 3: Modem interface
+	 * 4: NDIS
+	 */
+	switch (ifnum) {
+	case 0:
+		dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
+		break;
+	case 1:
+		dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
+		break;
+	case 2:
+	case 3:
+		dev_dbg(dev, "Quectel EC20 Modem port found\n");
+		break;
+	case 4:
+		/* Don't claim the QMI/net interface */
+		altsetting = -1;
+		break;
+	}
+
+	return altsetting;
+}
+
 static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 {
 	struct usb_host_interface *intf = serial->interface->cur_altsetting;
@@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 	int altsetting = -1;
 	bool sendsetup = false;
 
+	/* we only support vendor specific functions */
+	if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+		goto done;
+
 	nintf = serial->dev->actconfig->desc.bNumInterfaces;
 	dev_dbg(dev, "Num Interfaces = %d\n", nintf);
 	ifnum = intf->desc.bInterfaceNumber;
@@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		altsetting = -1;
 		break;
 	case QCSERIAL_G2K:
+		/* handle non-standard layouts */
+		if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
+			altsetting = handle_quectel_ec20(dev, ifnum);
+			goto done;
+		}
+
 		/*
 		 * Gobi 2K+ USB layout:
 		 * 0: QMI/net
@@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		break;
 	case QCSERIAL_HWI:
 		/*
-		 * Huawei layout:
-		 * 0: AT-capable modem port
-		 * 1: DM/DIAG
-		 * 2: AT-capable modem port
-		 * 3: CCID-compatible PCSC interface
-		 * 4: QMI/net
-		 * 5: NMEA
+		 * Huawei devices map functions by subclass + protocol
+		 * instead of interface numbers. The protocol identify
+		 * a specific function, while the subclass indicate a
+		 * specific firmware source
+		 *
+		 * This is a blacklist of functions known to be
+		 * non-serial. The rest are assumed to be serial and
+		 * will be handled by this driver
 		 */
-		switch (ifnum) {
-		case 0:
-		case 2:
-			dev_dbg(dev, "Modem port found\n");
-			break;
-		case 1:
-			dev_dbg(dev, "DM/DIAG interface found\n");
-			break;
-		case 5:
-			dev_dbg(dev, "NMEA GPS interface found\n");
-			break;
-		default:
-			/* don't claim any unsupported interface */
+		switch (intf->desc.bInterfaceProtocol) {
+		/* QMI combined (qmi_wwan) */
+		case 0x07:
+		case 0x37:
+		case 0x67:
+		/* QMI data (qmi_wwan) */
+		case 0x08:
+		case 0x38:
+		case 0x68:
+		/* QMI control (qmi_wwan) */
+		case 0x09:
+		case 0x39:
+		case 0x69:
+		/* NCM like (huawei_cdc_ncm) */
+		case 0x16:
+		case 0x46:
+		case 0x76:
 			altsetting = -1;
 			break;
+		default:
+			dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
+				intf->desc.bInterfaceClass,
+				intf->desc.bInterfaceSubClass,
+				intf->desc.bInterfaceProtocol);
 		}
 		break;
 	default:
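Note: the Huawei branch inverts its matching strategy: instead of whitelisting hardcoded interface numbers, it blacklists the protocols known to belong to qmi_wwan or huawei_cdc_ncm and treats everything else as a serial port, so new firmware layouts work without driver updates. The same test condensed into a helper, mirroring the switch above:

	/* true for protocols handled by qmi_wwan or huawei_cdc_ncm,
	 * which qcserial must not claim (sketch of the logic above)
	 */
	static bool huawei_non_serial(u8 proto)
	{
		switch (proto) {
		case 0x07: case 0x37: case 0x67:	/* QMI combined */
		case 0x08: case 0x38: case 0x68:	/* QMI data */
		case 0x09: case 0x39: case 0x69:	/* QMI control */
		case 0x16: case 0x46: case 0x76:	/* NCM like */
			return true;
		default:
			return false;
		}
	}

The EC20 path added earlier works the opposite way because that device reuses a generic Gobi PID: it is singled out by QUECTEL_EC20_PID plus its five-interface configuration, with interface 4 left unclaimed for the network driver.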
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e9da41d9fe7f..2694df2f4559 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
 	{ }	/* terminator */
 };
 
@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
 	{ }	/* terminator */
 };
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 4a2423e84d55..98f35c656c02 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -56,6 +56,10 @@
 #define ABBOTT_PRODUCT_ID	ABBOTT_STEREO_PLUG_ID
 #define ABBOTT_STRIP_PORT_ID	0x3420
 
+/* Honeywell vendor and product IDs */
+#define HONEYWELL_VENDOR_ID		0x10ac
+#define HONEYWELL_HGI80_PRODUCT_ID	0x0102	/* Honeywell HGI80 */
+
 /* Commands */
 #define TI_GET_VERSION		0x01
 #define TI_GET_PORT_STATUS	0x02
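Note: supporting a new device in this driver is a two-part change: define the vendor/product pair in the header, then list it in both the 3410-only and combined ID tables so it matches at probe time. The matching flow in sketch form (the table name here is illustrative):

	/* USB_DEVICE() expands to a usb_device_id keyed on
	 * idVendor/idProduct; MODULE_DEVICE_TABLE() exports the table
	 * so userspace can autoload the module on hotplug.
	 */
	static const struct usb_device_id example_ids[] = {
		{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
		{ }	/* terminator */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);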