Diffstat (limited to 'drivers')
 drivers/acpi/acpi_video.c | 27
 drivers/acpi/acpi_watchdog.c | 59
 drivers/acpi/button.c | 24
 drivers/acpi/scan.c | 2
 drivers/acpi/sleep.c | 13
 drivers/atm/iphase.c | 4
 drivers/base/dma-coherent.c | 5
 drivers/base/dma-mapping.c | 6
 drivers/block/loop.c | 64
 drivers/block/loop.h | 1
 drivers/block/rbd.c | 101
 drivers/block/swim.c | 49
 drivers/block/swim3.c | 6
 drivers/bus/Kconfig | 1
 drivers/cdrom/cdrom.c | 2
 drivers/char/random.c | 172
 drivers/char/virtio_console.c | 157
 drivers/clocksource/Kconfig | 8
 drivers/clocksource/Makefile | 1
 drivers/clocksource/timer-imx-tpm.c | 45
 drivers/clocksource/timer-npcm7xx.c | 215
 drivers/cpufreq/Kconfig.arm | 10
 drivers/cpufreq/brcmstb-avs-cpufreq.c | 323
 drivers/dax/device.c | 2
 drivers/firmware/arm_scmi/clock.c | 2
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7
 drivers/gpu/drm/amd/amdkfd/Kconfig | 1
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 17
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 54
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16
 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4
 drivers/gpu/drm/drm_dp_dual_mode_helper.c | 39
 drivers/gpu/drm/drm_edid.c | 11
 drivers/gpu/drm/exynos/exynos_drm_fb.c | 73
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 1
 drivers/gpu/drm/i915/gvt/display.c | 10
 drivers/gpu/drm/i915/gvt/dmabuf.c | 1
 drivers/gpu/drm/i915/gvt/fb_decoder.c | 27
 drivers/gpu/drm/i915/gvt/gtt.c | 52
 drivers/gpu/drm/i915/gvt/gtt.h | 2
 drivers/gpu/drm/i915/gvt/handlers.c | 1
 drivers/gpu/drm/i915/gvt/kvmgt.c | 2
 drivers/gpu/drm/i915/i915_drv.c | 27
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
 drivers/gpu/drm/i915/i915_pmu.c | 37
 drivers/gpu/drm/i915/intel_audio.c | 2
 drivers/gpu/drm/i915/intel_bios.c | 13
 drivers/gpu/drm/i915/intel_cdclk.c | 16
 drivers/gpu/drm/i915/intel_drv.h | 4
 drivers/gpu/drm/i915/intel_fbdev.c | 2
 drivers/gpu/drm/i915/intel_lrc.c | 9
 drivers/gpu/drm/i915/intel_runtime_pm.c | 11
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 1
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1
 drivers/gpu/drm/msm/disp/mdp_format.c | 3
 drivers/gpu/drm/msm/disp/mdp_kms.h | 2
 drivers/gpu/drm/msm/dsi/dsi_host.c | 16
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 109
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 2
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 28
 drivers/gpu/drm/msm/msm_fb.c | 3
 drivers/gpu/drm/msm/msm_fbdev.c | 11
 drivers/gpu/drm/msm/msm_gem.c | 20
 drivers/gpu/drm/msm/msm_kms.h | 5
 drivers/gpu/drm/qxl/qxl_cmd.c | 6
 drivers/gpu/drm/qxl/qxl_drv.h | 1
 drivers/gpu/drm/qxl/qxl_ioctl.c | 4
 drivers/gpu/drm/qxl/qxl_release.c | 18
 drivers/gpu/drm/sun4i/sun4i_lvds.c | 55
 drivers/gpu/drm/vc4/vc4_bo.c | 2
 drivers/gpu/drm/vc4/vc4_validate_shaders.c | 1
 drivers/gpu/drm/virtio/virtgpu_vq.c | 4
 drivers/hid/hid-ids.h | 3
 drivers/hid/hid-input.c | 24
 drivers/hid/hidraw.c | 5
 drivers/hid/i2c-hid/i2c-hid.c | 13
 drivers/hid/wacom_wac.c | 76
 drivers/hwmon/k10temp.c | 17
 drivers/hwmon/nct6683.c | 4
 drivers/hwmon/scmi-hwmon.c | 5
 drivers/isdn/mISDN/dsp_hwec.c | 8
 drivers/isdn/mISDN/l1oip_core.c | 14
 drivers/md/md.c | 6
 drivers/md/raid1.c | 25
 drivers/memory/emif-asm-offsets.c | 72
 drivers/message/fusion/mptsas.c | 1
 drivers/mmc/host/renesas_sdhi_internal_dmac.c | 39
 drivers/mmc/host/sdhci-pci-core.c | 25
 drivers/mtd/chips/cfi_cmdset_0001.c | 33
 drivers/mtd/chips/cfi_cmdset_0002.c | 9
 drivers/mtd/nand/core.c | 3
 drivers/mtd/nand/raw/marvell_nand.c | 25
 drivers/mtd/nand/raw/tango_nand.c | 2
 drivers/mtd/spi-nor/cadence-quadspi.c | 19
 drivers/net/bonding/bond_main.c | 3
 drivers/net/dsa/mv88e6xxx/hwtstamp.c | 12
 drivers/net/ethernet/amd/xgbe/xgbe-common.h | 8
 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 16
 drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1
 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 24
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 196
 drivers/net/ethernet/amd/xgbe/xgbe.h | 9
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 49
 drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 2
 drivers/net/ethernet/hisilicon/hns/hnae.h | 2
 drivers/net/ethernet/ibm/ibmvnic.c | 89
 drivers/net/ethernet/ibm/ibmvnic.h | 1
 drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2
 drivers/net/ethernet/intel/ice/ice_common.c | 22
 drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2
 drivers/net/ethernet/intel/ice/ice_main.c | 4
 drivers/net/ethernet/intel/ice/ice_sched.c | 4
 drivers/net/ethernet/intel/igb/igb_main.c | 17
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
 drivers/net/ethernet/marvell/mvpp2.c | 14
 drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 44
 drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 2
 drivers/net/ethernet/netronome/nfp/flower/main.c | 6
 drivers/net/ethernet/netronome/nfp/flower/main.h | 8
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 5
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 3
 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 11
 drivers/net/ethernet/sfc/ef10.c | 85
 drivers/net/ethernet/sfc/efx.c | 143
 drivers/net/ethernet/sfc/efx.h | 21
 drivers/net/ethernet/sfc/farch.c | 41
 drivers/net/ethernet/sfc/net_driver.h | 61
 drivers/net/ethernet/sfc/rx.c | 122
 drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2
 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7
 drivers/net/ethernet/ti/cpsw.c | 2
 drivers/net/macsec.c | 5
 drivers/net/phy/marvell.c | 9
 drivers/net/phy/microchip.c | 178
 drivers/net/ppp/pppoe.c | 4
 drivers/net/team/team.c | 38
 drivers/net/tun.c | 7
 drivers/net/usb/qmi_wwan.c | 1
 drivers/net/virtio_net.c | 79
 drivers/net/vmxnet3/vmxnet3_drv.c | 17
 drivers/net/vmxnet3/vmxnet3_int.h | 4
 drivers/nvdimm/Kconfig | 3
 drivers/nvdimm/dimm_devs.c | 22
 drivers/nvdimm/of_pmem.c | 2
 drivers/pci/dwc/pcie-kirin.c | 2
 drivers/pci/host/pci-aardvark.c | 53
 drivers/pci/pci-driver.c | 5
 drivers/pci/pci.c | 4
 drivers/rapidio/devices/rio_mport_cdev.c | 19
 drivers/s390/block/dasd_alias.c | 13
 drivers/s390/block/dasd_diag.c | 1
 drivers/s390/char/sclp_early_core.c | 2
 drivers/s390/cio/chsc.c | 14
 drivers/s390/cio/vfio_ccw_fsm.c | 19
 drivers/s390/net/qeth_core.h | 2
 drivers/s390/net/qeth_core_main.c | 158
 drivers/s390/net/qeth_core_mpc.h | 12
 drivers/s390/net/qeth_l2_main.c | 60
 drivers/s390/net/smsgiucv.c | 2
 drivers/scsi/fnic/fnic_trace.c | 2
 drivers/scsi/megaraid/megaraid_sas_fusion.c | 6
 drivers/scsi/scsi_debug.c | 33
 drivers/scsi/scsi_transport_iscsi.c | 29
 drivers/scsi/sd.c | 2
 drivers/scsi/sd_zbc.c | 140
 drivers/scsi/ufs/ufshcd.c | 40
 drivers/soc/bcm/raspberrypi-power.c | 2
 drivers/target/target_core_pscsi.c | 2
 drivers/watchdog/aspeed_wdt.c | 7
 drivers/watchdog/renesas_wdt.c | 6
 drivers/watchdog/sch311x_wdt.c | 2
 drivers/watchdog/w83977f_wdt.c | 2
 drivers/watchdog/wafer5823wdt.c | 2
 drivers/xen/xen-pciback/conf_space_quirks.c | 2
 drivers/xen/xen-pciback/pci_stub.c | 8
 drivers/xen/xenbus/xenbus_dev_frontend.c | 3
 181 files changed, 2948 insertions(+), 1608 deletions(-)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
 	return opregion;
 }
 
+static bool dmi_is_desktop(void)
+{
+	const char *chassis_type;
+
+	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+	if (!chassis_type)
+		return false;
+
+	if (!strcmp(chassis_type, "3") ||  /*  3: Desktop */
+	    !strcmp(chassis_type, "4") ||  /*  4: Low Profile Desktop */
+	    !strcmp(chassis_type, "5") ||  /*  5: Pizza Box */
+	    !strcmp(chassis_type, "6") ||  /*  6: Mini Tower */
+	    !strcmp(chassis_type, "7") ||  /*  7: Tower */
+	    !strcmp(chassis_type, "11"))   /* 11: Main Server Chassis */
+		return true;
+
+	return false;
+}
+
 int acpi_video_register(void)
 {
 	int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
 	 * win8 ready (where we also prefer the native backlight driver, so
 	 * normally the acpi_video code should not register there anyways).
 	 */
-	if (only_lcd == -1)
-		only_lcd = acpi_osi_is_win8();
+	if (only_lcd == -1) {
+		if (dmi_is_desktop() && acpi_osi_is_win8())
+			only_lcd = true;
+		else
+			only_lcd = false;
+	}
 
 	dmi_check_system(video_dmi_table);
 
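The dmi_is_desktop() helper keys off the SMBIOS enclosure-type field, which the DMI core exposes as a decimal string. A minimal sketch of reusing the same heuristic from another driver that already links against the DMI core (the helper name dmi_is_desktop_like() is illustrative, not part of this patch):

    #include <linux/dmi.h>
    #include <linux/string.h>

    /* Illustrative only: true for SMBIOS chassis types with no built-in panel. */
    static bool dmi_is_desktop_like(void)
    {
        /* DMI_CHASSIS_TYPE is the SMBIOS "Enclosure Type" as a string, e.g. "3". */
        const char *type = dmi_get_system_info(DMI_CHASSIS_TYPE);

        if (!type)
            return false;  /* no DMI data: err on the laptop side */
        return !strcmp(type, "3") || !strcmp(type, "4") ||
               !strcmp(type, "5") || !strcmp(type, "6") ||
               !strcmp(type, "7") || !strcmp(type, "11");
    }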
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
 #define pr_fmt(fmt) "ACPI: watchdog: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 
 #include "internal.h"
 
+static const struct dmi_system_id acpi_watchdog_skip[] = {
+	{
+		/*
+		 * On Lenovo Z50-70 there are two issues with the WDAT
+		 * table. First some of the instructions use RTC SRAM
+		 * to store persistent information. This does not work well
+		 * with Linux RTC driver. Second, more important thing is
+		 * that the instructions do not actually reset the system.
+		 *
+		 * On this particular system iTCO_wdt seems to work just
+		 * fine so we prefer that over WDAT for now.
+		 *
+		 * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
+		 */
+		.ident = "Lenovo Z50-70",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
+		},
+	},
+	{}
+};
+
+static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+{
+	const struct acpi_table_wdat *wdat = NULL;
+	acpi_status status;
+
+	if (acpi_disabled)
+		return NULL;
+
+	if (dmi_check_system(acpi_watchdog_skip))
+		return NULL;
+
+	status = acpi_get_table(ACPI_SIG_WDAT, 0,
+				(struct acpi_table_header **)&wdat);
+	if (ACPI_FAILURE(status)) {
+		/* It is fine if there is no WDAT */
+		return NULL;
+	}
+
+	return wdat;
+}
+
 /**
  * Returns true if this system should prefer ACPI based watchdog instead of
  * the native one (which are typically the same hardware).
  */
 bool acpi_has_watchdog(void)
 {
-	struct acpi_table_header hdr;
-
-	if (acpi_disabled)
-		return false;
-
-	return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
+	return !!acpi_watchdog_get_wdat();
 }
 EXPORT_SYMBOL_GPL(acpi_has_watchdog);
 
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
 	struct platform_device *pdev;
 	struct resource *resources;
 	size_t nresources = 0;
-	acpi_status status;
 	int i;
 
-	status = acpi_get_table(ACPI_SIG_WDAT, 0,
-				(struct acpi_table_header **)&wdat);
-	if (ACPI_FAILURE(status)) {
+	wdat = acpi_watchdog_get_wdat();
+	if (!wdat) {
 		/* It is fine if there is no WDAT */
 		return;
 	}
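The skip list above uses the standard DMI quirk-table pattern: a NULL-terminated array of match rules checked in one call. A minimal sketch of that pattern (the table and machine names are illustrative only):

    #include <linux/dmi.h>

    /* Illustrative quirk table: entries match on vendor + product strings. */
    static const struct dmi_system_id example_skip_list[] = {
        {
            .ident = "Example Machine",   /* hypothetical entry */
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                DMI_MATCH(DMI_PRODUCT_NAME, "1234"),
            },
        },
        {}  /* empty terminator is mandatory */
    };

    static bool example_should_skip(void)
    {
        /* dmi_check_system() returns how many entries matched this box. */
        return dmi_check_system(example_skip_list) > 0;
    }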
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
 		  NULL, 0644);
 MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
 
-module_acpi_driver(acpi_button_driver);
+static int acpi_button_register_driver(struct acpi_driver *driver)
+{
+	/*
+	 * Modules such as nouveau.ko and i915.ko have a link time dependency
+	 * on acpi_lid_open(), and would therefore not be loadable on ACPI
+	 * capable kernels booted in non-ACPI mode if the return value of
+	 * acpi_bus_register_driver() is returned from here with ACPI disabled
+	 * when this driver is built as a module.
+	 */
+	if (acpi_disabled)
+		return 0;
+
+	return acpi_bus_register_driver(driver);
+}
+
+static void acpi_button_unregister_driver(struct acpi_driver *driver)
+{
+	if (!acpi_disabled)
+		acpi_bus_unregister_driver(driver);
+}
+
+module_driver(acpi_button_driver, acpi_button_register_driver,
+	      acpi_button_unregister_driver);
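module_acpi_driver() expands to a module_init()/module_exit() pair that registers unconditionally; the patch open-codes that pair through module_driver() so the register step can succeed as a no-op when ACPI is off. A rough sketch of the same shape for a hypothetical driver (example_hw_absent() is invented for illustration):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_driver example_driver;  /* illustrative driver */

    static int example_register(struct platform_driver *drv)
    {
        if (example_hw_absent())   /* hypothetical runtime gate */
            return 0;              /* module loads; nothing registers */
        return platform_driver_register(drv);
    }

    static void example_unregister(struct platform_driver *drv)
    {
        if (!example_hw_absent())
            platform_driver_unregister(drv);
    }

    /* Expands to the module_init()/module_exit() pair calling the hooks. */
    module_driver(example_driver, example_register, example_unregister);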
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
 	acpi_cmos_rtc_init();
 	acpi_container_init();
 	acpi_memory_hotplug_init();
+	acpi_watchdog_init();
 	acpi_pnp_init();
 	acpi_int340x_thermal_init();
 	acpi_amba_init();
-	acpi_watchdog_init();
 	acpi_init_lpit();
 
 	acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
 		},
 	},
+	/*
+	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
+	 * the Low Power S0 Idle firmware interface (see
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
+	 */
+	{
+	.callback = init_no_lps0,
+	.ident = "ThinkPad X1 Tablet(2016)",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+		},
+	},
 	{},
 };
 
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 44abb8a0a5e5..be076606d30e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
           if ((vcc->pop) && (skb1->len != 0))
           {
              vcc->pop(vcc, skb1);
-             IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
+             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                           (long)skb1);)
           }
           else
@@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
 	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
 	if (status & TRANSMIT_DONE){
 
-	   IF_EVENT(printk("Tansmit Done Intr logic run\n");)
+	   IF_EVENT(printk("Transmit Done Intr logic run\n");)
 	   spin_lock_irqsave(&iadev->tx_lock, flags);
 	   ia_tx_poll(iadev);
 	   spin_unlock_irqrestore(&iadev->tx_lock, flags);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
  */
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 		void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
 	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (off < count && user_count <= (count - off)) {
+	if (off < count && user_count <= (count - off))
 		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
+				      page_to_pfn(virt_to_page(cpu_addr)) + off,
 				      user_count << PAGE_SHIFT,
 				      vma->vm_page_prot);
-	}
 #endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 
 	return ret;
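The retained off/count guard is what keeps the remap from running past the allocation; a small stand-alone restatement of the arithmetic (pure illustration, not kernel code):

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * A window of user_count pages starting at page offset off fits inside
     * an allocation of count pages only when off is in range and the window
     * does not run past the end.  Checking off < count first also keeps
     * count - off from wrapping around.
     */
    static bool mmap_window_fits(size_t off, size_t user_count, size_t count)
    {
        return off < count && user_count <= (count - off);
    }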
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
 static void lo_complete_rq(struct request *rq)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	blk_status_t ret = BLK_STS_OK;
 
-	if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
-		     cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
-		struct bio *bio = cmd->rq->bio;
-
-		bio_advance(bio, cmd->ret);
-		zero_fill_bio(bio);
+	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+	    req_op(rq) != REQ_OP_READ) {
+		if (cmd->ret < 0)
+			ret = BLK_STS_IOERR;
+		goto end_io;
 	}
 
-	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
+	/*
+	 * Short READ - if we got some data, advance our request and
+	 * retry it. If we got no data, end the rest with EIO.
+	 */
+	if (cmd->ret) {
+		blk_update_request(rq, BLK_STS_OK, cmd->ret);
+		cmd->ret = 0;
+		blk_mq_requeue_request(rq, true);
+	} else {
+		if (cmd->use_aio) {
+			struct bio *bio = rq->bio;
+
+			while (bio) {
+				zero_fill_bio(bio);
+				bio = bio->bi_next;
+			}
+		}
+		ret = BLK_STS_IOERR;
+end_io:
+		blk_mq_end_request(rq, ret);
+	}
 }
 
 static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 {
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+
 	if (!atomic_dec_and_test(&cmd->ref))
 		return;
 	kfree(cmd->bvec);
 	cmd->bvec = NULL;
-	blk_mq_complete_request(cmd->rq);
+	blk_mq_complete_request(rq);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 {
 	struct iov_iter iter;
 	struct bio_vec *bvec;
-	struct request *rq = cmd->rq;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	struct bio *bio = rq->bio;
 	struct file *file = lo->lo_backing_file;
 	unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
-	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
-	struct loop_device *lo = cmd->rq->q->queuedata;
+	struct request *rq = bd->rq;
+	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	struct loop_device *lo = rq->q->queuedata;
 
-	blk_mq_start_request(bd->rq);
+	blk_mq_start_request(rq);
 
 	if (lo->lo_state != Lo_bound)
 		return BLK_STS_IOERR;
 
-	switch (req_op(cmd->rq)) {
+	switch (req_op(rq)) {
 	case REQ_OP_FLUSH:
 	case REQ_OP_DISCARD:
 	case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	/* always use the first bio's css */
 #ifdef CONFIG_BLK_CGROUP
-	if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
-		cmd->css = cmd->rq->bio->bi_css;
+	if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+		cmd->css = rq->bio->bi_css;
 		css_get(cmd->css);
 	} else
 #endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
-	const bool write = op_is_write(req_op(cmd->rq));
-	struct loop_device *lo = cmd->rq->q->queuedata;
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	const bool write = op_is_write(req_op(rq));
+	struct loop_device *lo = rq->q->queuedata;
 	int ret = 0;
 
 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 		goto failed;
 	}
 
-	ret = do_req_filebacked(lo, cmd->rq);
+	ret = do_req_filebacked(lo, rq);
  failed:
 	/* complete non-aio request */
 	if (!cmd->use_aio || ret) {
 		cmd->ret = ret ? -EIO : 0;
-		blk_mq_complete_request(cmd->rq);
+		blk_mq_complete_request(rq);
 	}
 }
 
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	cmd->rq = rq;
 	kthread_init_work(&cmd->work, loop_queue_work);
-
 	return 0;
 }
 
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
 
 struct loop_cmd {
 	struct kthread_work work;
-	struct request *rq;
 	bool use_aio; /* use AIO interface to handle I/O */
 	atomic_t ref; /* only for aio */
 	long ret;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 07dc5419bd63..8e8b04cc569a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
732 */ 732 */
733enum { 733enum {
734 Opt_queue_depth, 734 Opt_queue_depth,
735 Opt_lock_timeout,
735 Opt_last_int, 736 Opt_last_int,
736 /* int args above */ 737 /* int args above */
737 Opt_last_string, 738 Opt_last_string,
@@ -740,11 +741,13 @@ enum {
740 Opt_read_write, 741 Opt_read_write,
741 Opt_lock_on_read, 742 Opt_lock_on_read,
742 Opt_exclusive, 743 Opt_exclusive,
744 Opt_notrim,
743 Opt_err 745 Opt_err
744}; 746};
745 747
746static match_table_t rbd_opts_tokens = { 748static match_table_t rbd_opts_tokens = {
747 {Opt_queue_depth, "queue_depth=%d"}, 749 {Opt_queue_depth, "queue_depth=%d"},
750 {Opt_lock_timeout, "lock_timeout=%d"},
748 /* int args above */ 751 /* int args above */
749 /* string args above */ 752 /* string args above */
750 {Opt_read_only, "read_only"}, 753 {Opt_read_only, "read_only"},
@@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = {
753 {Opt_read_write, "rw"}, /* Alternate spelling */ 756 {Opt_read_write, "rw"}, /* Alternate spelling */
754 {Opt_lock_on_read, "lock_on_read"}, 757 {Opt_lock_on_read, "lock_on_read"},
755 {Opt_exclusive, "exclusive"}, 758 {Opt_exclusive, "exclusive"},
759 {Opt_notrim, "notrim"},
756 {Opt_err, NULL} 760 {Opt_err, NULL}
757}; 761};
758 762
759struct rbd_options { 763struct rbd_options {
760 int queue_depth; 764 int queue_depth;
765 unsigned long lock_timeout;
761 bool read_only; 766 bool read_only;
762 bool lock_on_read; 767 bool lock_on_read;
763 bool exclusive; 768 bool exclusive;
769 bool trim;
764}; 770};
765 771
766#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ 772#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
773#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
767#define RBD_READ_ONLY_DEFAULT false 774#define RBD_READ_ONLY_DEFAULT false
768#define RBD_LOCK_ON_READ_DEFAULT false 775#define RBD_LOCK_ON_READ_DEFAULT false
769#define RBD_EXCLUSIVE_DEFAULT false 776#define RBD_EXCLUSIVE_DEFAULT false
777#define RBD_TRIM_DEFAULT true
770 778
771static int parse_rbd_opts_token(char *c, void *private) 779static int parse_rbd_opts_token(char *c, void *private)
772{ 780{
@@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private)
796 } 804 }
797 rbd_opts->queue_depth = intval; 805 rbd_opts->queue_depth = intval;
798 break; 806 break;
807 case Opt_lock_timeout:
808 /* 0 is "wait forever" (i.e. infinite timeout) */
809 if (intval < 0 || intval > INT_MAX / 1000) {
810 pr_err("lock_timeout out of range\n");
811 return -EINVAL;
812 }
813 rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
814 break;
799 case Opt_read_only: 815 case Opt_read_only:
800 rbd_opts->read_only = true; 816 rbd_opts->read_only = true;
801 break; 817 break;
@@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
808 case Opt_exclusive: 824 case Opt_exclusive:
809 rbd_opts->exclusive = true; 825 rbd_opts->exclusive = true;
810 break; 826 break;
827 case Opt_notrim:
828 rbd_opts->trim = false;
829 break;
811 default: 830 default:
812 /* libceph prints "bad option" msg */ 831 /* libceph prints "bad option" msg */
813 return -EINVAL; 832 return -EINVAL;
@@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
1392 case OBJ_OP_DISCARD: 1411 case OBJ_OP_DISCARD:
1393 return true; 1412 return true;
1394 default: 1413 default:
1395 rbd_assert(0); 1414 BUG();
1396 } 1415 }
1397} 1416}
1398 1417
@@ -2466,7 +2485,7 @@ again:
2466 } 2485 }
2467 return false; 2486 return false;
2468 default: 2487 default:
2469 rbd_assert(0); 2488 BUG();
2470 } 2489 }
2471} 2490}
2472 2491
@@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2494 } 2513 }
2495 return false; 2514 return false;
2496 default: 2515 default:
2497 rbd_assert(0); 2516 BUG();
2498 } 2517 }
2499} 2518}
2500 2519
@@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3533/* 3552/*
3534 * lock_rwsem must be held for read 3553 * lock_rwsem must be held for read
3535 */ 3554 */
3536static void rbd_wait_state_locked(struct rbd_device *rbd_dev) 3555static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3537{ 3556{
3538 DEFINE_WAIT(wait); 3557 DEFINE_WAIT(wait);
3558 unsigned long timeout;
3559 int ret = 0;
3560
3561 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3562 return -EBLACKLISTED;
3563
3564 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3565 return 0;
3566
3567 if (!may_acquire) {
3568 rbd_warn(rbd_dev, "exclusive lock required");
3569 return -EROFS;
3570 }
3539 3571
3540 do { 3572 do {
3541 /* 3573 /*
@@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
3547 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, 3579 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3548 TASK_UNINTERRUPTIBLE); 3580 TASK_UNINTERRUPTIBLE);
3549 up_read(&rbd_dev->lock_rwsem); 3581 up_read(&rbd_dev->lock_rwsem);
3550 schedule(); 3582 timeout = schedule_timeout(ceph_timeout_jiffies(
3583 rbd_dev->opts->lock_timeout));
3551 down_read(&rbd_dev->lock_rwsem); 3584 down_read(&rbd_dev->lock_rwsem);
3552 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3585 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3553 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)); 3586 ret = -EBLACKLISTED;
3587 break;
3588 }
3589 if (!timeout) {
3590 rbd_warn(rbd_dev, "timed out waiting for lock");
3591 ret = -ETIMEDOUT;
3592 break;
3593 }
3594 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3554 3595
3555 finish_wait(&rbd_dev->lock_waitq, &wait); 3596 finish_wait(&rbd_dev->lock_waitq, &wait);
3597 return ret;
3556} 3598}
3557 3599
3558static void rbd_queue_workfn(struct work_struct *work) 3600static void rbd_queue_workfn(struct work_struct *work)
@@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work)
3638 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); 3680 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3639 if (must_be_locked) { 3681 if (must_be_locked) {
3640 down_read(&rbd_dev->lock_rwsem); 3682 down_read(&rbd_dev->lock_rwsem);
3641 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3683 result = rbd_wait_state_locked(rbd_dev,
3642 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3684 !rbd_dev->opts->exclusive);
3643 if (rbd_dev->opts->exclusive) { 3685 if (result)
3644 rbd_warn(rbd_dev, "exclusive lock required");
3645 result = -EROFS;
3646 goto err_unlock;
3647 }
3648 rbd_wait_state_locked(rbd_dev);
3649 }
3650 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3651 result = -EBLACKLISTED;
3652 goto err_unlock; 3686 goto err_unlock;
3653 }
3654 } 3687 }
3655 3688
3656 img_request = rbd_img_request_create(rbd_dev, op_type, snapc); 3689 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3902{ 3935{
3903 struct gendisk *disk; 3936 struct gendisk *disk;
3904 struct request_queue *q; 3937 struct request_queue *q;
3905 u64 segment_size; 3938 unsigned int objset_bytes =
3939 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
3906 int err; 3940 int err;
3907 3941
3908 /* create gendisk info */ 3942 /* create gendisk info */
@@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
3942 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 3976 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
3943 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 3977 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3944 3978
3945 /* set io sizes to object size */ 3979 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
3946 segment_size = rbd_obj_bytes(&rbd_dev->header);
3947 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3948 q->limits.max_sectors = queue_max_hw_sectors(q); 3980 q->limits.max_sectors = queue_max_hw_sectors(q);
3949 blk_queue_max_segments(q, USHRT_MAX); 3981 blk_queue_max_segments(q, USHRT_MAX);
3950 blk_queue_max_segment_size(q, UINT_MAX); 3982 blk_queue_max_segment_size(q, UINT_MAX);
3951 blk_queue_io_min(q, segment_size); 3983 blk_queue_io_min(q, objset_bytes);
3952 blk_queue_io_opt(q, segment_size); 3984 blk_queue_io_opt(q, objset_bytes);
3953 3985
3954 /* enable the discard support */ 3986 if (rbd_dev->opts->trim) {
3955 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 3987 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
3956 q->limits.discard_granularity = segment_size; 3988 q->limits.discard_granularity = objset_bytes;
3957 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 3989 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
3958 blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 3990 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
3991 }
3959 3992
3960 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 3993 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3961 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; 3994 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf,
5179 5212
5180 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; 5213 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5181 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 5214 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5215 rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5182 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 5216 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5183 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 5217 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5218 rbd_opts->trim = RBD_TRIM_DEFAULT;
5184 5219
5185 copts = ceph_parse_options(options, mon_addrs, 5220 copts = ceph_parse_options(options, mon_addrs,
5186 mon_addrs + mon_addrs_size - 1, 5221 mon_addrs + mon_addrs_size - 1,
@@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5216 5251
5217static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) 5252static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5218{ 5253{
5254 int ret;
5255
5219 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { 5256 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5220 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); 5257 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5221 return -EINVAL; 5258 return -EINVAL;
@@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5223 5260
5224 /* FIXME: "rbd map --exclusive" should be in interruptible */ 5261 /* FIXME: "rbd map --exclusive" should be in interruptible */
5225 down_read(&rbd_dev->lock_rwsem); 5262 down_read(&rbd_dev->lock_rwsem);
5226 rbd_wait_state_locked(rbd_dev); 5263 ret = rbd_wait_state_locked(rbd_dev, true);
5227 up_read(&rbd_dev->lock_rwsem); 5264 up_read(&rbd_dev->lock_rwsem);
5228 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 5265 if (ret) {
5229 rbd_warn(rbd_dev, "failed to acquire exclusive lock"); 5266 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5230 return -EROFS; 5267 return -EROFS;
5231 } 5268 }
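The new lock_timeout option is taken in whole seconds and converted once at parse time; a minimal sketch of that conversion in isolation (function name invented for illustration):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>

    /*
     * Seconds in, jiffies out.  0 keeps the old "wait forever" behaviour;
     * the INT_MAX / 1000 bound keeps seconds * 1000 from overflowing
     * before msecs_to_jiffies() sees it.
     */
    static int example_parse_lock_timeout(int seconds, unsigned long *out)
    {
        if (seconds < 0 || seconds > INT_MAX / 1000)
            return -EINVAL;
        *out = msecs_to_jiffies(seconds * 1000);
        return 0;
    }

With that in place, a map option string such as lock_timeout=60 (per the token table above) turns the former indefinite wait in rbd_wait_state_locked() into an -ETIMEDOUT after a minute, and notrim skips the discard/write-zeroes setup in rbd_init_disk().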
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
 /* Select values for swim_select and swim_readbit */
 
 #define READ_DATA_0	0x074
-#define TWOMEG_DRIVE	0x075
+#define ONEMEG_DRIVE	0x075
 #define SINGLE_SIDED	0x076
 #define DRIVE_PRESENT	0x077
 #define DISK_IN	0x170
@@ -118,9 +118,9 @@ struct iwm {
 #define TRACK_ZERO	0x172
 #define TACHO		0x173
 #define READ_DATA_1	0x174
-#define MFM_MODE	0x175
+#define GCR_MODE	0x175
 #define SEEK_COMPLETE	0x176
-#define ONEMEG_MEDIA	0x177
+#define TWOMEG_MEDIA	0x177
 
 /* Bits in handshake register */
 
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
 	struct floppy_struct *g;
 	fs->disk_in = 1;
 	fs->write_protected = swim_readbit(base, WRITE_PROT);
-	fs->type = swim_readbit(base, ONEMEG_MEDIA);
 
 	if (swim_track00(base))
 		printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
 
 	swim_track00(base);
 
+	fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+		HD_MEDIA : DD_MEDIA;
+	fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
 	get_floppy_geometry(fs, 0, &g);
 	fs->total_secs = g->size;
 	fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 
 	swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
 	udelay(10);
-	swim_drive(base, INTERNAL_DRIVE);
+	swim_drive(base, fs->location);
 	swim_motor(base, ON);
 	swim_action(base, SETMFM);
 	if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 		goto out;
 	}
 
+	set_capacity(fs->disk, fs->total_secs);
+
 	if (mode & FMODE_NDELAY)
 		return 0;
 
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 		if (copy_to_user((void __user *) param, (void *) &floppy_type,
 				 sizeof(struct floppy_struct)))
 			return -EFAULT;
-		break;
-
-	default:
-		printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
-		       cmd);
-		return -ENOSYS;
+		return 0;
 	}
-	return 0;
+	return -ENOTTY;
 }
 
 static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 	struct swim_priv *swd = data;
 	int drive = (*part & 3);
 
-	if (drive > swd->floppy_count)
+	if (drive >= swd->floppy_count)
 		return NULL;
 
 	*part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
 
 	swim_motor(base, OFF);
 
-	if (swim_readbit(base, SINGLE_SIDED))
-		fs->head_number = 1;
-	else
-		fs->head_number = 2;
+	fs->type = HD_MEDIA;
+	fs->head_number = 2;
+
 	fs->ref_count = 0;
 	fs->ejected = 1;
 
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
 	/* scan floppy drives */
 
 	swim_drive(base, INTERNAL_DRIVE);
-	if (swim_readbit(base, DRIVE_PRESENT))
+	if (swim_readbit(base, DRIVE_PRESENT) &&
+	    !swim_readbit(base, ONEMEG_DRIVE))
 		swim_add_floppy(swd, INTERNAL_DRIVE);
 	swim_drive(base, EXTERNAL_DRIVE);
-	if (swim_readbit(base, DRIVE_PRESENT))
+	if (swim_readbit(base, DRIVE_PRESENT) &&
+	    !swim_readbit(base, ONEMEG_DRIVE))
 		swim_add_floppy(swd, EXTERNAL_DRIVE);
 
 	/* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
 						  &swd->lock);
 		if (!swd->unit[drive].disk->queue) {
 			err = -ENOMEM;
-			put_disk(swd->unit[drive].disk);
 			goto exit_put_disks;
 		}
 		blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
 		goto out;
 	}
 
-	swim_base = ioremap(res->start, resource_size(res));
+	swim_base = (struct swim __iomem *)res->start;
 	if (!swim_base) {
 		ret = -ENOMEM;
 		goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
 	if (!get_swim_mode(swim_base)) {
 		printk(KERN_INFO "SWIM device not found !\n");
 		ret = -ENODEV;
-		goto out_iounmap;
+		goto out_release_io;
 	}
 
 	/* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
 	swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
 	if (!swd) {
 		ret = -ENOMEM;
-		goto out_iounmap;
+		goto out_release_io;
 	}
 	platform_set_drvdata(dev, swd);
 
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
 
 out_kfree:
 	kfree(swd);
-out_iounmap:
-	iounmap(swim_base);
 out_release_io:
 	release_mem_region(res->start, resource_size(res));
 out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
 	for (drive = 0; drive < swd->floppy_count; drive++)
 		floppy_eject(&swd->unit[drive]);
 
-	iounmap(swd->base);
-
 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (res)
 		release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
 #define MOTOR_ON	2
 #define RELAX		3	/* also eject in progress */
 #define READ_DATA_0	4
-#define TWOMEG_DRIVE	5
+#define ONEMEG_DRIVE	5
 #define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
 #define DRIVE_PRESENT	7
 #define DISK_IN	8
@@ -156,9 +156,9 @@ struct swim3 {
 #define TRACK_ZERO	10
 #define TACHO		11
 #define READ_DATA_1	12
-#define MFM_MODE	13
+#define GCR_MODE	13
 #define SEEK_COMPLETE	14
-#define ONEMEG_MEDIA	15
+#define TWOMEG_MEDIA	15
 
 /* Definitions of values used in writing and formatting */
 #define DATA_ESCAPE	0x99
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..6dc177bf4c42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -33,6 +33,7 @@ config HISILICON_LPC
 	bool "Support for ISA I/O space on HiSilicon Hip06/7"
 	depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
 	select INDIRECT_PIO
+	select MFD_CORE if ACPI
 	help
 	  Driver to enable I/O access to devices attached to the Low Pin
 	  Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
 		return media_changed(cdi, 1);
 
-	if ((unsigned int)arg >= cdi->capacity)
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
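The dropped (unsigned int) cast matters because arg is an unsigned long: on a 64-bit kernel the cast discarded the upper 32 bits before the bounds check, so a value with only high bits set could pass validation and then be used as an out-of-range index. A small user-space sketch of the truncation (illustration only, assumes an LP64 target where unsigned long is 64 bits):

    #include <stdio.h>

    int main(void)
    {
        unsigned long arg = 0x100000000UL;  /* bit 32 set, low 32 bits zero */
        unsigned int capacity = 4;

        /* Old check truncated arg to 32 bits, so this slipped through: */
        printf("old check rejects: %d\n", (unsigned int)arg >= capacity);
        /* Full-width comparison rejects it, as the patch intends: */
        printf("new check rejects: %d\n", arg >= capacity);
        return 0;
    }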
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e027e7fa1472..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
261#include <linux/ptrace.h> 261#include <linux/ptrace.h>
262#include <linux/workqueue.h> 262#include <linux/workqueue.h>
263#include <linux/irq.h> 263#include <linux/irq.h>
264#include <linux/ratelimit.h>
264#include <linux/syscalls.h> 265#include <linux/syscalls.h>
265#include <linux/completion.h> 266#include <linux/completion.h>
266#include <linux/uuid.h> 267#include <linux/uuid.h>
@@ -427,8 +428,9 @@ struct crng_state primary_crng = {
427 * its value (from 0->1->2). 428 * its value (from 0->1->2).
428 */ 429 */
429static int crng_init = 0; 430static int crng_init = 0;
430#define crng_ready() (likely(crng_init > 0)) 431#define crng_ready() (likely(crng_init > 1))
431static int crng_init_cnt = 0; 432static int crng_init_cnt = 0;
433static unsigned long crng_global_init_time = 0;
432#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) 434#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
433static void _extract_crng(struct crng_state *crng, 435static void _extract_crng(struct crng_state *crng,
434 __u32 out[CHACHA20_BLOCK_WORDS]); 436 __u32 out[CHACHA20_BLOCK_WORDS]);
@@ -437,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
437static void process_random_ready_list(void); 439static void process_random_ready_list(void);
438static void _get_random_bytes(void *buf, int nbytes); 440static void _get_random_bytes(void *buf, int nbytes);
439 441
442static struct ratelimit_state unseeded_warning =
443 RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
444static struct ratelimit_state urandom_warning =
445 RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
446
447static int ratelimit_disable __read_mostly;
448
449module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
450MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
451
440/********************************************************************** 452/**********************************************************************
441 * 453 *
442 * OS independent entropy store. Here are the functions which handle 454 * OS independent entropy store. Here are the functions which handle
@@ -787,6 +799,43 @@ static void crng_initialize(struct crng_state *crng)
787 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 799 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
788} 800}
789 801
802#ifdef CONFIG_NUMA
803static void do_numa_crng_init(struct work_struct *work)
804{
805 int i;
806 struct crng_state *crng;
807 struct crng_state **pool;
808
809 pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
810 for_each_online_node(i) {
811 crng = kmalloc_node(sizeof(struct crng_state),
812 GFP_KERNEL | __GFP_NOFAIL, i);
813 spin_lock_init(&crng->lock);
814 crng_initialize(crng);
815 pool[i] = crng;
816 }
817 mb();
818 if (cmpxchg(&crng_node_pool, NULL, pool)) {
819 for_each_node(i)
820 kfree(pool[i]);
821 kfree(pool);
822 }
823}
824
825static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
826
827static void numa_crng_init(void)
828{
829 schedule_work(&numa_crng_init_work);
830}
831#else
832static void numa_crng_init(void) {}
833#endif
834
835/*
836 * crng_fast_load() can be called by code in the interrupt service
837 * path. So we can't afford to dilly-dally.
838 */
790static int crng_fast_load(const char *cp, size_t len) 839static int crng_fast_load(const char *cp, size_t len)
791{ 840{
792 unsigned long flags; 841 unsigned long flags;
@@ -794,7 +843,7 @@ static int crng_fast_load(const char *cp, size_t len)
794 843
795 if (!spin_trylock_irqsave(&primary_crng.lock, flags)) 844 if (!spin_trylock_irqsave(&primary_crng.lock, flags))
796 return 0; 845 return 0;
797 if (crng_ready()) { 846 if (crng_init != 0) {
798 spin_unlock_irqrestore(&primary_crng.lock, flags); 847 spin_unlock_irqrestore(&primary_crng.lock, flags);
799 return 0; 848 return 0;
800 } 849 }
@@ -813,6 +862,51 @@ static int crng_fast_load(const char *cp, size_t len)
813 return 1; 862 return 1;
814} 863}
815 864
865/*
866 * crng_slow_load() is called by add_device_randomness, which has two
867 * attributes. (1) We can't trust the buffer passed to it is
868 * guaranteed to be unpredictable (so it might not have any entropy at
869 * all), and (2) it doesn't have the performance constraints of
870 * crng_fast_load().
871 *
872 * So we do something more comprehensive which is guaranteed to touch
873 * all of the primary_crng's state, and which uses a LFSR with a
874 * period of 255 as part of the mixing algorithm. Finally, we do
875 * *not* advance crng_init_cnt since buffer we may get may be something
876 * like a fixed DMI table (for example), which might very well be
877 * unique to the machine, but is otherwise unvarying.
878 */
879static int crng_slow_load(const char *cp, size_t len)
880{
881 unsigned long flags;
882 static unsigned char lfsr = 1;
883 unsigned char tmp;
884 unsigned i, max = CHACHA20_KEY_SIZE;
885 const char * src_buf = cp;
886 char * dest_buf = (char *) &primary_crng.state[4];
887
888 if (!spin_trylock_irqsave(&primary_crng.lock, flags))
889 return 0;
890 if (crng_init != 0) {
891 spin_unlock_irqrestore(&primary_crng.lock, flags);
892 return 0;
893 }
894 if (len > max)
895 max = len;
896
897 for (i = 0; i < max ; i++) {
898 tmp = lfsr;
899 lfsr >>= 1;
900 if (tmp & 1)
901 lfsr ^= 0xE1;
902 tmp = dest_buf[i % CHACHA20_KEY_SIZE];
903 dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
904 lfsr += (tmp << 3) | (tmp >> 5);
905 }
906 spin_unlock_irqrestore(&primary_crng.lock, flags);
907 return 1;
908}
909
816static void crng_reseed(struct crng_state *crng, struct entropy_store *r) 910static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
817{ 911{
818 unsigned long flags; 912 unsigned long flags;
@@ -831,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
831 _crng_backtrack_protect(&primary_crng, buf.block, 925 _crng_backtrack_protect(&primary_crng, buf.block,
832 CHACHA20_KEY_SIZE); 926 CHACHA20_KEY_SIZE);
833 } 927 }
834 spin_lock_irqsave(&primary_crng.lock, flags); 928 spin_lock_irqsave(&crng->lock, flags);
835 for (i = 0; i < 8; i++) { 929 for (i = 0; i < 8; i++) {
836 unsigned long rv; 930 unsigned long rv;
837 if (!arch_get_random_seed_long(&rv) && 931 if (!arch_get_random_seed_long(&rv) &&
@@ -841,13 +935,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
841 } 935 }
842 memzero_explicit(&buf, sizeof(buf)); 936 memzero_explicit(&buf, sizeof(buf));
843 crng->init_time = jiffies; 937 crng->init_time = jiffies;
844 spin_unlock_irqrestore(&primary_crng.lock, flags); 938 spin_unlock_irqrestore(&crng->lock, flags);
845 if (crng == &primary_crng && crng_init < 2) { 939 if (crng == &primary_crng && crng_init < 2) {
846 invalidate_batched_entropy(); 940 invalidate_batched_entropy();
941 numa_crng_init();
847 crng_init = 2; 942 crng_init = 2;
848 process_random_ready_list(); 943 process_random_ready_list();
849 wake_up_interruptible(&crng_init_wait); 944 wake_up_interruptible(&crng_init_wait);
850 pr_notice("random: crng init done\n"); 945 pr_notice("random: crng init done\n");
946 if (unseeded_warning.missed) {
947 pr_notice("random: %d get_random_xx warning(s) missed "
948 "due to ratelimiting\n",
949 unseeded_warning.missed);
950 unseeded_warning.missed = 0;
951 }
952 if (urandom_warning.missed) {
953 pr_notice("random: %d urandom warning(s) missed "
954 "due to ratelimiting\n",
955 urandom_warning.missed);
956 urandom_warning.missed = 0;
957 }
851 } 958 }
852} 959}
853 960
@@ -856,8 +963,9 @@ static void _extract_crng(struct crng_state *crng,
856{ 963{
857 unsigned long v, flags; 964 unsigned long v, flags;
858 965
859 if (crng_init > 1 && 966 if (crng_ready() &&
860 time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) 967 (time_after(crng_global_init_time, crng->init_time) ||
968 time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
861 crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); 969 crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
862 spin_lock_irqsave(&crng->lock, flags); 970 spin_lock_irqsave(&crng->lock, flags);
863 if (arch_get_random_long(&v)) 971 if (arch_get_random_long(&v))
@@ -981,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size)
981 unsigned long time = random_get_entropy() ^ jiffies; 1089 unsigned long time = random_get_entropy() ^ jiffies;
982 unsigned long flags; 1090 unsigned long flags;
983 1091
984 if (!crng_ready()) { 1092 if (!crng_ready() && size)
985 crng_fast_load(buf, size); 1093 crng_slow_load(buf, size);
986 return;
987 }
988 1094
989 trace_add_device_randomness(size, _RET_IP_); 1095 trace_add_device_randomness(size, _RET_IP_);
990 spin_lock_irqsave(&input_pool.lock, flags); 1096 spin_lock_irqsave(&input_pool.lock, flags);
@@ -1139,7 +1245,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
1139 fast_mix(fast_pool); 1245 fast_mix(fast_pool);
1140 add_interrupt_bench(cycles); 1246 add_interrupt_bench(cycles);
1141 1247
1142 if (!crng_ready()) { 1248 if (unlikely(crng_init == 0)) {
1143 if ((fast_pool->count >= 64) && 1249 if ((fast_pool->count >= 64) &&
1144 crng_fast_load((char *) fast_pool->pool, 1250 crng_fast_load((char *) fast_pool->pool,
1145 sizeof(fast_pool->pool))) { 1251 sizeof(fast_pool->pool))) {
@@ -1489,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1489#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1595#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1490 print_once = true; 1596 print_once = true;
1491#endif 1597#endif
1492 pr_notice("random: %s called from %pS with crng_init=%d\n", 1598 if (__ratelimit(&unseeded_warning))
1493 func_name, caller, crng_init); 1599 pr_notice("random: %s called from %pS with crng_init=%d\n",
1600 func_name, caller, crng_init);
1494} 1601}
1495 1602
1496/* 1603/*
@@ -1680,28 +1787,14 @@ static void init_std_data(struct entropy_store *r)
1680 */ 1787 */
1681static int rand_initialize(void) 1788static int rand_initialize(void)
1682{ 1789{
1683#ifdef CONFIG_NUMA
1684 int i;
1685 struct crng_state *crng;
1686 struct crng_state **pool;
1687#endif
1688
1689 init_std_data(&input_pool); 1790 init_std_data(&input_pool);
1690 init_std_data(&blocking_pool); 1791 init_std_data(&blocking_pool);
1691 crng_initialize(&primary_crng); 1792 crng_initialize(&primary_crng);
1692 1793 crng_global_init_time = jiffies;
1693#ifdef CONFIG_NUMA 1794 if (ratelimit_disable) {
1694 pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); 1795 urandom_warning.interval = 0;
1695 for_each_online_node(i) { 1796 unseeded_warning.interval = 0;
1696 crng = kmalloc_node(sizeof(struct crng_state),
1697 GFP_KERNEL | __GFP_NOFAIL, i);
1698 spin_lock_init(&crng->lock);
1699 crng_initialize(crng);
1700 pool[i] = crng;
1701 } 1797 }
1702 mb();
1703 crng_node_pool = pool;
1704#endif
1705 return 0; 1798 return 0;
1706} 1799}
1707early_initcall(rand_initialize); 1800early_initcall(rand_initialize);
@@ -1769,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1769 1862
1770 if (!crng_ready() && maxwarn > 0) { 1863 if (!crng_ready() && maxwarn > 0) {
1771 maxwarn--; 1864 maxwarn--;
1772 printk(KERN_NOTICE "random: %s: uninitialized urandom read " 1865 if (__ratelimit(&urandom_warning))
1773 "(%zd bytes read)\n", 1866 printk(KERN_NOTICE "random: %s: uninitialized "
1774 current->comm, nbytes); 1867 "urandom read (%zd bytes read)\n",
1868 current->comm, nbytes);
1775 spin_lock_irqsave(&primary_crng.lock, flags); 1869 spin_lock_irqsave(&primary_crng.lock, flags);
1776 crng_init_cnt = 0; 1870 crng_init_cnt = 0;
1777 spin_unlock_irqrestore(&primary_crng.lock, flags); 1871 spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -1875,6 +1969,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1875 input_pool.entropy_count = 0; 1969 input_pool.entropy_count = 0;
1876 blocking_pool.entropy_count = 0; 1970 blocking_pool.entropy_count = 0;
1877 return 0; 1971 return 0;
1972 case RNDRESEEDCRNG:
1973 if (!capable(CAP_SYS_ADMIN))
1974 return -EPERM;
1975 if (crng_init < 2)
1976 return -ENODATA;
1977 crng_reseed(&primary_crng, NULL);
1978 crng_global_init_time = jiffies - 1;
1979 return 0;
1878 default: 1980 default:
1879 return -EINVAL; 1981 return -EINVAL;
1880 } 1982 }
@@ -2212,7 +2314,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
2212{ 2314{
2213 struct entropy_store *poolp = &input_pool; 2315 struct entropy_store *poolp = &input_pool;
2214 2316
2215 if (!crng_ready()) { 2317 if (unlikely(crng_init == 0)) {
2216 crng_fast_load(buffer, count); 2318 crng_fast_load(buffer, count);
2217 return; 2319 return;
2218 } 2320 }
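For context, the new RNDRESEEDCRNG case can be exercised from userspace roughly as follows. This is a sketch, assuming the matching RNDRESEEDCRNG uapi definition in <linux/random.h> from the same series:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/random.h>

	int main(void)
	{
		int fd = open("/dev/random", O_RDONLY);

		if (fd < 0)
			return 1;
		/* needs CAP_SYS_ADMIN; fails with ENODATA until crng_init == 2 */
		if (ioctl(fd, RNDRESEEDCRNG) < 0)
			perror("RNDRESEEDCRNG");
		close(fd);
		return 0;
	}

Rewinding crng_global_init_time to jiffies - 1 afterwards makes the time_after() test added to _extract_crng() true for every crng_state, so all of them reseed on their next use.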
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 468f06134012..21085515814f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
422 } 422 }
423} 423}
424 424
425static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, 425static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
426 int pages) 426 int pages)
427{ 427{
428 struct port_buffer *buf; 428 struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
445 return buf; 445 return buf;
446 } 446 }
447 447
448 if (is_rproc_serial(vq->vdev)) { 448 if (is_rproc_serial(vdev)) {
449 /* 449 /*
450 * Allocate DMA memory from ancestor. When a virtio 450 * Allocate DMA memory from ancestor. When a virtio
451 * device is created by remoteproc, the DMA memory is 451 * device is created by remoteproc, the DMA memory is
452 * associated with the grandparent device: 452 * associated with the grandparent device:
453 * vdev => rproc => platform-dev. 453 * vdev => rproc => platform-dev.
454 */ 454 */
455 if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) 455 if (!vdev->dev.parent || !vdev->dev.parent->parent)
456 goto free_buf; 456 goto free_buf;
457 buf->dev = vq->vdev->dev.parent->parent; 457 buf->dev = vdev->dev.parent->parent;
458 458
459 /* Increase device refcnt to avoid freeing it */ 459 /* Increase device refcnt to avoid freeing it */
460 get_device(buf->dev); 460 get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
838 838
839 count = min((size_t)(32 * 1024), count); 839 count = min((size_t)(32 * 1024), count);
840 840
841 buf = alloc_buf(port->out_vq, count, 0); 841 buf = alloc_buf(port->portdev->vdev, count, 0);
842 if (!buf) 842 if (!buf)
843 return -ENOMEM; 843 return -ENOMEM;
844 844
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
957 if (ret < 0) 957 if (ret < 0)
958 goto error_out; 958 goto error_out;
959 959
960 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 960 buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
961 if (!buf) { 961 if (!buf) {
962 ret = -ENOMEM; 962 ret = -ENOMEM;
963 goto error_out; 963 goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1374 1374
1375 nr_added_bufs = 0; 1375 nr_added_bufs = 0;
1376 do { 1376 do {
1377 buf = alloc_buf(vq, PAGE_SIZE, 0); 1377 buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1378 if (!buf) 1378 if (!buf)
1379 break; 1379 break;
1380 1380
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1402{ 1402{
1403 char debugfs_name[16]; 1403 char debugfs_name[16];
1404 struct port *port; 1404 struct port *port;
1405 struct port_buffer *buf;
1406 dev_t devt; 1405 dev_t devt;
1407 unsigned int nr_added_bufs; 1406 unsigned int nr_added_bufs;
1408 int err; 1407 int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1513 return 0; 1512 return 0;
1514 1513
1515free_inbufs: 1514free_inbufs:
1516 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1517 free_buf(buf, true);
1518free_device: 1515free_device:
1519 device_destroy(pdrvdata.class, port->dev->devt); 1516 device_destroy(pdrvdata.class, port->dev->devt);
1520free_cdev: 1517free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
1539 1536
1540static void remove_port_data(struct port *port) 1537static void remove_port_data(struct port *port)
1541{ 1538{
1542 struct port_buffer *buf;
1543
1544 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1545 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1546 discard_port_data(port); 1541 discard_port_data(port);
1547 spin_unlock_irq(&port->inbuf_lock); 1542 spin_unlock_irq(&port->inbuf_lock);
1548 1543
1549 /* Remove buffers we queued up for the Host to send us data in. */
1550 do {
1551 spin_lock_irq(&port->inbuf_lock);
1552 buf = virtqueue_detach_unused_buf(port->in_vq);
1553 spin_unlock_irq(&port->inbuf_lock);
1554 if (buf)
1555 free_buf(buf, true);
1556 } while (buf);
1557
1558 spin_lock_irq(&port->outvq_lock); 1544 spin_lock_irq(&port->outvq_lock);
1559 reclaim_consumed_buffers(port); 1545 reclaim_consumed_buffers(port);
1560 spin_unlock_irq(&port->outvq_lock); 1546 spin_unlock_irq(&port->outvq_lock);
1561
1562 /* Free pending buffers from the out-queue. */
1563 do {
1564 spin_lock_irq(&port->outvq_lock);
1565 buf = virtqueue_detach_unused_buf(port->out_vq);
1566 spin_unlock_irq(&port->outvq_lock);
1567 if (buf)
1568 free_buf(buf, true);
1569 } while (buf);
1570} 1547}
1571 1548
1572/* 1549/*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
1791 spin_unlock(&portdev->c_ivq_lock); 1768 spin_unlock(&portdev->c_ivq_lock);
1792} 1769}
1793 1770
1771static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1772{
1773 struct port_buffer *buf;
1774 unsigned int len;
1775
1776 while ((buf = virtqueue_get_buf(vq, &len)))
1777 free_buf(buf, can_sleep);
1778}
1779
1794static void out_intr(struct virtqueue *vq) 1780static void out_intr(struct virtqueue *vq)
1795{ 1781{
1796 struct port *port; 1782 struct port *port;
1797 1783
1798 port = find_port_by_vq(vq->vdev->priv, vq); 1784 port = find_port_by_vq(vq->vdev->priv, vq);
1799 if (!port) 1785 if (!port) {
1786 flush_bufs(vq, false);
1800 return; 1787 return;
1788 }
1801 1789
1802 wake_up_interruptible(&port->waitqueue); 1790 wake_up_interruptible(&port->waitqueue);
1803} 1791}
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
1808 unsigned long flags; 1796 unsigned long flags;
1809 1797
1810 port = find_port_by_vq(vq->vdev->priv, vq); 1798 port = find_port_by_vq(vq->vdev->priv, vq);
1811 if (!port) 1799 if (!port) {
1800 flush_bufs(vq, false);
1812 return; 1801 return;
1802 }
1813 1803
1814 spin_lock_irqsave(&port->inbuf_lock, flags); 1804 spin_lock_irqsave(&port->inbuf_lock, flags);
1815 port->inbuf = get_inbuf(port); 1805 port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
1984 1974
1985static void remove_vqs(struct ports_device *portdev) 1975static void remove_vqs(struct ports_device *portdev)
1986{ 1976{
1977 struct virtqueue *vq;
1978
1979 virtio_device_for_each_vq(portdev->vdev, vq) {
1980 struct port_buffer *buf;
1981
1982 flush_bufs(vq, true);
1983 while ((buf = virtqueue_detach_unused_buf(vq)))
1984 free_buf(buf, true);
1985 }
1987 portdev->vdev->config->del_vqs(portdev->vdev); 1986 portdev->vdev->config->del_vqs(portdev->vdev);
1988 kfree(portdev->in_vqs); 1987 kfree(portdev->in_vqs);
1989 kfree(portdev->out_vqs); 1988 kfree(portdev->out_vqs);
1990} 1989}
1991 1990
1992static void remove_controlq_data(struct ports_device *portdev) 1991static void virtcons_remove(struct virtio_device *vdev)
1993{ 1992{
1994 struct port_buffer *buf; 1993 struct ports_device *portdev;
1995 unsigned int len; 1994 struct port *port, *port2;
1996 1995
1997 if (!use_multiport(portdev)) 1996 portdev = vdev->priv;
1998 return;
1999 1997
2000 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1998 spin_lock_irq(&pdrvdata_lock);
2001 free_buf(buf, true); 1999 list_del(&portdev->list);
2000 spin_unlock_irq(&pdrvdata_lock);
2002 2001
2003 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 2002 /* Disable interrupts for vqs */
2004 free_buf(buf, true); 2003 vdev->config->reset(vdev);
2004 /* Finish up work that's lined up */
2005 if (use_multiport(portdev))
2006 cancel_work_sync(&portdev->control_work);
2007 else
2008 cancel_work_sync(&portdev->config_work);
2009
2010 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2011 unplug_port(port);
2012
2013 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2014
2015 /*
2016 * When yanking out a device, we immediately lose the
2017 * (device-side) queues. So there's no point in keeping the
2018 * guest side around till we drop our final reference. This
2019 * also means that any ports which are in an open state will
2020 * have to just stop using the port, as the vqs are going
2021 * away.
2022 */
2023 remove_vqs(portdev);
2024 kfree(portdev);
2005} 2025}
2006 2026
2007/* 2027/*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
2070 2090
2071 spin_lock_init(&portdev->ports_lock); 2091 spin_lock_init(&portdev->ports_lock);
2072 INIT_LIST_HEAD(&portdev->ports); 2092 INIT_LIST_HEAD(&portdev->ports);
2093 INIT_LIST_HEAD(&portdev->list);
2073 2094
2074 virtio_device_ready(portdev->vdev); 2095 virtio_device_ready(portdev->vdev);
2075 2096
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
2087 if (!nr_added_bufs) { 2108 if (!nr_added_bufs) {
2088 dev_err(&vdev->dev, 2109 dev_err(&vdev->dev,
2089 "Error allocating buffers for control queue\n"); 2110 "Error allocating buffers for control queue\n");
2090 err = -ENOMEM; 2111 /*
2091 goto free_vqs; 2112 * The host might want to notify mgmt sw about device
2113 * add failure.
2114 */
2115 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2116 VIRTIO_CONSOLE_DEVICE_READY, 0);
2117 /* Device was functional: we need full cleanup. */
2118 virtcons_remove(vdev);
2119 return -ENOMEM;
2092 } 2120 }
2093 } else { 2121 } else {
2094 /* 2122 /*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
2119 2147
2120 return 0; 2148 return 0;
2121 2149
2122free_vqs:
2123 /* The host might want to notify mgmt sw about device add failure */
2124 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2125 VIRTIO_CONSOLE_DEVICE_READY, 0);
2126 remove_vqs(portdev);
2127free_chrdev: 2150free_chrdev:
2128 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 2151 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2129free: 2152free:
@@ -2132,43 +2155,6 @@ fail:
2132 return err; 2155 return err;
2133} 2156}
2134 2157
2135static void virtcons_remove(struct virtio_device *vdev)
2136{
2137 struct ports_device *portdev;
2138 struct port *port, *port2;
2139
2140 portdev = vdev->priv;
2141
2142 spin_lock_irq(&pdrvdata_lock);
2143 list_del(&portdev->list);
2144 spin_unlock_irq(&pdrvdata_lock);
2145
2146 /* Disable interrupts for vqs */
2147 vdev->config->reset(vdev);
2148 /* Finish up work that's lined up */
2149 if (use_multiport(portdev))
2150 cancel_work_sync(&portdev->control_work);
2151 else
2152 cancel_work_sync(&portdev->config_work);
2153
2154 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2155 unplug_port(port);
2156
2157 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2158
2159 /*
2160 * When yanking out a device, we immediately lose the
2161 * (device-side) queues. So there's no point in keeping the
2162 * guest side around till we drop our final reference. This
2163 * also means that any ports which are in an open state will
2164 * have to just stop using the port, as the vqs are going
2165 * away.
2166 */
2167 remove_controlq_data(portdev);
2168 remove_vqs(portdev);
2169 kfree(portdev);
2170}
2171
2172static struct virtio_device_id id_table[] = { 2158static struct virtio_device_id id_table[] = {
2173 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, 2159 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2174 { 0 }, 2160 { 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
2209 */ 2195 */
2210 if (use_multiport(portdev)) 2196 if (use_multiport(portdev))
2211 virtqueue_disable_cb(portdev->c_ivq); 2197 virtqueue_disable_cb(portdev->c_ivq);
2212 remove_controlq_data(portdev);
2213 2198
2214 list_for_each_entry(port, &portdev->ports, list) { 2199 list_for_each_entry(port, &portdev->ports, list) {
2215 virtqueue_disable_cb(port->in_vq); 2200 virtqueue_disable_cb(port->in_vq);
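A condensed view of the teardown discipline the remove_vqs() rewrite adopts (drain_vq is a hypothetical helper name, not from the patch): buffers the host has already consumed come back through virtqueue_get_buf(), while buffers still queued and never consumed must be detached explicitly.

	static void drain_vq(struct virtqueue *vq)
	{
		struct port_buffer *buf;
		unsigned int len;

		while ((buf = virtqueue_get_buf(vq, &len)))	/* consumed by host */
			free_buf(buf, true);
		while ((buf = virtqueue_detach_unused_buf(vq)))	/* never consumed */
			free_buf(buf, true);
	}

Doing this once per queue in remove_vqs(), after the device reset, replaces the scattered per-port and per-controlq drain loops removed above.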
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 9ee2888275c1..8e8a09755d10 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -133,6 +133,14 @@ config VT8500_TIMER
133 help 133 help
134 Enables support for the VT8500 driver. 134 Enables support for the VT8500 driver.
135 135
136config NPCM7XX_TIMER
137 bool "NPCM7xx timer driver" if COMPILE_TEST
138 depends on HAS_IOMEM
139 select CLKSRC_MMIO
140 help
141	  Enable the 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
142	  where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
143
136config CADENCE_TTC_TIMER 144config CADENCE_TTC_TIMER
137 bool "Cadence TTC timer driver" if COMPILE_TEST 145 bool "Cadence TTC timer driver" if COMPILE_TEST
138 depends on COMMON_CLK 146 depends on COMMON_CLK
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index e8e76dfef00b..00caf37e52f9 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
56obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o 56obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
57obj-$(CONFIG_OWL_TIMER) += owl-timer.o 57obj-$(CONFIG_OWL_TIMER) += owl-timer.o
58obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o 58obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
59obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
59 60
60obj-$(CONFIG_ARC_TIMERS) += arc_timer.o 61obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
61obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o 62obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index 21bffdcb2f20..6c8318470b48 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -17,9 +17,14 @@
17#include <linux/of_irq.h> 17#include <linux/of_irq.h>
18#include <linux/sched_clock.h> 18#include <linux/sched_clock.h>
19 19
20#define TPM_PARAM 0x4
21#define TPM_PARAM_WIDTH_SHIFT 16
22#define TPM_PARAM_WIDTH_MASK (0xff << 16)
20#define TPM_SC 0x10 23#define TPM_SC 0x10
21#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) 24#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
22#define TPM_SC_CMOD_DIV_DEFAULT 0x3 25#define TPM_SC_CMOD_DIV_DEFAULT 0x3
26#define TPM_SC_CMOD_DIV_MAX 0x7
27#define TPM_SC_TOF_MASK (0x1 << 7)
23#define TPM_CNT 0x14 28#define TPM_CNT 0x14
24#define TPM_MOD 0x18 29#define TPM_MOD 0x18
25#define TPM_STATUS 0x1c 30#define TPM_STATUS 0x1c
@@ -29,8 +34,11 @@
29#define TPM_C0SC_MODE_SHIFT 2 34#define TPM_C0SC_MODE_SHIFT 2
30#define TPM_C0SC_MODE_MASK 0x3c 35#define TPM_C0SC_MODE_MASK 0x3c
31#define TPM_C0SC_MODE_SW_COMPARE 0x4 36#define TPM_C0SC_MODE_SW_COMPARE 0x4
37#define TPM_C0SC_CHF_MASK (0x1 << 7)
32#define TPM_C0V 0x24 38#define TPM_C0V 0x24
33 39
40static int counter_width;
41static int rating;
34static void __iomem *timer_base; 42static void __iomem *timer_base;
35static struct clock_event_device clockevent_tpm; 43static struct clock_event_device clockevent_tpm;
36 44
@@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate)
83 tpm_delay_timer.freq = rate; 91 tpm_delay_timer.freq = rate;
84 register_current_timer_delay(&tpm_delay_timer); 92 register_current_timer_delay(&tpm_delay_timer);
85 93
86 sched_clock_register(tpm_read_sched_clock, 32, rate); 94 sched_clock_register(tpm_read_sched_clock, counter_width, rate);
87 95
88 return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", 96 return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
89 rate, 200, 32, clocksource_mmio_readl_up); 97 rate, rating, counter_width,
98 clocksource_mmio_readl_up);
90} 99}
91 100
92static int tpm_set_next_event(unsigned long delta, 101static int tpm_set_next_event(unsigned long delta,
@@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
105	 * of writing CNT registers, which may cause the min_delta event to be 114	 * of writing CNT registers, which may cause the min_delta event to be
106	 * missed, so we need to add an -ETIME check here in case that happens. 115	 * missed, so we need to add an -ETIME check here in case that happens.
107 */ 116 */
108 return (int)((next - now) <= 0) ? -ETIME : 0; 117 return (int)(next - now) <= 0 ? -ETIME : 0;
109} 118}
110 119
111static int tpm_set_state_oneshot(struct clock_event_device *evt) 120static int tpm_set_state_oneshot(struct clock_event_device *evt)
@@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = {
139 .set_state_oneshot = tpm_set_state_oneshot, 148 .set_state_oneshot = tpm_set_state_oneshot,
140 .set_next_event = tpm_set_next_event, 149 .set_next_event = tpm_set_next_event,
141 .set_state_shutdown = tpm_set_state_shutdown, 150 .set_state_shutdown = tpm_set_state_shutdown,
142 .rating = 200,
143}; 151};
144 152
145static int __init tpm_clockevent_init(unsigned long rate, int irq) 153static int __init tpm_clockevent_init(unsigned long rate, int irq)
@@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq)
149 ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 157 ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
150 "i.MX7ULP TPM Timer", &clockevent_tpm); 158 "i.MX7ULP TPM Timer", &clockevent_tpm);
151 159
160 clockevent_tpm.rating = rating;
152 clockevent_tpm.cpumask = cpumask_of(0); 161 clockevent_tpm.cpumask = cpumask_of(0);
153 clockevent_tpm.irq = irq; 162 clockevent_tpm.irq = irq;
154 clockevents_config_and_register(&clockevent_tpm, 163 clockevents_config_and_register(&clockevent_tpm, rate, 300,
155 rate, 300, 0xfffffffe); 164 GENMASK(counter_width - 1, 1));
156 165
157 return ret; 166 return ret;
158} 167}
@@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np)
179 ipg = of_clk_get_by_name(np, "ipg"); 188 ipg = of_clk_get_by_name(np, "ipg");
180 per = of_clk_get_by_name(np, "per"); 189 per = of_clk_get_by_name(np, "per");
181 if (IS_ERR(ipg) || IS_ERR(per)) { 190 if (IS_ERR(ipg) || IS_ERR(per)) {
182 pr_err("tpm: failed to get igp or per clk\n"); 191 pr_err("tpm: failed to get ipg or per clk\n");
183 ret = -ENODEV; 192 ret = -ENODEV;
184 goto err_clk_get; 193 goto err_clk_get;
185 } 194 }
@@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np)
197 goto err_per_clk_enable; 206 goto err_per_clk_enable;
198 } 207 }
199 208
209 counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
210 >> TPM_PARAM_WIDTH_SHIFT;
211 /* use rating 200 for 32-bit counter and 150 for 16-bit counter */
212 rating = counter_width == 0x20 ? 200 : 150;
213
200 /* 214 /*
201 * Initialize tpm module to a known state 215 * Initialize tpm module to a known state
202 * 1) Counter disabled 216 * 1) Counter disabled
@@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np)
205 * 4) Channel0 disabled 219 * 4) Channel0 disabled
206 * 5) DMA transfers disabled 220 * 5) DMA transfers disabled
207 */ 221 */
222 /* make sure counter is disabled */
208 writel(0, timer_base + TPM_SC); 223 writel(0, timer_base + TPM_SC);
224 /* TOF is W1C */
225 writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
209 writel(0, timer_base + TPM_CNT); 226 writel(0, timer_base + TPM_CNT);
210 writel(0, timer_base + TPM_C0SC); 227 /* CHF is W1C */
228 writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
211 229
212 /* increase per cnt, div 8 by default */ 230 /*
213 writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, 231 * increase per cnt,
232 * div 8 for 32-bit counter and div 128 for 16-bit counter
233 */
234 writel(TPM_SC_CMOD_INC_PER_CNT |
235 (counter_width == 0x20 ?
236 TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
214 timer_base + TPM_SC); 237 timer_base + TPM_SC);
215 238
216 /* set MOD register to maximum for free running mode */ 239 /* set MOD register to maximum for free running mode */
217 writel(0xffffffff, timer_base + TPM_MOD); 240 writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
218 241
219 rate = clk_get_rate(per) >> 3; 242 rate = clk_get_rate(per) >> 3;
220 ret = tpm_clocksource_init(rate); 243 ret = tpm_clocksource_init(rate);
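To make the width handling concrete, here is a worked sketch of the values the patch derives (field layout per the new TPM_PARAM macros; the two cases are the ones the comments mention):

	u32 param = readl(timer_base + TPM_PARAM);
	int width = (param & TPM_PARAM_WIDTH_MASK) >> TPM_PARAM_WIDTH_SHIFT;

	/* width == 0x20 (32-bit): MOD = GENMASK(31, 0) = 0xffffffff,
	 *   max_delta = GENMASK(31, 1) = 0xfffffffe, prescale div 8, rating 200
	 * width == 0x10 (16-bit): MOD = GENMASK(15, 0) = 0x0000ffff,
	 *   max_delta = GENMASK(15, 1) = 0x0000fffe, prescale div 128, rating 150
	 */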
diff --git a/drivers/clocksource/timer-npcm7xx.c b/drivers/clocksource/timer-npcm7xx.c
new file mode 100644
index 000000000000..7a9bb5532d99
--- /dev/null
+++ b/drivers/clocksource/timer-npcm7xx.c
@@ -0,0 +1,215 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com
4 * All rights reserved.
5 *
6 * Copyright 2017 Google, Inc.
7 */
8
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/err.h>
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/clockchips.h>
17#include <linux/of_irq.h>
18#include <linux/of_address.h>
19#include "timer-of.h"
20
21/* Timers registers */
22#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */
23#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */
24#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */
25#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */
26#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */
27#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */
28
29/* Timers control */
30#define NPCM7XX_Tx_RESETINT 0x1f
31#define NPCM7XX_Tx_PERIOD BIT(27)
32#define NPCM7XX_Tx_INTEN BIT(29)
33#define NPCM7XX_Tx_COUNTEN BIT(30)
34#define NPCM7XX_Tx_ONESHOT 0x0
35#define NPCM7XX_Tx_OPER		GENMASK(28, 27)
36#define NPCM7XX_Tx_MIN_PRESCALE 0x1
37#define NPCM7XX_Tx_TDR_MASK_BITS 24
38#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
39#define NPCM7XX_T0_CLR_INT 0x1
40#define NPCM7XX_Tx_CLR_CSR 0x0
41
42/* Timers operating mode */
43#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
44 NPCM7XX_Tx_INTEN | \
45 NPCM7XX_Tx_MIN_PRESCALE)
46
47#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
48 NPCM7XX_Tx_INTEN | \
49 NPCM7XX_Tx_MIN_PRESCALE)
50
51#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
52 NPCM7XX_Tx_MIN_PRESCALE)
53
54#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
55
56static int npcm7xx_timer_resume(struct clock_event_device *evt)
57{
58 struct timer_of *to = to_timer_of(evt);
59 u32 val;
60
61 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
62 val |= NPCM7XX_Tx_COUNTEN;
63 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
64
65 return 0;
66}
67
68static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
69{
70 struct timer_of *to = to_timer_of(evt);
71 u32 val;
72
73 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
74 val &= ~NPCM7XX_Tx_COUNTEN;
75 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
76
77 return 0;
78}
79
80static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
81{
82 struct timer_of *to = to_timer_of(evt);
83 u32 val;
84
85 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
86 val &= ~NPCM7XX_Tx_OPER;
87
88 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
89 val |= NPCM7XX_START_ONESHOT_Tx;
90 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
91
92 return 0;
93}
94
95static int npcm7xx_timer_periodic(struct clock_event_device *evt)
96{
97 struct timer_of *to = to_timer_of(evt);
98 u32 val;
99
100 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
101 val &= ~NPCM7XX_Tx_OPER;
102
103 writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
104 val |= NPCM7XX_START_PERIODIC_Tx;
105
106 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
107
108 return 0;
109}
110
111static int npcm7xx_clockevent_set_next_event(unsigned long evt,
112 struct clock_event_device *clk)
113{
114 struct timer_of *to = to_timer_of(clk);
115 u32 val;
116
117 writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
118 val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
119 val |= NPCM7XX_START_Tx;
120 writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
121
122 return 0;
123}
124
125static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
126{
127 struct clock_event_device *evt = (struct clock_event_device *)dev_id;
128 struct timer_of *to = to_timer_of(evt);
129
130 writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
131
132 evt->event_handler(evt);
133
134 return IRQ_HANDLED;
135}
136
137static struct timer_of npcm7xx_to = {
138 .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
139
140 .clkevt = {
141 .name = "npcm7xx-timer0",
142 .features = CLOCK_EVT_FEAT_PERIODIC |
143 CLOCK_EVT_FEAT_ONESHOT,
144 .set_next_event = npcm7xx_clockevent_set_next_event,
145 .set_state_shutdown = npcm7xx_timer_shutdown,
146 .set_state_periodic = npcm7xx_timer_periodic,
147 .set_state_oneshot = npcm7xx_timer_oneshot,
148 .tick_resume = npcm7xx_timer_resume,
149 .rating = 300,
150 },
151
152 .of_irq = {
153 .handler = npcm7xx_timer0_interrupt,
154 .flags = IRQF_TIMER | IRQF_IRQPOLL,
155 },
156};
157
158static void __init npcm7xx_clockevents_init(void)
159{
160 writel(NPCM7XX_DEFAULT_CSR,
161 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);
162
163 writel(NPCM7XX_Tx_RESETINT,
164 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);
165
166 npcm7xx_to.clkevt.cpumask = cpumask_of(0);
167 clockevents_config_and_register(&npcm7xx_to.clkevt,
168 timer_of_rate(&npcm7xx_to),
169 0x1, NPCM7XX_Tx_MAX_CNT);
170}
171
172static void __init npcm7xx_clocksource_init(void)
173{
174 u32 val;
175
176 writel(NPCM7XX_DEFAULT_CSR,
177 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
178 writel(NPCM7XX_Tx_MAX_CNT,
179 timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
180
181 val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
182 val |= NPCM7XX_START_Tx;
183 writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
184
185 clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
186 NPCM7XX_REG_TDR1,
187 "npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
188 200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
189 clocksource_mmio_readl_down);
190}
191
192static int __init npcm7xx_timer_init(struct device_node *np)
193{
194 int ret;
195
196 ret = timer_of_init(np, &npcm7xx_to);
197 if (ret)
198 return ret;
199
200	/* Clock input is divided by PRESCALE + 1 before it is fed
201	 * to the counter */
202 npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
203 (NPCM7XX_Tx_MIN_PRESCALE + 1);
204
205 npcm7xx_clocksource_init();
206 npcm7xx_clockevents_init();
207
208	pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d\n",
209 timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
210
211 return 0;
212}
213
214TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
215
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..de55c7d57438 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
71 71
72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. 72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
73 73
74config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
75 bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
76 depends on ARM_BRCMSTB_AVS_CPUFREQ
77 help
78 Enabling this option turns on debug support via sysfs under
79 /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
80 write some AVS mailbox registers through sysfs entries.
81
82 If in doubt, say N.
83
84config ARM_EXYNOS5440_CPUFREQ 74config ARM_EXYNOS5440_CPUFREQ
85 tristate "SAMSUNG EXYNOS5440" 75 tristate "SAMSUNG EXYNOS5440"
86 depends on SOC_EXYNOS5440 76 depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
49#include <linux/platform_device.h> 49#include <linux/platform_device.h>
50#include <linux/semaphore.h> 50#include <linux/semaphore.h>
51 51
52#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
53#include <linux/ctype.h>
54#include <linux/debugfs.h>
55#include <linux/slab.h>
56#include <linux/uaccess.h>
57#endif
58
59/* Max number of arguments AVS calls take */ 52/* Max number of arguments AVS calls take */
60#define AVS_MAX_CMD_ARGS 4 53#define AVS_MAX_CMD_ARGS 4
61/* 54/*
@@ -182,88 +175,11 @@ struct private_data {
182 void __iomem *base; 175 void __iomem *base;
183 void __iomem *avs_intr_base; 176 void __iomem *avs_intr_base;
184 struct device *dev; 177 struct device *dev;
185#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
186 struct dentry *debugfs;
187#endif
188 struct completion done; 178 struct completion done;
189 struct semaphore sem; 179 struct semaphore sem;
190 struct pmap pmap; 180 struct pmap pmap;
191}; 181};
192 182
193#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
194
195enum debugfs_format {
196 DEBUGFS_NORMAL,
197 DEBUGFS_FLOAT,
198 DEBUGFS_REV,
199};
200
201struct debugfs_data {
202 struct debugfs_entry *entry;
203 struct private_data *priv;
204};
205
206struct debugfs_entry {
207 char *name;
208 u32 offset;
209 fmode_t mode;
210 enum debugfs_format format;
211};
212
213#define DEBUGFS_ENTRY(name, mode, format) { \
214 #name, AVS_MBOX_##name, mode, format \
215}
216
217/*
218 * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
219 */
220#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
221#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
222#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
223#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
224
225/*
226 * This table stores the name, access permissions and offset for each hardware
227 * register and is used to generate debugfs entries.
228 */
229static struct debugfs_entry debugfs_entries[] = {
230 DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
231 DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
232 DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
233 DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
234 DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
235 DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
236 DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
237 DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
238 DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
239 DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
240 DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
241 DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
242 DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
243 DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
244 DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
245 DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
246 DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
247 DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
248 DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
249 DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
250 DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
251};
252
253static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
254
255static char *__strtolower(char *s)
256{
257 char *p;
258
259 for (p = s; *p; p++)
260 *p = tolower(*p);
261
262 return s;
263}
264
265#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
266
267static void __iomem *__map_region(const char *name) 183static void __iomem *__map_region(const char *name)
268{ 184{
269 struct device_node *np; 185 struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
516 return table; 432 return table;
517} 433}
518 434
519#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
520
521#define MANT(x) (unsigned int)(abs((x)) / 1000)
522#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
523
524static int brcm_avs_debug_show(struct seq_file *s, void *data)
525{
526 struct debugfs_data *dbgfs = s->private;
527 void __iomem *base;
528 u32 val, offset;
529
530 if (!dbgfs) {
531 seq_puts(s, "No device pointer\n");
532 return 0;
533 }
534
535 base = dbgfs->priv->base;
536 offset = dbgfs->entry->offset;
537 val = readl(base + offset);
538 switch (dbgfs->entry->format) {
539 case DEBUGFS_NORMAL:
540 seq_printf(s, "%u\n", val);
541 break;
542 case DEBUGFS_FLOAT:
543 seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
544 break;
545 case DEBUGFS_REV:
546 seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
547 (val >> 16 & 0xff), (val >> 8 & 0xff),
548 val & 0xff);
549 break;
550 }
551 seq_printf(s, "0x%08x\n", val);
552
553 return 0;
554}
555
556#undef MANT
557#undef FRAC
558
559static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
560 size_t size, loff_t *ppos)
561{
562 struct seq_file *s = file->private_data;
563 struct debugfs_data *dbgfs = s->private;
564 struct private_data *priv = dbgfs->priv;
565 void __iomem *base, *avs_intr_base;
566 bool use_issue_command = false;
567 unsigned long val, offset;
568 char str[128];
569 int ret;
570 char *str_ptr = str;
571
572 if (size >= sizeof(str))
573 return -E2BIG;
574
575 memset(str, 0, sizeof(str));
576 ret = copy_from_user(str, buf, size);
577 if (ret)
578 return ret;
579
580 base = priv->base;
581 avs_intr_base = priv->avs_intr_base;
582 offset = dbgfs->entry->offset;
583 /*
584 * Special case writing to "command" entry only: if the string starts
585 * with a 'c', we use the driver's __issue_avs_command() function.
586 * Otherwise, we perform a raw write. This should allow testing of raw
587 * access as well as using the higher level function. (Raw access
588 * doesn't clear the firmware return status after issuing the command.)
589 */
590 if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
591 use_issue_command = true;
592 str_ptr++;
593 }
594 if (kstrtoul(str_ptr, 0, &val) != 0)
595 return -EINVAL;
596
597 /*
598 * Setting the P-state is a special case. We need to update the CPU
599 * frequency we report.
600 */
601 if (val == AVS_CMD_SET_PSTATE) {
602 struct cpufreq_policy *policy;
603 unsigned int pstate;
604
605 policy = cpufreq_cpu_get(smp_processor_id());
606 /* Read back the P-state we are about to set */
607 pstate = readl(base + AVS_MBOX_PARAM(0));
608 if (use_issue_command) {
609 ret = brcm_avs_target_index(policy, pstate);
610 return ret ? ret : size;
611 }
612 policy->cur = policy->freq_table[pstate].frequency;
613 }
614
615 if (use_issue_command) {
616 ret = __issue_avs_command(priv, val, false, NULL);
617 } else {
618 /* Locking here is not perfect, but is only for debug. */
619 ret = down_interruptible(&priv->sem);
620 if (ret)
621 return ret;
622
623 writel(val, base + offset);
624 /* We have to wake up the firmware to process a command. */
625 if (offset == AVS_MBOX_COMMAND)
626 writel(AVS_CPU_L2_INT_MASK,
627 avs_intr_base + AVS_CPU_L2_SET0);
628 up(&priv->sem);
629 }
630
631 return ret ? ret : size;
632}
633
634static struct debugfs_entry *__find_debugfs_entry(const char *name)
635{
636 int i;
637
638 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
639 if (strcasecmp(debugfs_entries[i].name, name) == 0)
640 return &debugfs_entries[i];
641
642 return NULL;
643}
644
645static int brcm_avs_debug_open(struct inode *inode, struct file *file)
646{
647 struct debugfs_data *data;
648 fmode_t fmode;
649 int ret;
650
651 /*
652 * seq_open(), which is called by single_open(), clears "write" access.
653 * We need write access to some files, so we preserve our access mode
654 * and restore it.
655 */
656 fmode = file->f_mode;
657 /*
658 * Check access permissions even for root. We don't want to be writing
659 * to read-only registers. Access for regular users has already been
660 * checked by the VFS layer.
661 */
662 if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
663 return -EACCES;
664
665 data = kmalloc(sizeof(*data), GFP_KERNEL);
666 if (!data)
667 return -ENOMEM;
668 /*
669 * We use the same file system operations for all our debug files. To
670 * produce specific output, we look up the file name upon opening a
671 * debugfs entry and map it to a memory offset. This offset is then used
672 * in the generic "show" function to read a specific register.
673 */
674 data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
675 data->priv = inode->i_private;
676
677 ret = single_open(file, brcm_avs_debug_show, data);
678 if (ret)
679 kfree(data);
680 file->f_mode = fmode;
681
682 return ret;
683}
684
685static int brcm_avs_debug_release(struct inode *inode, struct file *file)
686{
687 struct seq_file *seq_priv = file->private_data;
688 struct debugfs_data *data = seq_priv->private;
689
690 kfree(data);
691 return single_release(inode, file);
692}
693
694static const struct file_operations brcm_avs_debug_ops = {
695 .open = brcm_avs_debug_open,
696 .read = seq_read,
697 .write = brcm_avs_seq_write,
698 .llseek = seq_lseek,
699 .release = brcm_avs_debug_release,
700};
701
702static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
703{
704 struct private_data *priv = platform_get_drvdata(pdev);
705 struct dentry *dir;
706 int i;
707
708 if (!priv)
709 return;
710
711 dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
712 if (IS_ERR_OR_NULL(dir))
713 return;
714 priv->debugfs = dir;
715
716 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
717 /*
718 * The DEBUGFS_ENTRY macro generates uppercase strings. We
719 * convert them to lowercase before creating the debugfs
720 * entries.
721 */
722 char *entry = __strtolower(debugfs_entries[i].name);
723 fmode_t mode = debugfs_entries[i].mode;
724
725 if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
726 dir, priv, &brcm_avs_debug_ops)) {
727 priv->debugfs = NULL;
728 debugfs_remove_recursive(dir);
729 break;
730 }
731 }
732}
733
734static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
735{
736 struct private_data *priv = platform_get_drvdata(pdev);
737
738 if (priv && priv->debugfs) {
739 debugfs_remove_recursive(priv->debugfs);
740 priv->debugfs = NULL;
741 }
742}
743
744#else
745
746static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
747static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
748
749#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
750
751/* 435/*
752 * To ensure the right firmware is running we need to 436 * To ensure the right firmware is running we need to
753 * - check the MAGIC matches what we expect 437 * - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
1016 return ret; 700 return ret;
1017 701
1018 brcm_avs_driver.driver_data = pdev; 702 brcm_avs_driver.driver_data = pdev;
1019 ret = cpufreq_register_driver(&brcm_avs_driver);
1020 if (!ret)
1021 brcm_avs_cpufreq_debug_init(pdev);
1022 703
1023 return ret; 704 return cpufreq_register_driver(&brcm_avs_driver);
1024} 705}
1025 706
1026static int brcm_avs_cpufreq_remove(struct platform_device *pdev) 707static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
1032 if (ret) 713 if (ret)
1033 return ret; 714 return ret;
1034 715
1035 brcm_avs_cpufreq_debug_exit(pdev);
1036
1037 priv = platform_get_drvdata(pdev); 716 priv = platform_get_drvdata(pdev);
1038 iounmap(priv->base); 717 iounmap(priv->base);
1039 iounmap(priv->avs_intr_base); 718 iounmap(priv->avs_intr_base);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index be8606457f27..aff2c1594220 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -19,6 +19,7 @@
19#include <linux/dax.h> 19#include <linux/dax.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/mman.h>
22#include "dax-private.h" 23#include "dax-private.h"
23#include "dax.h" 24#include "dax.h"
24 25
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
540 .release = dax_release, 541 .release = dax_release,
541 .get_unmapped_area = dax_get_unmapped_area, 542 .get_unmapped_area = dax_get_unmapped_area,
542 .mmap = dax_mmap, 543 .mmap = dax_mmap,
544 .mmap_supported_flags = MAP_SYNC,
543}; 545};
544 546
545static void dev_dax_release(struct device *dev) 547static void dev_dax_release(struct device *dev)
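With .mmap_supported_flags populated, userspace can now request synchronous faults on a device-dax mapping along these lines. A sketch, assuming MAP_SYNC and MAP_SHARED_VALIDATE are available from the system's mman headers; MAP_SYNC is only accepted in combination with MAP_SHARED_VALIDATE:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	void *map_sync(const char *path, size_t len)
	{
		int fd = open(path, O_RDWR);
		void *p;

		if (fd < 0)
			return NULL;
		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
		close(fd);
		/* MAP_FAILED means the kernel, filesystem or device lacks MAP_SYNC */
		return p == MAP_FAILED ? NULL : p;
	}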
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e6f17825db79..2b90606452a2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
284 struct clock_info *ci = handle->clk_priv; 284 struct clock_info *ci = handle->clk_priv;
285 struct scmi_clock_info *clk = ci->clk + clk_id; 285 struct scmi_clock_info *clk = ci->clk + clk_id;
286 286
287 if (!clk->name || !clk->name[0]) 287 if (!clk->name[0])
288 return NULL; 288 return NULL;
289 289
290 return clk; 290 return clk;
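The dropped half of the test was dead code: name is an embedded character array in struct scmi_clock_info, so its address can never be NULL. A minimal illustration (the array size is an assumption standing in for SCMI_MAX_STR_SIZE):

	struct scmi_clock_info {
		char name[16];
	};

	struct scmi_clock_info info = { "" };
	/* &info.name decays to a valid, non-NULL pointer, so "!info.name" is
	 * always false; only the empty-string test "!info.name[0]" can fire. */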
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b0e591eaa71a..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
1459static const u32 vgpr_init_regs[] = 1459static const u32 vgpr_init_regs[] =
1460{ 1460{
1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, 1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1462 mmCOMPUTE_RESOURCE_LIMITS, 0, 1462 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1463 mmCOMPUTE_NUM_THREAD_X, 256*4, 1463 mmCOMPUTE_NUM_THREAD_X, 256*4,
1464 mmCOMPUTE_NUM_THREAD_Y, 1, 1464 mmCOMPUTE_NUM_THREAD_Y, 1,
1465 mmCOMPUTE_NUM_THREAD_Z, 1, 1465 mmCOMPUTE_NUM_THREAD_Z, 1,
1466 mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1466 mmCOMPUTE_PGM_RSRC2, 20, 1467 mmCOMPUTE_PGM_RSRC2, 20,
1467 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1468 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1468 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1469 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
1479static const u32 sgpr1_init_regs[] = 1480static const u32 sgpr1_init_regs[] =
1480{ 1481{
1481 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, 1482 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1482 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, 1483 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1483 mmCOMPUTE_NUM_THREAD_X, 256*5, 1484 mmCOMPUTE_NUM_THREAD_X, 256*5,
1484 mmCOMPUTE_NUM_THREAD_Y, 1, 1485 mmCOMPUTE_NUM_THREAD_Y, 1,
1485 mmCOMPUTE_NUM_THREAD_Z, 1, 1486 mmCOMPUTE_NUM_THREAD_Z, 1,
1487 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1486 mmCOMPUTE_PGM_RSRC2, 20, 1488 mmCOMPUTE_PGM_RSRC2, 20,
1487 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1489 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1488 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1490 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
1503 mmCOMPUTE_NUM_THREAD_X, 256*5, 1505 mmCOMPUTE_NUM_THREAD_X, 256*5,
1504 mmCOMPUTE_NUM_THREAD_Y, 1, 1506 mmCOMPUTE_NUM_THREAD_Y, 1,
1505 mmCOMPUTE_NUM_THREAD_Z, 1, 1507 mmCOMPUTE_NUM_THREAD_Z, 1,
1508 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1506 mmCOMPUTE_PGM_RSRC2, 20, 1509 mmCOMPUTE_PGM_RSRC2, 20,
1507 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1510 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1508 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1511 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ed2f06c9f346..3858820a0055 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,5 +6,6 @@ config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices" 6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && X86_64
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2
9 select MMU_NOTIFIER
9 help 10 help
10 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..59808a39ecf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
749 struct timespec64 time; 749 struct timespec64 time;
750 750
751 dev = kfd_device_by_id(args->gpu_id); 751 dev = kfd_device_by_id(args->gpu_id);
752 if (dev == NULL) 752 if (dev)
753 return -EINVAL; 753 /* Reading GPU clock counter from KGD */
754 754 args->gpu_clock_counter =
755 /* Reading GPU clock counter from KGD */ 755 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
756 args->gpu_clock_counter = 756 else
757 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 757 /* Node without GPU resource */
758 args->gpu_clock_counter = 0;
758 759
759 /* No access to rdtsc. Using raw monotonic time */ 760 /* No access to rdtsc. Using raw monotonic time */
760 getrawmonotonic64(&time); 761 getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
1147 return ret; 1148 return ret;
1148} 1149}
1149 1150
1150bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1151static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1151{ 1152{
1152 struct kfd_local_mem_info mem_info; 1153 struct kfd_local_mem_info mem_info;
1153 1154
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1421 1422
1422 pdd = kfd_get_process_device_data(dev, p); 1423 pdd = kfd_get_process_device_data(dev, p);
1423 if (!pdd) { 1424 if (!pdd) {
1424 err = PTR_ERR(pdd); 1425 err = -EINVAL;
1425 goto bind_process_to_device_failed; 1426 goto bind_process_to_device_failed;
1426 } 1427 }
1427 1428
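The error-path change matters because kfd_get_process_device_data() returns NULL on failure rather than an ERR_PTR() value, so the old code silently reported success. In sketch form:

	void *pdd = NULL;		/* what the lookup returns on failure */
	long err = PTR_ERR(pdd);	/* (long)NULL == 0, i.e. "success" */
	/* the ioctl would return 0 for a failed bind, hence the explicit -EINVAL */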
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e2f379ce217..1dd1142246c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
4557 struct amdgpu_dm_connector *aconnector = NULL; 4557 struct amdgpu_dm_connector *aconnector = NULL;
4558 struct drm_connector_state *new_con_state = NULL; 4558 struct drm_connector_state *new_con_state = NULL;
4559 struct dm_connector_state *dm_conn_state = NULL; 4559 struct dm_connector_state *dm_conn_state = NULL;
4560 struct drm_plane_state *new_plane_state = NULL;
4560 4561
4561 new_stream = NULL; 4562 new_stream = NULL;
4562 4563
@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
4564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4565 acrtc = to_amdgpu_crtc(crtc); 4566 acrtc = to_amdgpu_crtc(crtc);
4566 4567
4568 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4569
4570 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4571 ret = -EINVAL;
4572 goto fail;
4573 }
4574
4567 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 4575 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4568 4576
4569 /* TODO This hack should go away */ 4577 /* TODO This hack should go away */
@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
4760 if (!dm_old_crtc_state->stream) 4768 if (!dm_old_crtc_state->stream)
4761 continue; 4769 continue;
4762 4770
4763 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", 4771 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4764 plane->base.id, old_plane_crtc->base.id); 4772 plane->base.id, old_plane_crtc->base.id);
4765 4773
4766 if (!dc_remove_plane_from_context( 4774 if (!dc_remove_plane_from_context(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index f6cb502c303f..25f064c01038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
138 lut = (struct drm_color_lut *)blob->data; 138 lut = (struct drm_color_lut *)blob->data;
139 lut_size = blob->length / sizeof(struct drm_color_lut); 139 lut_size = blob->length / sizeof(struct drm_color_lut);
140 140
141 if (__is_lut_linear(lut, lut_size)) {
142 /* Set to bypass if lut is set to linear */
143 stream->out_transfer_func->type = TF_TYPE_BYPASS;
144 stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
145 return 0;
146 }
147
148 gamma = dc_create_gamma(); 141 gamma = dc_create_gamma();
149 if (!gamma) 142 if (!gamma)
150 return -ENOMEM; 143 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
329{ 329{
330 int src; 330 int src;
331 struct irq_list_head *lh; 331 struct irq_list_head *lh;
332 unsigned long irq_table_flags;
332 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 333 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
333
334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
335 335 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
336 /* The handler was removed from the table, 336 /* The handler was removed from the table,
337 * it means it is safe to flush all the 'work' 337 * it means it is safe to flush all the 'work'
338 * (because no code can schedule a new one). */ 338 * (because no code can schedule a new one). */
339 lh = &adev->dm.irq_handler_list_low_tab[src]; 339 lh = &adev->dm.irq_handler_list_low_tab[src];
340 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
340 flush_work(&lh->work); 341 flush_work(&lh->work);
341 } 342 }
342} 343}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
163 163
164 if (amdgpu_dm_connector->edid) {
165 kfree(amdgpu_dm_connector->edid);
166 amdgpu_dm_connector->edid = NULL;
+}
+
 	drm_encoder_cleanup(&amdgpu_encoder->base);
 	kfree(amdgpu_encoder);
 	drm_connector_cleanup(connector);
@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-	struct edid *edid;
 	struct dc_sink *dc_sink;
 	struct dc_sink_init_data init_params = {
 			.link = aconnector->dc_link,
 			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/* FIXME none of this is safe. we shouldn't touch aconnector here in
+	 * atomic_check
+	 */
+
 	/*
 	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
 	 */
 	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
 		return;
 
-	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
-
-	if (!edid) {
-		drm_mode_connector_update_edid_property(
-			&aconnector->base,
-			NULL);
-		return;
-	}
-
-	aconnector->edid = edid;
+	ASSERT(aconnector->edid);
 
 	dc_sink = dc_link_add_remote_sink(
 		aconnector->dc_link,
@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 
 	amdgpu_dm_add_sink_to_freesync_module(
 			connector, aconnector->edid);
-
-	drm_mode_connector_update_edid_property(
-		&aconnector->base, aconnector->edid);
 }
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 	if (!aconnector->edid) {
 		struct edid *edid;
-		struct dc_sink *dc_sink;
-		struct dc_sink_init_data init_params = {
-				.link = aconnector->dc_link,
-				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 		if (!edid) {
@@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		}
 
 		aconnector->edid = edid;
+	}
 
+	if (!aconnector->dc_sink) {
+		struct dc_sink *dc_sink;
+		struct dc_sink_init_data init_params = {
+				.link = aconnector->dc_link,
+				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 		dc_sink = dc_link_add_remote_sink(
 			aconnector->dc_link,
-			(uint8_t *)edid,
-			(edid->extensions + 1) * EDID_LENGTH,
+			(uint8_t *)aconnector->edid,
+			(aconnector->edid->extensions + 1) * EDID_LENGTH,
 			&init_params);
 
 		dc_sink->priv = aconnector;
@@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 		if (aconnector->dc_sink)
 			amdgpu_dm_add_sink_to_freesync_module(
-					connector, edid);
-
-		drm_mode_connector_update_edid_property(
-			&aconnector->base, edid);
+					connector, aconnector->edid);
 	}
 
+	drm_mode_connector_update_edid_property(
+			&aconnector->base, aconnector->edid);
+
 	ret = drm_add_edid_modes(connector, aconnector->edid);
 
 	return ret;
@@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
 	}
-	if (aconnector->edid) {
-		kfree(aconnector->edid);
-		aconnector->edid = NULL;
-	}
-
-	drm_mode_connector_update_edid_property(
-		&aconnector->base,
-		NULL);
 
 	aconnector->mst_connected = false;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index add90675fd2a..26fbeafc3c96 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+	}
 
 	dep_table = table_info->vdd_dep_on_sclk;
 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	}
 }
 
 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
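
The smu7 change keeps a dirty bitmask (need_update_smu7_dpm_table) and now marks the owning clock domain dirty together with VDDC, since restored voltage levels only reach the firmware when the clock-level table is re-uploaded as well. A minimal sketch of that dirty-flag pattern; the flag names and table layout here are illustrative, not the powerplay API:

/* Sketch of the dirty-bitmask pattern; names are illustrative. */
#include <stdint.h>
#include <stddef.h>

#define OD_UPDATE_VDDC (1u << 0)
#define OD_UPDATE_SCLK (1u << 1)
#define OD_UPDATE_MCLK (1u << 2)

struct dep_entry { uint16_t clk; uint16_t vddc; };

/* Compare a stock dependency table against its overdrive copy and mark
 * both the voltage and the owning clock domain dirty on any mismatch;
 * when they match again, drop VDDC but still force one clock re-upload
 * so the firmware picks up the restored voltages. */
static void check_dep_table(const struct dep_entry *dep,
			    const struct dep_entry *odn,
			    size_t count, uint32_t clk_flag,
			    uint32_t *dirty)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (dep[i].vddc != odn[i].vddc) {
			*dirty |= OD_UPDATE_VDDC | clk_flag;
			return;
		}
	}
	if (*dirty & OD_UPDATE_VDDC) {
		*dirty &= ~OD_UPDATE_VDDC;
		*dirty |= clk_flag;
	}
}
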
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index fb696e3d06cf..2f8a3b983cce 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,8 +412,10 @@ typedef struct {
 	QuadraticInt_t ReservedEquation2;
 	QuadraticInt_t ReservedEquation3;
 
+	uint16_t MinVoltageUlvGfx;
+	uint16_t MinVoltageUlvSoc;
 
-	uint32_t Reserved[15];
+	uint32_t Reserved[14];
 
 
 
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 02a50929af67..e7f4fe2848a5 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
 {
 	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
 	ssize_t ret;
+	int retry;
 
 	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
 		return 0;
 
-	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
-				     &tmds_oen, sizeof(tmds_oen));
-	if (ret) {
-		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
-			      enable ? "enable" : "disable");
-		return ret;
+	/*
+	 * LSPCON adapters in low-power state may ignore the first write, so
+	 * read back and verify the written value a few times.
+	 */
+	for (retry = 0; retry < 3; retry++) {
+		uint8_t tmp;
+
+		ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+					     &tmds_oen, sizeof(tmds_oen));
+		if (ret) {
+			DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+				      enable ? "enable" : "disable",
+				      retry + 1);
+			return ret;
+		}
+
+		ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+					    &tmp, sizeof(tmp));
+		if (ret) {
+			DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+				      enable ? "enabling" : "disabling",
+				      retry + 1);
+			return ret;
+		}
+
+		if (tmp == tmds_oen)
+			return 0;
 	}
 
-	return 0;
+	DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+		      enable ? "enabling" : "disabling");
+
+	return -EIO;
 }
 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
 
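
The retry loop in this hunk is a write-then-verify pattern: an adapter waking from low power can silently drop the first I2C write, so the value is read back and the write repeated a bounded number of times. The same idea in isolation, assuming hypothetical reg_write()/reg_read() helpers:

/* Write-then-verify with bounded retries; reg_write/reg_read are
 * hypothetical helpers returning 0 on success. */
#include <errno.h>
#include <stdint.h>

int reg_write(uint8_t reg, uint8_t val);
int reg_read(uint8_t reg, uint8_t *val);

static int write_verified(uint8_t reg, uint8_t val, int retries)
{
	while (retries--) {
		uint8_t tmp;
		int ret;

		ret = reg_write(reg, val);
		if (ret)
			return ret;	/* bus error: give up immediately */

		ret = reg_read(reg, &tmp);
		if (ret)
			return ret;

		if (tmp == val)
			return 0;	/* device accepted the write */
	}
	return -EIO;			/* device kept ignoring the write */
}
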
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..39f1db4acda4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
 	info->max_tmds_clock = 0;
 	info->dvi_dual = false;
 	info->has_hdmi_infoframe = false;
+	memset(&info->hdmi, 0, sizeof(info->hdmi));
 
 	info->non_desktop = 0;
 }
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 
 	u32 quirks = edid_get_quirks(edid);
 
+	drm_reset_display_info(connector);
+
 	info->width_mm = edid->width_cm * 10;
 	info->height_mm = edid->height_cm * 10;
 
-	/* driver figures it out in this case */
-	info->bpc = 0;
-	info->color_formats = 0;
-	info->cea_rev = 0;
-	info->max_tmds_clock = 0;
-	info->dvi_dual = false;
-	info->has_hdmi_infoframe = false;
-
 	info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
 	DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
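
This edid change replaces an ad-hoc list of per-field resets with a single call to drm_reset_display_info(), so newly added fields (like info->hdmi above) only need clearing in one place. The shape of that refactor, sketched with illustrative types:

/* Reset-before-parse: one helper owns the defaults, the parser only
 * fills in what the data source actually provides. Types are
 * illustrative, not the drm structures. */
#include <string.h>

struct display_info {
	int bpc;
	int max_tmds_clock;
	/* ... every field parsed from an EDID ... */
};

static void reset_display_info(struct display_info *info)
{
	/* New fields are covered automatically. */
	memset(info, 0, sizeof(*info));
}

static void parse_display_info(struct display_info *info,
			       const unsigned char *edid)
{
	reset_display_info(info);
	/* ... populate only what the EDID describes ... */
	(void)edid;
}
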
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 0faaf829f5bf..f0e79178bde6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
-#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
-
-/*
- * exynos specific framebuffer structure.
- *
- * @fb: drm framebuffer obejct.
- * @exynos_gem: array of exynos specific gem object containing a gem object.
- */
-struct exynos_drm_fb {
-	struct drm_framebuffer fb;
-	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
-	dma_addr_t dma_addr[MAX_FB_BUFFER];
-};
-
 static int check_fb_gem_memory_type(struct drm_device *drm_dev,
 				    struct exynos_drm_gem *exynos_gem)
 {
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
 	return 0;
 }
 
-static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	unsigned int i;
-
-	drm_framebuffer_cleanup(fb);
-
-	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
-		struct drm_gem_object *obj;
-
-		if (exynos_fb->exynos_gem[i] == NULL)
-			continue;
-
-		obj = &exynos_fb->exynos_gem[i]->base;
-		drm_gem_object_unreference_unlocked(obj);
-	}
-
-	kfree(exynos_fb);
-	exynos_fb = NULL;
-}
-
-static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
-					struct drm_file *file_priv,
-					unsigned int *handle)
-{
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-
-	return drm_gem_handle_create(file_priv,
-				     &exynos_fb->exynos_gem[0]->base, handle);
-}
-
 static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
-	.destroy = exynos_drm_fb_destroy,
-	.create_handle = exynos_drm_fb_create_handle,
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
 };
 
 struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct exynos_drm_gem **exynos_gem,
 			    int count)
 {
-	struct exynos_drm_fb *exynos_fb;
+	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb)
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 		if (ret < 0)
 			goto err;
 
-		exynos_fb->exynos_gem[i] = exynos_gem[i];
-		exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
-						+ mode_cmd->offsets[i];
+		fb->obj[i] = &exynos_gem[i]->base;
 	}
 
-	drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
-	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize framebuffer\n");
 		goto err;
 	}
 
-	return &exynos_fb->fb;
+	return fb;
 
 err:
-	kfree(exynos_fb);
+	kfree(fb);
 	return ERR_PTR(ret);
 }
 
@@ -191,12 +145,13 @@ err:
 
 dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
 {
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	struct exynos_drm_gem *exynos_gem;
 
 	if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
 		return 0;
 
-	return exynos_fb->dma_addr[index];
+	exynos_gem = to_exynos_gem(fb->obj[index]);
+	return exynos_gem->dma_addr + fb->offsets[index];
 }
 
 static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
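
With the exynos framebuffer now storing its GEM objects in the core struct drm_framebuffer (fb->obj[]), the per-plane DMA address is derived on demand instead of being cached in a driver-private struct. A sketch of that derivation, assuming a driver GEM type that carries a dma_addr; types here are illustrative:

/* Per-plane scanout address from core fb state; not the drm API. */
#include <stdint.h>

#define MAX_FB_PLANES 4

struct gem_obj { uint64_t dma_addr; };

struct framebuffer {
	struct gem_obj *obj[MAX_FB_PLANES];
	uint32_t offsets[MAX_FB_PLANES];
};

static uint64_t fb_plane_addr(const struct framebuffer *fb, int plane)
{
	if (plane >= MAX_FB_PLANES || !fb->obj[plane])
		return 0;
	/* base of the backing buffer plus the plane's byte offset */
	return fb->obj[plane]->dma_addr + fb->offsets[plane];
}
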
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index db6b94dda5df..d85939bd7b47 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
 {
 	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
 		s->workload->pending_events);
+	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index dd96ffc878ac..6d8180e8d1e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	int pipe;
+
 	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	if (IS_BROADWELL(dev_priv))
 		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 
+	/* Disable Primary/Sprite/Cursor plane */
+	for_each_pipe(dev_priv, pipe) {
+		vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
+		vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+	}
+
 	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index b555eb26f9ce..6f4f8e941fc2 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
 		struct intel_vgpu_fb_info *fb_info)
 {
 	gvt_dmabuf->drm_format = fb_info->drm_format;
+	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
 	gvt_dmabuf->width = fb_info->width;
 	gvt_dmabuf->height = fb_info->height;
 	gvt_dmabuf->stride = fb_info->stride;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 6b50fe78dc1b..1c120683e958 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	plane->hw_format = fmt;
 
 	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 			alpha_plane, alpha_force);
 
 	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->drm_format = drm_format;
 
 	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d29281231507..78e55aafc8bc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
 			false, 0, mm->vgpu);
 }
 
+static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
+}
+
 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
+		struct intel_gvt_gtt_entry *entry)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long pfn;
+
+	pfn = pte_ops->get_pfn(entry);
+	if (pfn != vgpu->gvt->gtt.scratch_mfn)
+		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
+						pfn << PAGE_SHIFT);
+}
+
 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
 			bytes);
-	m = e;
 
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
+		m = e;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		} else
 			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
-	} else
+	} else {
+		ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
+		ggtt_invalidate_pte(vgpu, &m);
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+		ops->clear_present(&m);
+	}
 
 out:
 	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 		return PTR_ERR(gtt->ggtt_mm);
 	}
 
-	intel_vgpu_reset_ggtt(vgpu);
+	intel_vgpu_reset_ggtt(vgpu, false);
 
 	return create_scratch_page_tree(vgpu);
 }
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 /**
  * intel_vgpu_reset_ggtt - reset the GGTT entry
  * @vgpu: a vGPU
+ * @invalidate_old: invalidate old entries
  *
  * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry old_entry;
 	u32 index;
 	u32 num_entries;
 
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
-	while (num_entries--)
+	while (num_entries--) {
+		if (invalidate_old) {
+			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+			ggtt_invalidate_pte(vgpu, &old_entry);
+		}
 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+	}
 
 	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
-	while (num_entries--)
+	while (num_entries--) {
+		if (invalidate_old) {
+			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+			ggtt_invalidate_pte(vgpu, &old_entry);
+		}
 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+	}
 
 	ggtt_invalidate(dev_priv);
 }
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	 * removing the shadow pages.
 	 */
 	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu);
+	intel_vgpu_reset_ggtt(vgpu, true);
 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a8b369cd352b..3792f2b7f4ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
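
The gtt.c/gtt.h change above pairs every GGTT PTE overwrite with an invalidation of the entry it replaces, so the DMA mapping pinned for the old guest page is released instead of leaking. The pattern in isolation, with hypothetical helper names standing in for the shadow-table and hypervisor calls:

/* Unmap-old-before-overwrite; pte_read/pte_write/dma_unmap_page are
 * hypothetical stand-ins, not the gvt API. */
#include <stdint.h>

uint64_t pte_read(unsigned long index);
void pte_write(unsigned long index, uint64_t pte);
void dma_unmap_page(uint64_t pfn);

static const uint64_t scratch_pfn = 0x1234;	/* assumed scratch page */

static void invalidate_pte(uint64_t pte)
{
	uint64_t pfn = pte >> 12;

	/* the scratch page is never DMA-mapped per guest page */
	if (pfn != scratch_pfn)
		dma_unmap_page(pfn);
}

static void update_entry(unsigned long index, uint64_t new_pte)
{
	invalidate_pte(pte_read(index));	/* release the old mapping */
	pte_write(index, new_pte);
}
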
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8c5d5d005854..a33c1c3e4a21 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		/* fall through */
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 		return PTR_ERR_OR_ZERO(mm);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c16a492449d7..1466d8769ec9 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
 	}
 
-	return 0;
+	return -ENOTTY;
 }
 
 static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 84ca369f15a5..3b4daafebdcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_perf;
 
-	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
-	 * otherwise the vga fbdev driver falls over. */
+	/*
+	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
+	 * otherwise the vga fbdev driver falls over.
+	 */
 	ret = i915_kick_out_firmware_fb(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_kick_out_vgacon(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting VGA console\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_ggtt_init_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_ggtt;
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to enable GGTT\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	pci_set_master(pdev);
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
-			goto out_ggtt;
+			goto err_ggtt;
 		}
 	}
 
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
-			goto out_ggtt;
+			goto err_ggtt;
 		}
 	}
 
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_init(dev_priv);
 	if (ret)
-		goto out_ggtt;
+		goto err_ggtt;
 
 	return 0;
 
-out_ggtt:
+err_ggtt:
 	i915_ggtt_cleanup_hw(dev_priv);
-
+err_perf:
+	i915_perf_fini(dev_priv);
 	return ret;
 }
 
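
The i915_driver_init_hw() rework converts bare `return ret` statements into the kernel's usual goto-unwind idiom, so everything initialised before the failure point is torn down, and in reverse order. The skeleton of that idiom, with placeholder setup/teardown functions:

/* Canonical goto-unwind shape; setup_* and teardown_* are placeholders. */
int setup_a(void);
int setup_b(void);
int setup_c(void);
void teardown_a(void);
void teardown_b(void);

static int init_hw(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = setup_b();
	if (ret)
		goto err_a;

	ret = setup_c();
	if (ret)
		goto err_b;

	return 0;

err_b:					/* unwind strictly in reverse order */
	teardown_b();
err_a:
	teardown_a();
	return ret;
}
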
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8c170db8495d..0414228cd2b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 		err = radix_tree_insert(handles_vma, handle, vma);
 		if (unlikely(err)) {
-			kfree(lut);
+			kmem_cache_free(eb->i915->luts, lut);
 			goto err_obj;
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d8feb9053e0c..f0519e31543a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		spin_lock_irqsave(&i915->pmu.lock, flags);
 		spin_lock(&kdev->power.lock);
 
-		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-			i915->pmu.suspended_jiffies_last =
-						kdev->power.suspended_jiffies;
+		/*
+		 * After the above branch intel_runtime_pm_get_if_in_use failed
+		 * to get the runtime PM reference we cannot assume we are in
+		 * runtime suspend since we can either: a) race with coming out
+		 * of it before we took the power.lock, or b) there are other
+		 * states than suspended which can bring us here.
+		 *
+		 * We need to double-check that we are indeed currently runtime
+		 * suspended and if not we cannot do better than report the last
+		 * known RC6 value.
+		 */
+		if (kdev->power.runtime_status == RPM_SUSPENDED) {
+			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				i915->pmu.suspended_jiffies_last =
+						kdev->power.suspended_jiffies;
 
-		val = kdev->power.suspended_jiffies -
-		      i915->pmu.suspended_jiffies_last;
-		val += jiffies - kdev->power.accounting_timestamp;
+			val = kdev->power.suspended_jiffies -
+			      i915->pmu.suspended_jiffies_last;
+			val += jiffies - kdev->power.accounting_timestamp;
 
-		spin_unlock(&kdev->power.lock);
+			val = jiffies_to_nsecs(val);
+			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
-		val = jiffies_to_nsecs(val);
-		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
-		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+		} else {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+		}
 
+		spin_unlock(&kdev->power.lock);
 		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 
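
The PMU fix only trusts the suspended-jiffies counters when the device is verifiably in RPM_SUSPENDED, and otherwise falls back to the best value it already has. Stripped of locking, the decision tree looks roughly like this; names and structure are illustrative, not the i915 PMU internals:

/* Fallback chain for a counter that can only be sampled in one state. */
#include <stdint.h>
#include <stdbool.h>

struct rc6_state {
	uint64_t measured;	/* last value read from hardware */
	uint64_t estimated;	/* last estimate made while suspended */
};

static uint64_t rc6_read(struct rc6_state *s, bool suspended,
			 uint64_t suspended_ns)
{
	if (suspended) {
		/* device is asleep: extend the measured value by the
		 * time spent suspended and remember the estimate */
		s->estimated = s->measured + suspended_ns;
		return s->estimated;
	}
	if (s->estimated)
		return s->estimated;	/* best guess from an earlier sample */
	return s->measured;		/* never estimated: report raw value */
}
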
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 709d6ca68074..3ea566f99450 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9_BC(dev_priv))
+	if (!IS_GEN9(dev_priv))
 		return;
 
 	i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c5c7530ba157..447b721c3be9 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		return;
 
 	aux_channel = child->aux_channel;
-	ddc_pin = child->ddc_pin;
 
 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
-
-		sanitize_ddc_pin(dev_priv, port);
+		ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+			info->alternate_ddc_pin = ddc_pin;
+			sanitize_ddc_pin(dev_priv, port);
+		} else {
+			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+				      "sticking to defaults\n",
+				      port_name(port), ddc_pin);
+		}
 	}
 
 	if (is_dp) {
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index fc8b2c6e3508..32d24c69da3c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 		}
 	}
 
-	/* According to BSpec, "The CD clock frequency must be at least twice
+	/*
+	 * According to BSpec, "The CD clock frequency must be at least twice
 	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+	 *
+	 * FIXME: Check the actual, not default, BCLK being used.
+	 *
+	 * FIXME: This does not depend on ->has_audio because the higher CDCLK
+	 * is required for audio probe, also when there are no audio capable
+	 * displays connected at probe time. This leads to unnecessarily high
+	 * CDCLK when audio is not required.
+	 *
+	 * FIXME: This limit is only applied when there are displays connected
+	 * at probe time. If we probe without displays, we'll still end up using
+	 * the platform minimum CDCLK, failing audio probe.
 	 */
-	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		min_cdclk = max(2 * 96000, min_cdclk);
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d4368589b355..a80fbad9be0f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
  * check the condition before the timeout.
  */
 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
 	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
 	int ret__; \
 	might_sleep(); \
 	for (;;) { \
-		bool expired__ = time_after(jiffies, timeout__); \
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
 		OP; \
 		if (COND) { \
 			ret__ = 0; \
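
Switching __wait_for() from jiffies to ktime_get_raw() gives the macro a monotonic clock with sub-jiffy resolution, so short waits no longer round up to a whole tick; note it also samples expiry before evaluating the condition, guaranteeing one final check after the deadline. The same loop shape in plain C, using clock_gettime(CLOCK_MONOTONIC) as a stand-in:

/* Poll-with-deadline loop on a monotonic clock; cond() is a
 * placeholder for the condition being waited on. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>

bool cond(void);

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int wait_for_cond(uint64_t timeout_us)
{
	const uint64_t end = now_ns() + timeout_us * 1000;

	for (;;) {
		/* sample expiry before the check so one last check
		 * happens even if we slept past the deadline */
		const bool expired = now_ns() > end;

		if (cond())
			return 0;
		if (expired)
			return -ETIMEDOUT;

		struct timespec d = { 0, 10000 };	/* back off 10 us */
		nanosleep(&d, NULL);
	}
}
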
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..6467a5cc2ca3 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 		return;
 
 	intel_fbdev_sync(ifbdev);
-	if (ifbdev->vma)
+	if (ifbdev->vma || ifbdev->helper.deferred_setup)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 697af5add78b..e3a5f673ff67 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
577 * know the next preemption status we see corresponds 577 * know the next preemption status we see corresponds
578 * to this ELSP update. 578 * to this ELSP update.
579 */ 579 */
580 GEM_BUG_ON(!execlists_is_active(execlists,
581 EXECLISTS_ACTIVE_USER));
580 GEM_BUG_ON(!port_count(&port[0])); 582 GEM_BUG_ON(!port_count(&port[0]));
581 if (port_count(&port[0]) > 1) 583 if (port_count(&port[0]) > 1)
582 goto unlock; 584 goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
738 memset(port, 0, sizeof(*port)); 740 memset(port, 0, sizeof(*port));
739 port++; 741 port++;
740 } 742 }
743
744 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
741} 745}
742 746
743static void execlists_cancel_requests(struct intel_engine_cs *engine) 747static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data)
1001 1005
1002 if (fw) 1006 if (fw)
1003 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); 1007 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
1008
1009 /* If the engine is now idle, so should be the flag; and vice versa. */
1010 GEM_BUG_ON(execlists_is_active(&engine->execlists,
1011 EXECLISTS_ACTIVE_USER) ==
1012 !port_isset(engine->execlists.port));
1004} 1013}
1005 1014
1006static void queue_request(struct intel_engine_cs *engine, 1015static void queue_request(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 
 	DRM_DEBUG_KMS("Enabling DC6\n");
 
-	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+	/* Wa Display #1183: skl,kbl,cfl */
+	if (IS_GEN9_BC(dev_priv))
+		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+			   SKL_SELECT_ALTERNATE_DC_EXIT);
 
+	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
 void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
 	DRM_DEBUG_KMS("Disabling DC6\n");
 
-	/* Wa Display #1183: skl,kbl,cfl */
-	if (IS_GEN9_BC(dev_priv))
-		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-			   SKL_SELECT_ALTERNATE_DC_EXIT);
-
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	mdp4_crtc->event = crtc->state->event;
+	crtc->state->event = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	mdp5_crtc->event = crtc->state->event;
+	crtc->state->event = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/*
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
 	return i;
 }
 
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+		uint64_t modifier)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
 #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
 
 /* MDP capabilities */
 #define MDP_CAP_SMP	BIT(0)	/* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 7a03a9489708..8baba30d6c65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -173,6 +173,7 @@ struct msm_dsi_host {
 
 	bool registered;
 	bool power_on;
+	bool enabled;
 	int irq;
 };
 
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
 	switch (mipi_fmt) {
 	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
 	case MIPI_DSI_FMT_RGB666_PACKED:
-	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
 	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
 	default:			return CMD_DST_FORMAT_RGB888;
 	}
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
 
 static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 {
+	u32 ret = 0;
+	struct device *dev = &msm_host->pdev->dev;
+
 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
 
 	reinit_completion(&msm_host->video_comp);
 
-	wait_for_completion_timeout(&msm_host->video_comp,
+	ret = wait_for_completion_timeout(&msm_host->video_comp,
 			msecs_to_jiffies(70));
 
+	if (ret <= 0)
+		dev_err(dev, "wait for video done timed out\n");
+
 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
 
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
 	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
 		return;
 
-	if (msm_host->power_on) {
+	if (msm_host->power_on && msm_host->enabled) {
 		dsi_wait4video_done(msm_host);
 		/* delay 4 ms to skip BLLP */
 		usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
 	 *	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 	 * }
 	 */
-
+	msm_host->enabled = true;
 	return 0;
 }
 
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
+	msm_host->enabled = false;
 	dsi_op_mode_config(msm_host,
 		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
 
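
dsi_wait4video_done() now checks the result of wait_for_completion_timeout(), which returns 0 on timeout and the remaining jiffies otherwise; ignoring it had silently hidden stalls. The checked form in general, as a kernel-style sketch with an illustrative completion:

/* Always inspect the timeout result; 'work_done' is illustrative. */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_work(struct completion *work_done, struct device *dev)
{
	unsigned long left;

	left = wait_for_completion_timeout(work_done, msecs_to_jiffies(70));
	if (!left) {
		dev_err(dev, "timed out waiting for work\n");
		return -ETIMEDOUT;
	}

	return 0;	/* completed with 'left' jiffies to spare */
}
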
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8e9d5c255820..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
 	return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+	struct msm_dsi_phy_clk_request *clk_req)
+{
+	const unsigned long bit_rate = clk_req->bitclk_rate;
+	const unsigned long esc_rate = clk_req->escclk_rate;
+	s32 ui, ui_x8, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 50;
+	s32 pcnt1 = 50;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = 30;
+	s32 pcnt4 = 10;
+	s32 pcnt5 = 2;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 hb_en, hb_en_ckln;
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	timing->hs_halfbyte_en = 0;
+	hb_en = 0;
+	timing->hs_halfbyte_en_ckln = 0;
+	hb_en_ckln = 0;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	ui_x8 = ui << 3;
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (95 * coeff) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = (tmin > 255) ? 511 : 255;
+	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp + 3 * ui) / ui_x8;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+	tmin = max_t(s32, temp, 0);
+	temp = (85 * coeff + 6 * ui) / ui_x8;
+	tmax = max_t(s32, temp, 0);
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = (temp / ui_x8) - 1;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+	tmax = 255;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+	temp = 60 * coeff + 52 * ui - 43 * ui;
+	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	timing->shared_timings.clk_post =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+				(((timing->hs_rqst_ckln << 3) + 8) * ui);
+	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+	tmax = 63;
+	if (tmin > tmax) {
+		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre = temp >> 1;
+		timing->shared_timings.clk_pre_inc_by_2 = 1;
+	} else {
+		timing->shared_timings.clk_pre =
+				linear_inter(tmax, tmin, pcnt2, 0, false);
+		timing->shared_timings.clk_pre_inc_by_2 = 0;
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+	    timing->hs_prep_dly_ckln);
+
+	return 0;
+}
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 			u32 bit_mask)
 {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c56268cbdb3d..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
 			struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
 			struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+			struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 			u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 0af951aaeea1..b3fffc8dbb2a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
 	dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
 }
 
-static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
-	struct msm_dsi_phy_clk_request *clk_req)
-{
-	/*
-	 * TODO: These params need to be computed, they're currently hardcoded
-	 * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
-	 * default escape clock of 19.2 Mhz.
-	 */
-
-	timing->hs_halfbyte_en = 0;
-	timing->clk_zero = 0x1c;
-	timing->clk_prepare = 0x07;
-	timing->clk_trail = 0x07;
-	timing->hs_exit = 0x23;
-	timing->hs_zero = 0x21;
-	timing->hs_prepare = 0x07;
-	timing->hs_trail = 0x07;
-	timing->hs_rqst = 0x05;
-	timing->ta_sure = 0x00;
-	timing->ta_go = 0x03;
-	timing->ta_get = 0x04;
-
-	timing->shared_timings.clk_pre = 0x2d;
-	timing->shared_timings.clk_post = 0x0d;
-
-	return 0;
-}
-
 static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 			       struct msm_dsi_phy_clk_request *clk_req)
 {
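
The new msm_dsi_dphy_timing_calc_v3() replaces the hardcoded 10nm timings with values derived per mode: each timing gets a [tmin, tmax] window from the D-PHY spec and linear_inter() picks a point some percentage into that window. A sketch of that interpolation step; the clamping mirrors what such a helper typically does, and is not the exact msm implementation:

/* Pick a value pcnt percent of the way from tmin up to tmax, clamped
 * to the window; a sketch of the linear_inter() idea, not its source. */
static int linear_inter_sketch(int tmax, int tmin, int pcnt)
{
	int val;

	if (tmin > tmax)	/* degenerate window: best effort */
		return tmin;

	val = tmin + ((tmax - tmin) * pcnt) / 100;
	if (val < tmin)
		val = tmin;
	if (val > tmax)
		val = tmax;
	return val;
}

/* Example: the 50% point of a [4, 12] window is 8. */
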
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0e0c87252ab0..7a16242bf8bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
 	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
-	format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+			mode_cmd->modifier[0]);
 	if (!format) {
 		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
 				(char *)&mode_cmd->pixel_format);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	if (IS_ERR(fb)) {
 		dev_err(dev->dev, "failed to allocate fb\n");
-		ret = PTR_ERR(fb);
-		goto fail;
+		return PTR_ERR(fb);
 	}
 
 	bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 fail_unlock:
 	mutex_unlock(&dev->struct_mutex);
-fail:
-
-	if (ret) {
-		if (fb)
-			drm_framebuffer_remove(fb);
-	}
-
+	drm_framebuffer_remove(fb);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95196479f651..f583bb4222f9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
132 struct msm_gem_object *msm_obj = to_msm_bo(obj); 132 struct msm_gem_object *msm_obj = to_msm_bo(obj);
133 133
134 if (msm_obj->pages) { 134 if (msm_obj->pages) {
135 /* For non-cached buffers, ensure the new pages are clean 135 if (msm_obj->sgt) {
136 * because display controller, GPU, etc. are not coherent: 136 /* For non-cached buffers, ensure the new
137 */ 137 * pages are clean because display controller,
138 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 138 * GPU, etc. are not coherent:
139 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 139 */
140 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 140 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
141 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
142 msm_obj->sgt->nents,
143 DMA_BIDIRECTIONAL);
141 144
142 if (msm_obj->sgt)
143 sg_free_table(msm_obj->sgt); 145 sg_free_table(msm_obj->sgt);
144 146 kfree(msm_obj->sgt);
145 kfree(msm_obj->sgt); 147 }
146 148
147 if (use_pages(obj)) 149 if (use_pages(obj))
148 drm_gem_put_pages(obj, msm_obj->pages, true, false); 150 drm_gem_put_pages(obj, msm_obj->pages, true, false);
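The msm_gem.c hunk above moves the dma_unmap_sg()/sg_free_table() calls under a single check on msm_obj->sgt, so a buffer whose scatter/gather table was never built cannot be unmapped or freed. A minimal userspace sketch of that guarded-teardown pattern; the struct and names are illustrative, not the kernel API:

#include <stdlib.h>

struct buffer {
	int *pages;	/* backing pages, may exist without a table */
	int *table;	/* optional scatter/gather-style table */
};

/* Tear down in reverse order of construction; every step is guarded
 * so a partially constructed buffer can be destroyed safely. */
static void buffer_put_pages(struct buffer *b)
{
	if (!b->pages)
		return;
	if (b->table) {		/* only undo what was actually created */
		free(b->table);
		b->table = NULL;
	}
	free(b->pages);
	b->pages = NULL;
}

int main(void)
{
	struct buffer b = { malloc(16), NULL };
	buffer_put_pages(&b);	/* safe even though b.table is NULL */
	return 0;
}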
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..aaa329dc020e 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
48 /* functions to wait for atomic commit completed on each CRTC */ 48 /* functions to wait for atomic commit completed on each CRTC */
49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms, 49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
50 struct drm_crtc *crtc); 50 struct drm_crtc *crtc);
51 /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
52 const struct msm_format *(*get_format)(struct msm_kms *kms,
53 const uint32_t format,
54 const uint64_t modifiers);
51 /* misc: */ 55 /* misc: */
52 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
53 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 56 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
54 struct drm_encoder *encoder); 57 struct drm_encoder *encoder);
55 int (*set_split_display)(struct msm_kms *kms, 58 int (*set_split_display)(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..01665b98c57e 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
183 182
184 cmd.type = type; 183 cmd.type = type;
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 184 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
186 185
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
188} 187}
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
192 uint32_t type, bool interruptible) 191 uint32_t type, bool interruptible)
193{ 192{
194 struct qxl_command cmd; 193 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
196 194
197 cmd.type = type; 195 cmd.type = type;
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 196 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
199 197
200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
201} 199}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..864b456080c4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -167,6 +167,7 @@ struct qxl_release {
167 167
168 int id; 168 int id;
169 int type; 169 int type;
170 struct qxl_bo *release_bo;
170 uint32_t release_offset; 171 uint32_t release_offset;
171 uint32_t surface_release_id; 172 uint32_t surface_release_id;
172 struct ww_acquire_ctx ticket; 173 struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e238a1a2eca1..6cc9f3367fa0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
182 goto out_free_reloc; 182 goto out_free_reloc;
183 183
184 /* TODO copy slow path code from i915 */ 184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
186 unwritten = __copy_from_user_inatomic_nocache 186 unwritten = __copy_from_user_inatomic_nocache
187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), 187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
188 u64_to_user_ptr(cmd->command), cmd->command_size); 188 u64_to_user_ptr(cmd->command), cmd->command_size);
189 189
190 { 190 {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..7cb214577275 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
173 list_del(&entry->tv.head); 173 list_del(&entry->tv.head);
174 kfree(entry); 174 kfree(entry);
175 } 175 }
176 release->release_bo = NULL;
176} 177}
177 178
178void 179void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
296{ 297{
297 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 298 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
298 int idr_ret; 299 int idr_ret;
299 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
300 struct qxl_bo *bo; 300 struct qxl_bo *bo;
301 union qxl_release_info *info; 301 union qxl_release_info *info;
302 302
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
305 if (idr_ret < 0) 305 if (idr_ret < 0)
306 return idr_ret; 306 return idr_ret;
307 bo = to_qxl_bo(entry->tv.bo); 307 bo = create_rel->release_bo;
308 308
309 (*release)->release_bo = bo;
309 (*release)->release_offset = create_rel->release_offset + 64; 310 (*release)->release_offset = create_rel->release_offset + 64;
310 311
311 qxl_release_list_add(*release, bo); 312 qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
365 366
366 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 367 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
367 368
369 (*release)->release_bo = bo;
368 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; 370 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
369 qdev->current_release_bo_offset[cur_idx]++; 371 qdev->current_release_bo_offset[cur_idx]++;
370 372
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
408{ 410{
409 void *ptr; 411 void *ptr;
410 union qxl_release_info *info; 412 union qxl_release_info *info;
411 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 413 struct qxl_bo *bo = release->release_bo;
412 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
413 414
414 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 415 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
415 if (!ptr) 416 if (!ptr)
416 return NULL; 417 return NULL;
417 info = ptr + (release->release_offset & ~PAGE_SIZE); 418 info = ptr + (release->release_offset & ~PAGE_MASK);
418 return info; 419 return info;
419} 420}
420 421
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
422 struct qxl_release *release, 423 struct qxl_release *release,
423 union qxl_release_info *info) 424 union qxl_release_info *info)
424{ 425{
425 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 426 struct qxl_bo *bo = release->release_bo;
426 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
427 void *ptr; 427 void *ptr;
428 428
429 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 429 ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
431} 431}
432 432
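The recurring PAGE_SIZE to PAGE_MASK change in the qxl hunks is the substantive fix: offset & PAGE_SIZE only tests one bit, whereas offset & PAGE_MASK yields the page-aligned base and offset & ~PAGE_MASK the offset within the page. A standalone sketch of the arithmetic, assuming the usual 4KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t off = 0x3468;	/* arbitrary release offset */

	/* off & PAGE_SIZE tests a single bit (bit 12), which is why
	 * the old code was wrong for most offsets */
	printf("buggy base:  %#llx\n", (unsigned long long)(off & PAGE_SIZE));
	/* the fixed code splits page base and in-page offset */
	printf("page base:   %#llx\n", (unsigned long long)(off & PAGE_MASK));
	printf("in-page off: %#llx\n", (unsigned long long)(off & ~PAGE_MASK));
	return 0;
}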
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index bffff4c9fbf5..be3f14d7746d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
94 } 94 }
95} 95}
96 96
97static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
98 const struct drm_display_mode *mode)
99{
100 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
101 struct sun4i_tcon *tcon = lvds->tcon;
102 u32 hsync = mode->hsync_end - mode->hsync_start;
103 u32 vsync = mode->vsync_end - mode->vsync_start;
104 unsigned long rate = mode->clock * 1000;
105 long rounded_rate;
106
107 DRM_DEBUG_DRIVER("Validating modes...\n");
108
109 if (hsync < 1)
110 return MODE_HSYNC_NARROW;
111
112 if (hsync > 0x3ff)
113 return MODE_HSYNC_WIDE;
114
115 if ((mode->hdisplay < 1) || (mode->htotal < 1))
116 return MODE_H_ILLEGAL;
117
118 if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
119 return MODE_BAD_HVALUE;
120
121 DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
122
123 if (vsync < 1)
124 return MODE_VSYNC_NARROW;
125
126 if (vsync > 0x3ff)
127 return MODE_VSYNC_WIDE;
128
129 if ((mode->vdisplay < 1) || (mode->vtotal < 1))
130 return MODE_V_ILLEGAL;
131
132 if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
133 return MODE_BAD_VVALUE;
134
135 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
136
137 tcon->dclk_min_div = 7;
138 tcon->dclk_max_div = 7;
139 rounded_rate = clk_round_rate(tcon->dclk, rate);
140 if (rounded_rate < rate)
141 return MODE_CLOCK_LOW;
142
143 if (rounded_rate > rate)
144 return MODE_CLOCK_HIGH;
145
146 DRM_DEBUG_DRIVER("Clock rate OK\n");
147
148 return MODE_OK;
149}
150
151static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { 97static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
152 .disable = sun4i_lvds_encoder_disable, 98 .disable = sun4i_lvds_encoder_disable,
153 .enable = sun4i_lvds_encoder_enable, 99 .enable = sun4i_lvds_encoder_enable,
154 .mode_valid = sun4i_lvds_encoder_mode_valid,
155}; 100};
156 101
157static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { 102static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 2decc8e2c79f..add9cc97a3b6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
195 vc4_bo_set_label(obj, -1); 195 vc4_bo_set_label(obj, -1);
196 196
197 if (bo->validated_shader) { 197 if (bo->validated_shader) {
198 kfree(bo->validated_shader->uniform_addr_offsets);
198 kfree(bo->validated_shader->texture_samples); 199 kfree(bo->validated_shader->texture_samples);
199 kfree(bo->validated_shader); 200 kfree(bo->validated_shader);
200 bo->validated_shader = NULL; 201 bo->validated_shader = NULL;
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
591 } 592 }
592 593
593 if (bo->validated_shader) { 594 if (bo->validated_shader) {
595 kfree(bo->validated_shader->uniform_addr_offsets);
594 kfree(bo->validated_shader->texture_samples); 596 kfree(bo->validated_shader->texture_samples);
595 kfree(bo->validated_shader); 597 kfree(bo->validated_shader);
596 bo->validated_shader = NULL; 598 bo->validated_shader = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index d3f15bf60900..7cf82b071de2 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
942fail: 942fail:
943 kfree(validation_state.branch_targets); 943 kfree(validation_state.branch_targets);
944 if (validated_shader) { 944 if (validated_shader) {
945 kfree(validated_shader->uniform_addr_offsets);
945 kfree(validated_shader->texture_samples); 946 kfree(validated_shader->texture_samples);
946 kfree(validated_shader); 947 kfree(validated_shader);
947 } 948 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e5d..020070d483d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ retry:
293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 if (ret == -ENOSPC) { 294 if (ret == -ENOSPC) {
295 spin_unlock(&vgdev->ctrlq.qlock); 295 spin_unlock(&vgdev->ctrlq.qlock);
296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 spin_lock(&vgdev->ctrlq.qlock); 297 spin_lock(&vgdev->ctrlq.qlock);
298 goto retry; 298 goto retry;
299 } else { 299 } else {
@@ -368,7 +368,7 @@ retry:
368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 if (ret == -ENOSPC) { 369 if (ret == -ENOSPC) {
370 spin_unlock(&vgdev->cursorq.qlock); 370 spin_unlock(&vgdev->cursorq.qlock);
371 wait_event(vgdev->cursorq.ack_queue, vq->num_free); 371 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 spin_lock(&vgdev->cursorq.qlock); 372 spin_lock(&vgdev->cursorq.qlock);
373 goto retry; 373 goto retry;
374 } else { 374 } else {
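The virtgpu change makes the requeue path wait until the virtqueue has enough free descriptors for the whole request (outcnt + incnt), not merely one free slot, so a large request can no longer spin forever against small completions. A userspace analogue of the fixed wait loop using a condition variable; the kernel uses wait_event() on the ack queue, so this is only the control-flow shape:

#include <pthread.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ack = PTHREAD_COND_INITIALIZER;
static int num_free;	/* free descriptors, refilled by the IRQ path */

/* Sleep until the queue can hold the whole request. */
static void queue_buf(int needed)
{
	pthread_mutex_lock(&qlock);
	while (num_free < needed)	/* was: while (num_free == 0) */
		pthread_cond_wait(&ack, &qlock);
	num_free -= needed;		/* stand-in for virtqueue_add_sgs() */
	pthread_mutex_unlock(&qlock);
}

int main(void)
{
	num_free = 8;	/* pretend the device already freed descriptors */
	queue_buf(3);
	return 0;
}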
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a3a7ead3012..0b5cc910f62e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -525,6 +525,9 @@
525#define I2C_VENDOR_ID_HANTICK 0x0911 525#define I2C_VENDOR_ID_HANTICK 0x0911
526#define I2C_PRODUCT_ID_HANTICK_5288 0x5288 526#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
527 527
528#define I2C_VENDOR_ID_RAYD 0x2386
529#define I2C_PRODUCT_ID_RAYD_3118 0x3118
530
528#define USB_VENDOR_ID_HANWANG 0x0b57 531#define USB_VENDOR_ID_HANWANG 0x0b57
529#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 532#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
530#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff 533#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6836a856c243..930652c25120 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
387 break; 387 break;
388 388
389 case POWER_SUPPLY_PROP_CAPACITY: 389 case POWER_SUPPLY_PROP_CAPACITY:
390 if (dev->battery_report_type == HID_FEATURE_REPORT) { 390 if (dev->battery_status != HID_BATTERY_REPORTED &&
391 !dev->battery_avoid_query) {
391 value = hidinput_query_battery_capacity(dev); 392 value = hidinput_query_battery_capacity(dev);
392 if (value < 0) 393 if (value < 0)
393 return value; 394 return value;
@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
403 break; 404 break;
404 405
405 case POWER_SUPPLY_PROP_STATUS: 406 case POWER_SUPPLY_PROP_STATUS:
406 if (!dev->battery_reported && 407 if (dev->battery_status != HID_BATTERY_REPORTED &&
407 dev->battery_report_type == HID_FEATURE_REPORT) { 408 !dev->battery_avoid_query) {
408 value = hidinput_query_battery_capacity(dev); 409 value = hidinput_query_battery_capacity(dev);
409 if (value < 0) 410 if (value < 0)
410 return value; 411 return value;
411 412
412 dev->battery_capacity = value; 413 dev->battery_capacity = value;
413 dev->battery_reported = true; 414 dev->battery_status = HID_BATTERY_QUERIED;
414 } 415 }
415 416
416 if (!dev->battery_reported) 417 if (dev->battery_status == HID_BATTERY_UNKNOWN)
417 val->intval = POWER_SUPPLY_STATUS_UNKNOWN; 418 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
418 else if (dev->battery_capacity == 100) 419 else if (dev->battery_capacity == 100)
419 val->intval = POWER_SUPPLY_STATUS_FULL; 420 val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
486 dev->battery_report_type = report_type; 487 dev->battery_report_type = report_type;
487 dev->battery_report_id = field->report->id; 488 dev->battery_report_id = field->report->id;
488 489
490 /*
 491 * The stylus is normally not connected to the device, so we
 492 * can't query the device and get a meaningful battery strength.
493 * We have to wait for the device to report it on its own.
494 */
495 dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
496 field->physical == HID_DG_STYLUS;
497
489 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); 498 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
490 if (IS_ERR(dev->battery)) { 499 if (IS_ERR(dev->battery)) {
491 error = PTR_ERR(dev->battery); 500 error = PTR_ERR(dev->battery);
@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
530 539
531 capacity = hidinput_scale_battery_capacity(dev, value); 540 capacity = hidinput_scale_battery_capacity(dev, value);
532 541
533 if (!dev->battery_reported || capacity != dev->battery_capacity) { 542 if (dev->battery_status != HID_BATTERY_REPORTED ||
543 capacity != dev->battery_capacity) {
534 dev->battery_capacity = capacity; 544 dev->battery_capacity = capacity;
535 dev->battery_reported = true; 545 dev->battery_status = HID_BATTERY_REPORTED;
536 power_supply_changed(dev->battery); 546 power_supply_changed(dev->battery);
537 } 547 }
538} 548}
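The hid-input changes replace the old battery_reported bool with a three-state status, so a capacity the host polled itself is no longer treated as if the device had reported it. The real enum is defined in the hid headers for this series; the standalone sketch below only mirrors the three states:

#include <stdio.h>

/* Illustrative stand-in for HID_BATTERY_UNKNOWN/QUERIED/REPORTED. */
enum battery_status {
	BATTERY_UNKNOWN = 0,	/* no data yet */
	BATTERY_QUERIED,	/* the host polled the device itself */
	BATTERY_REPORTED,	/* the device sent an event on its own */
};

static const char *status_str(enum battery_status s)
{
	switch (s) {
	case BATTERY_QUERIED:  return "queried";
	case BATTERY_REPORTED: return "reported";
	default:               return "unknown";
	}
}

int main(void)
{
	enum battery_status s = BATTERY_QUERIED;

	/* a query no longer masquerades as a device report */
	printf("%s\n", status_str(s));
	return 0;
}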
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index fbfcc8009432..b39844adea47 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
192 int ret = 0, len; 192 int ret = 0, len;
193 unsigned char report_number; 193 unsigned char report_number;
194 194
195 if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
196 ret = -ENODEV;
197 goto out;
198 }
199
195 dev = hidraw_table[minor]->hid; 200 dev = hidraw_table[minor]->hid;
196 201
197 if (!dev->ll_driver->raw_request) { 202 if (!dev->ll_driver->raw_request) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 97689e98e53f..963328674e93 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,6 +47,7 @@
47/* quirks to control the device */ 47/* quirks to control the device */
48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
50#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
50 51
51/* flags */ 52/* flags */
52#define I2C_HID_STARTED 0 53#define I2C_HID_STARTED 0
@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
171 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 172 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
172 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 173 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
173 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 174 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
175 { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
176 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
174 { 0, 0 } 177 { 0, 0 }
175}; 178};
176 179
@@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
1220 if (ret) 1223 if (ret)
1221 return ret; 1224 return ret;
1222 1225
 1226 /* The RAYDIUM device (2386:3118) needs to be sent the report
 1227 * descriptor command again after resume; after that it behaves
 1228 * normally. Otherwise it issues too many incomplete reports.
1229 */
1230 if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
1231 ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
1232 if (ret)
1233 return ret;
1234 }
1235
1223 if (hid->driver && hid->driver->reset_resume) { 1236 if (hid->driver && hid->driver->reset_resume) {
1224 ret = hid->driver->reset_resume(hid); 1237 ret = hid->driver->reset_resume(hid);
1225 return ret; 1238 return ret;
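The quirk is wired up through the usual vendor/product lookup table terminated by a zero sentinel. A compact sketch of that lookup; the IDs are the RAYD 2386:3118 entry from this patch, while the struct itself is illustrative rather than the driver's exact type:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_RESEND_REPORT_DESCR (1u << 2)

struct quirk { uint16_t vid, pid; uint32_t flags; };

static const struct quirk quirks[] = {
	{ 0x2386, 0x3118, QUIRK_RESEND_REPORT_DESCR },
	{ 0, 0, 0 }	/* sentinel terminates the scan */
};

static uint32_t lookup_quirks(uint16_t vid, uint16_t pid)
{
	for (const struct quirk *q = quirks; q->vid; q++)
		if (q->vid == vid && q->pid == pid)
			return q->flags;
	return 0;
}

int main(void)
{
	printf("%#x\n", lookup_quirks(0x2386, 0x3118));
	return 0;
}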
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 6da16a879c9f..5f947ec20dcb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
689 return tool_type; 689 return tool_type;
690} 690}
691 691
692static void wacom_exit_report(struct wacom_wac *wacom)
693{
694 struct input_dev *input = wacom->pen_input;
695 struct wacom_features *features = &wacom->features;
696 unsigned char *data = wacom->data;
697 int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
698
699 /*
 700 * Reset all states, otherwise we lose the initial states
 701 * when the tool comes into proximity next time
702 */
703 input_report_abs(input, ABS_X, 0);
704 input_report_abs(input, ABS_Y, 0);
705 input_report_abs(input, ABS_DISTANCE, 0);
706 input_report_abs(input, ABS_TILT_X, 0);
707 input_report_abs(input, ABS_TILT_Y, 0);
708 if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
709 input_report_key(input, BTN_LEFT, 0);
710 input_report_key(input, BTN_MIDDLE, 0);
711 input_report_key(input, BTN_RIGHT, 0);
712 input_report_key(input, BTN_SIDE, 0);
713 input_report_key(input, BTN_EXTRA, 0);
714 input_report_abs(input, ABS_THROTTLE, 0);
715 input_report_abs(input, ABS_RZ, 0);
716 } else {
717 input_report_abs(input, ABS_PRESSURE, 0);
718 input_report_key(input, BTN_STYLUS, 0);
719 input_report_key(input, BTN_STYLUS2, 0);
720 input_report_key(input, BTN_TOUCH, 0);
721 input_report_abs(input, ABS_WHEEL, 0);
722 if (features->type >= INTUOS3S)
723 input_report_abs(input, ABS_Z, 0);
724 }
725 input_report_key(input, wacom->tool[idx], 0);
726 input_report_abs(input, ABS_MISC, 0); /* reset tool id */
727 input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
728 wacom->id[idx] = 0;
729}
730
692static int wacom_intuos_inout(struct wacom_wac *wacom) 731static int wacom_intuos_inout(struct wacom_wac *wacom)
693{ 732{
694 struct wacom_features *features = &wacom->features; 733 struct wacom_features *features = &wacom->features;
@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
741 if (!wacom->id[idx]) 780 if (!wacom->id[idx])
742 return 1; 781 return 1;
743 782
744 /* 783 wacom_exit_report(wacom);
745 * Reset all states otherwise we lose the initial states
746 * when in-prox next time
747 */
748 input_report_abs(input, ABS_X, 0);
749 input_report_abs(input, ABS_Y, 0);
750 input_report_abs(input, ABS_DISTANCE, 0);
751 input_report_abs(input, ABS_TILT_X, 0);
752 input_report_abs(input, ABS_TILT_Y, 0);
753 if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
754 input_report_key(input, BTN_LEFT, 0);
755 input_report_key(input, BTN_MIDDLE, 0);
756 input_report_key(input, BTN_RIGHT, 0);
757 input_report_key(input, BTN_SIDE, 0);
758 input_report_key(input, BTN_EXTRA, 0);
759 input_report_abs(input, ABS_THROTTLE, 0);
760 input_report_abs(input, ABS_RZ, 0);
761 } else {
762 input_report_abs(input, ABS_PRESSURE, 0);
763 input_report_key(input, BTN_STYLUS, 0);
764 input_report_key(input, BTN_STYLUS2, 0);
765 input_report_key(input, BTN_TOUCH, 0);
766 input_report_abs(input, ABS_WHEEL, 0);
767 if (features->type >= INTUOS3S)
768 input_report_abs(input, ABS_Z, 0);
769 }
770 input_report_key(input, wacom->tool[idx], 0);
771 input_report_abs(input, ABS_MISC, 0); /* reset tool id */
772 input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
773 wacom->id[idx] = 0;
774 return 2; 784 return 2;
775 } 785 }
776 786
@@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1235 if (!valid) 1245 if (!valid)
1236 continue; 1246 continue;
1237 1247
1248 if (!prox) {
1249 wacom->shared->stylus_in_proximity = false;
1250 wacom_exit_report(wacom);
1251 input_sync(pen_input);
1252 return;
1253 }
1238 if (range) { 1254 if (range) {
1239 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); 1255 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
1240 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); 1256 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 051a72eecb24..d2cc55e21374 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
41#endif 41#endif
42 42
43#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
44#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
45#endif
46
43/* CPUID function 0x80000001, ebx */ 47/* CPUID function 0x80000001, ebx */
44#define CPUID_PKGTYPE_MASK 0xf0000000 48#define CPUID_PKGTYPE_MASK 0xf0000000
45#define CPUID_PKGTYPE_F 0x00000000 49#define CPUID_PKGTYPE_F 0x00000000
@@ -72,6 +76,7 @@ struct k10temp_data {
72 struct pci_dev *pdev; 76 struct pci_dev *pdev;
73 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); 77 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
74 int temp_offset; 78 int temp_offset;
79 u32 temp_adjust_mask;
75}; 80};
76 81
77struct tctl_offset { 82struct tctl_offset {
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
84 { 0x17, "AMD Ryzen 5 1600X", 20000 }, 89 { 0x17, "AMD Ryzen 5 1600X", 20000 },
85 { 0x17, "AMD Ryzen 7 1700X", 20000 }, 90 { 0x17, "AMD Ryzen 7 1700X", 20000 },
86 { 0x17, "AMD Ryzen 7 1800X", 20000 }, 91 { 0x17, "AMD Ryzen 7 1800X", 20000 },
92 { 0x17, "AMD Ryzen 7 2700X", 10000 },
87 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, 93 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
88 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, 94 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
89 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, 95 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
129 135
130 data->read_tempreg(data->pdev, &regval); 136 data->read_tempreg(data->pdev, &regval);
131 temp = (regval >> 21) * 125; 137 temp = (regval >> 21) * 125;
138 if (regval & data->temp_adjust_mask)
139 temp -= 49000;
132 if (temp > data->temp_offset) 140 if (temp > data->temp_offset)
133 temp -= data->temp_offset; 141 temp -= data->temp_offset;
134 else 142 else
@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
259 data->pdev = pdev; 267 data->pdev = pdev;
260 268
261 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || 269 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
262 boot_cpu_data.x86_model == 0x70)) 270 boot_cpu_data.x86_model == 0x70)) {
263 data->read_tempreg = read_tempreg_nb_f15; 271 data->read_tempreg = read_tempreg_nb_f15;
264 else if (boot_cpu_data.x86 == 0x17) 272 } else if (boot_cpu_data.x86 == 0x17) {
273 data->temp_adjust_mask = 0x80000;
265 data->read_tempreg = read_tempreg_nb_f17; 274 data->read_tempreg = read_tempreg_nb_f17;
266 else 275 } else {
267 data->read_tempreg = read_tempreg_pci; 276 data->read_tempreg = read_tempreg_pci;
277 }
268 278
269 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { 279 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
270 const struct tctl_offset *entry = &tctl_offset_table[i]; 280 const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
292 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 302 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
293 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 303 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
294 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 304 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
305 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
295 {} 306 {}
296}; 307};
297MODULE_DEVICE_TABLE(pci, k10temp_id_table); 308MODULE_DEVICE_TABLE(pci, k10temp_id_table);
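The k10temp change decodes family 17h temperatures as before, with bits 31:21 holding the raw reading in 0.125 degC steps, but now subtracts 49 degC whenever the range-select bit (mask 0x80000 in the hunk) is set, as the new Ryzen 2000 parts report. A standalone sketch of the decode using the numbers from the patch; results are in millidegrees C:

#include <stdio.h>
#include <stdint.h>

static int decode_temp(uint32_t regval, uint32_t adjust_mask, int tctl_offset)
{
	int temp = (regval >> 21) * 125;	/* 0.125 degC per LSB */

	if (regval & adjust_mask)	/* extended-range reading */
		temp -= 49000;
	if (temp > tctl_offset)		/* per-CPU Tctl-to-Tdie offset */
		temp -= tctl_offset;
	else
		temp = 0;
	return temp;
}

int main(void)
{
	/* raw 0x2e0 -> 92 degC, minus 49 degC range, minus the 10 degC
	 * Ryzen 7 2700X table offset -> 33000 mC */
	printf("%d mC\n", decode_temp((0x2e0u << 21) | 0x80000, 0x80000, 10000));
	return 0;
}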
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 8b0bc4fc06e8..b0bc77bf2cd9 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
1380 /* Activate logical device if needed */ 1380 /* Activate logical device if needed */
1381 val = superio_inb(sioaddr, SIO_REG_ENABLE); 1381 val = superio_inb(sioaddr, SIO_REG_ENABLE);
1382 if (!(val & 0x01)) { 1382 if (!(val & 0x01)) {
1383 pr_err("EC is disabled\n"); 1383 pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
1384 goto fail; 1384 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
1385 } 1385 }
1386 1386
1387 superio_exit(sioaddr); 1387 superio_exit(sioaddr);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 363bf56eb0f2..91976b6ca300 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
170 scmi_chip_info.info = ptr_scmi_ci; 170 scmi_chip_info.info = ptr_scmi_ci;
171 chip_info = &scmi_chip_info; 171 chip_info = &scmi_chip_info;
172 172
173 for (type = 0; type < hwmon_max && nr_count[type]; type++) { 173 for (type = 0; type < hwmon_max; type++) {
174 if (!nr_count[type])
175 continue;
176
174 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], 177 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
175 type, hwmon_attributes[type]); 178 type, hwmon_attributes[type]);
176 *ptr_scmi_ci++ = scmi_hwmon_chan++; 179 *ptr_scmi_ci++ = scmi_hwmon_chan++;
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
index a6e87076acc2..5336bbdbfdc5 100644
--- a/drivers/isdn/mISDN/dsp_hwec.c
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
68 goto _do; 68 goto _do;
69 69
70 { 70 {
71 char _dup[len + 1];
72 char *dup, *tok, *name, *val; 71 char *dup, *tok, *name, *val;
73 int tmp; 72 int tmp;
74 73
75 strcpy(_dup, arg); 74 dup = kstrdup(arg, GFP_ATOMIC);
76 dup = _dup; 75 if (!dup)
76 return;
77 77
78 while ((tok = strsep(&dup, ","))) { 78 while ((tok = strsep(&dup, ","))) {
79 if (!strlen(tok)) 79 if (!strlen(tok))
@@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
89 deftaps = tmp; 89 deftaps = tmp;
90 } 90 }
91 } 91 }
92
93 kfree(dup);
92 } 94 }
93 95
94_do: 96_do:
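The dsp_hwec fix removes a variable-length stack array by duplicating the argument string on the heap before tokenizing it, then freeing the copy. A userspace analogue using strdup()/strsep() (kstrdup()/kfree() in the kernel); note the copy is walked with a separate pointer, since strsep() advances its argument and the original address is what must eventually be freed:

#define _DEFAULT_SOURCE		/* strsep() in glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_args(const char *arg)
{
	char *dup = strdup(arg);	/* heap copy instead of a VLA */
	char *walk = dup, *tok;

	if (!dup)
		return;
	while ((tok = strsep(&walk, ","))) {
		if (!*tok)
			continue;	/* skip empty fields */
		printf("token: %s\n", tok);
	}
	free(dup);	/* kfree() in the kernel version */
}

int main(void)
{
	parse_args("deftaps=128,extra");
	return 0;
}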
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 21d50e4cc5e1..b05022f94f18 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
279 u16 timebase, u8 *buf, int len) 279 u16 timebase, u8 *buf, int len)
280{ 280{
281 u8 *p; 281 u8 *p;
282 u8 frame[len + 32]; 282 u8 frame[MAX_DFRAME_LEN_L1 + 32];
283 struct socket *socket = NULL; 283 struct socket *socket = NULL;
284 284
285 if (debug & DEBUG_L1OIP_MSG) 285 if (debug & DEBUG_L1OIP_MSG)
@@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
902 p = skb->data; 902 p = skb->data;
903 l = skb->len; 903 l = skb->len;
904 while (l) { 904 while (l) {
905 ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 905 /*
 906 * This is technically bounded by L1OIP_MAX_PERFRAME, but since
 907 * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME the smaller limit applies
908 */
909 ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
906 l1oip_socket_send(hc, 0, dch->slot, 0, 910 l1oip_socket_send(hc, 0, dch->slot, 0,
907 hc->chan[dch->slot].tx_counter++, p, ll); 911 hc->chan[dch->slot].tx_counter++, p, ll);
908 p += ll; 912 p += ll;
@@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1140 p = skb->data; 1144 p = skb->data;
1141 l = skb->len; 1145 l = skb->len;
1142 while (l) { 1146 while (l) {
1143 ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 1147 /*
1148 * This is technically bounded by L1OIP_MAX_PERFRAME but
1149 * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
1150 */
1151 ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
1144 l1oip_socket_send(hc, hc->codec, bch->slot, 0, 1152 l1oip_socket_send(hc, hc->codec, bch->slot, 0,
1145 hc->chan[bch->slot].tx_counter, p, ll); 1153 hc->chan[bch->slot].tx_counter, p, ll);
1146 hc->chan[bch->slot].tx_counter += ll; 1154 hc->chan[bch->slot].tx_counter += ll;
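With the on-stack frame now a fixed MAX_DFRAME_LEN_L1 + 32 bytes, the send loops must carve the payload into chunks no larger than that buffer. The loop pattern, reduced to a runnable sketch; MAX_CHUNK is a stand-in constant, not the kernel value:

#include <stdio.h>

#define MAX_CHUNK 260	/* stand-in for MAX_DFRAME_LEN_L1 */

/* Carve the payload into chunks the fixed frame buffer can hold. */
static void send_all(const unsigned char *p, int l)
{
	while (l) {
		int ll = (l < MAX_CHUNK) ? l : MAX_CHUNK;

		printf("send %d bytes\n", ll);	/* l1oip_socket_send() here */
		p += ll;
		l -= ll;
	}
}

int main(void)
{
	unsigned char buf[600] = { 0 };

	send_all(buf, sizeof(buf));	/* 260 + 260 + 80 */
	return 0;
}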
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3bea45e8ccff..c208c01f63a5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr)
9256 check_sb_changes(mddev, rdev); 9256 check_sb_changes(mddev, rdev);
9257 9257
9258 /* Read all rdev's to update recovery_offset */ 9258 /* Read all rdev's to update recovery_offset */
9259 rdev_for_each_rcu(rdev, mddev) 9259 rdev_for_each_rcu(rdev, mddev) {
9260 read_rdev(mddev, rdev); 9260 if (!test_bit(Faulty, &rdev->flags))
9261 read_rdev(mddev, rdev);
9262 }
9261} 9263}
9262EXPORT_SYMBOL(md_reload_sb); 9264EXPORT_SYMBOL(md_reload_sb);
9263 9265
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2943fb74056..e9e3308cb0a7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
 854 * there is no normal IO happening. It must arrange to call 854 * there is no normal IO happening. It must arrange to call
855 * lower_barrier when the particular background IO completes. 855 * lower_barrier when the particular background IO completes.
856 */ 856 */
857static void raise_barrier(struct r1conf *conf, sector_t sector_nr) 857static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
858{ 858{
859 int idx = sector_to_idx(sector_nr); 859 int idx = sector_to_idx(sector_nr);
860 860
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
885 * max resync count which allowed on current I/O barrier bucket. 885 * max resync count which allowed on current I/O barrier bucket.
886 */ 886 */
887 wait_event_lock_irq(conf->wait_barrier, 887 wait_event_lock_irq(conf->wait_barrier,
888 !conf->array_frozen && 888 (!conf->array_frozen &&
889 !atomic_read(&conf->nr_pending[idx]) && 889 !atomic_read(&conf->nr_pending[idx]) &&
890 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, 890 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
891 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
891 conf->resync_lock); 892 conf->resync_lock);
892 893
894 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
895 atomic_dec(&conf->barrier[idx]);
896 spin_unlock_irq(&conf->resync_lock);
897 wake_up(&conf->wait_barrier);
898 return -EINTR;
899 }
900
893 atomic_inc(&conf->nr_sync_pending); 901 atomic_inc(&conf->nr_sync_pending);
894 spin_unlock_irq(&conf->resync_lock); 902 spin_unlock_irq(&conf->resync_lock);
903
904 return 0;
895} 905}
896 906
897static void lower_barrier(struct r1conf *conf, sector_t sector_nr) 907static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
1092 goto skip_copy; 1102 goto skip_copy;
1093 } 1103 }
1094 1104
1105 behind_bio->bi_write_hint = bio->bi_write_hint;
1106
1095 while (i < vcnt && size) { 1107 while (i < vcnt && size) {
1096 struct page *page; 1108 struct page *page;
1097 int len = min_t(int, PAGE_SIZE, size); 1109 int len = min_t(int, PAGE_SIZE, size);
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2662 2674
2663 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2675 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2664 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2676 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2665 r1_bio = raid1_alloc_init_r1buf(conf);
2666 2677
2667 raise_barrier(conf, sector_nr); 2678
2679 if (raise_barrier(conf, sector_nr))
2680 return 0;
2681
2682 r1_bio = raid1_alloc_init_r1buf(conf);
2668 2683
2669 rcu_read_lock(); 2684 rcu_read_lock();
2670 /* 2685 /*
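raise_barrier() now returns a status: if MD_RECOVERY_INTR is raised while it sleeps, it undoes its barrier count and reports -EINTR so raid1_sync_request() can bail out instead of hanging on a frozen array. A userspace analogue of an interruptible barrier wait; the kernel uses wait_event_lock_irq(), so this only mirrors the control flow:

#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending;		/* in-flight normal I/O */
static bool interrupted;	/* MD_RECOVERY_INTR stand-in */

/* The wait can now be broken by the interrupt flag, and the caller
 * sees -EINTR instead of sleeping forever. */
static int raise_barrier_sketch(void)
{
	pthread_mutex_lock(&lock);
	while (pending && !interrupted)
		pthread_cond_wait(&cond, &lock);
	if (interrupted) {
		pthread_mutex_unlock(&lock);
		return -EINTR;	/* caller aborts the resync request */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return raise_barrier_sketch() ? 1 : 0;
}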
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index 71a89d5d3efd..db8043019ec6 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -16,77 +16,7 @@
16 16
17int main(void) 17int main(void)
18{ 18{
19 DEFINE(EMIF_SDCFG_VAL_OFFSET, 19 ti_emif_asm_offsets();
20 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
21 DEFINE(EMIF_TIMING1_VAL_OFFSET,
22 offsetof(struct emif_regs_amx3, emif_timing1_val));
23 DEFINE(EMIF_TIMING2_VAL_OFFSET,
24 offsetof(struct emif_regs_amx3, emif_timing2_val));
25 DEFINE(EMIF_TIMING3_VAL_OFFSET,
26 offsetof(struct emif_regs_amx3, emif_timing3_val));
27 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
28 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
29 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
30 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
31 DEFINE(EMIF_PMCR_VAL_OFFSET,
32 offsetof(struct emif_regs_amx3, emif_pmcr_val));
33 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
34 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
35 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
36 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
37 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
38 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
39 DEFINE(EMIF_COS_CONFIG_OFFSET,
40 offsetof(struct emif_regs_amx3, emif_cos_config));
41 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
42 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
43 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
44 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
45 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
46 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
47 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
48 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
49 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
50 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
51 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
52 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
53 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
54 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
55 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
56 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
57 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
58 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
59 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
60 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
61 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
62
63 BLANK();
64
65 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
66 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
67 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
68 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
69 DEFINE(EMIF_PM_CONFIG_OFFSET,
70 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
71 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
72 offsetof(struct ti_emif_pm_data, regs_virt));
73 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
74 offsetof(struct ti_emif_pm_data, regs_phys));
75 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
76
77 BLANK();
78
79 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
80 offsetof(struct ti_emif_pm_functions, save_context));
81 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
82 offsetof(struct ti_emif_pm_functions, restore_context));
83 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
84 offsetof(struct ti_emif_pm_functions, enter_sr));
85 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
86 offsetof(struct ti_emif_pm_functions, exit_sr));
87 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
88 offsetof(struct ti_emif_pm_functions, abort_sr));
89 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
90 20
91 return 0; 21 return 0;
92} 22}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 231f3a1e27bf..86503f60468f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1994 .cmd_per_lun = 7, 1994 .cmd_per_lun = 7,
1995 .use_clustering = ENABLE_CLUSTERING, 1995 .use_clustering = ENABLE_CLUSTERING,
1996 .shost_attrs = mptscsih_host_attrs, 1996 .shost_attrs = mptscsih_host_attrs,
1997 .no_write_same = 1,
1997}; 1998};
1998 1999
1999static int mptsas_get_linkerrors(struct sas_phy *phy) 2000static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8e0acd197c43..6af946d16d24 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/bitops.h>
12#include <linux/device.h> 13#include <linux/device.h>
13#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
14#include <linux/io-64-nonatomic-hi-lo.h> 15#include <linux/io-64-nonatomic-hi-lo.h>
@@ -62,6 +63,17 @@
62 * need a custom accessor. 63 * need a custom accessor.
63 */ 64 */
64 65
66static unsigned long global_flags;
67/*
 68 * Workaround to avoid using the RX DMAC from multiple channels.
 69 * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use
 70 * the RX DMAC simultaneously, sometimes hundreds of bytes of data are
 71 * not stored into system memory even though the DMAC interrupt fired,
 72 * so this driver uses only one RX DMAC channel.
73 */
74#define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0
75#define SDHI_INTERNAL_DMAC_RX_IN_USE 1
76
65/* Definitions for sampling clocks */ 77/* Definitions for sampling clocks */
66static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { 78static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
67 { 79 {
@@ -126,6 +138,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
126 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, 138 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
127 RST_RESERVED_BITS | val); 139 RST_RESERVED_BITS | val);
128 140
141 if (host->data && host->data->flags & MMC_DATA_READ)
142 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
143
129 renesas_sdhi_internal_dmac_enable_dma(host, true); 144 renesas_sdhi_internal_dmac_enable_dma(host, true);
130} 145}
131 146
@@ -155,6 +170,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
155 if (data->flags & MMC_DATA_READ) { 170 if (data->flags & MMC_DATA_READ) {
156 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 171 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
157 dir = DMA_FROM_DEVICE; 172 dir = DMA_FROM_DEVICE;
173 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
174 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
175 goto force_pio;
158 } else { 176 } else {
159 dtran_mode |= DTRAN_MODE_CH_NUM_CH0; 177 dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
160 dir = DMA_TO_DEVICE; 178 dir = DMA_TO_DEVICE;
@@ -208,6 +226,9 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
208 renesas_sdhi_internal_dmac_enable_dma(host, false); 226 renesas_sdhi_internal_dmac_enable_dma(host, false);
209 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); 227 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
210 228
229 if (dir == DMA_FROM_DEVICE)
230 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
231
211 tmio_mmc_do_data_irq(host); 232 tmio_mmc_do_data_irq(host);
212out: 233out:
213 spin_unlock_irq(&host->lock); 234 spin_unlock_irq(&host->lock);
@@ -251,18 +272,24 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
251 * implementation as others may use a different implementation. 272 * implementation as others may use a different implementation.
252 */ 273 */
253static const struct soc_device_attribute gen3_soc_whitelist[] = { 274static const struct soc_device_attribute gen3_soc_whitelist[] = {
254 { .soc_id = "r8a7795", .revision = "ES1.*" }, 275 { .soc_id = "r8a7795", .revision = "ES1.*",
255 { .soc_id = "r8a7795", .revision = "ES2.0" }, 276 .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
256 { .soc_id = "r8a7796", .revision = "ES1.0" }, 277 { .soc_id = "r8a7795", .revision = "ES2.0" },
257 { .soc_id = "r8a77995", .revision = "ES1.0" }, 278 { .soc_id = "r8a7796", .revision = "ES1.0",
258 { /* sentinel */ } 279 .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
280 { .soc_id = "r8a77995", .revision = "ES1.0" },
281 { /* sentinel */ }
259}; 282};
260 283
261static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) 284static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
262{ 285{
263 if (!soc_device_match(gen3_soc_whitelist)) 286 const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
287
288 if (!soc)
264 return -ENODEV; 289 return -ENODEV;
265 290
291 global_flags |= (unsigned long)soc->data;
292
266 return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); 293 return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
267} 294}
268 295
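Ownership of the single usable RX DMAC channel is arbitrated with test_and_set_bit() on a global flag word; losers fall back to PIO, and the bit is cleared on completion or abort. A C11-atomics sketch of the same claim/release dance, with bit numbers mirroring the hunk; the atomics are a userspace stand-in for the kernel bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ONE_RX_ONLY 0	/* SoC quirk: serialize RX DMAC users */
#define RX_IN_USE   1	/* someone currently owns the RX DMAC */

static atomic_ulong global_flags;

static bool claim_rx_dmac(void)
{
	unsigned long in_use = 1ul << RX_IN_USE;

	if (!(atomic_load(&global_flags) & (1ul << ONE_RX_ONLY)))
		return true;	/* unaffected SoC: no serialization */
	/* test_and_set_bit(): old bit set means someone already owns it */
	return !(atomic_fetch_or(&global_flags, in_use) & in_use);
}

static void release_rx_dmac(void)
{
	atomic_fetch_and(&global_flags, ~(1ul << RX_IN_USE));
}

int main(void)
{
	atomic_fetch_or(&global_flags, 1ul << ONE_RX_ONLY);
	printf("%d %d\n", claim_rx_dmac(), claim_rx_dmac());	/* 1 0 */
	release_rx_dmac();
	return 0;
}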
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 787434e5589d..78c25ad35fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1312,7 +1312,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
1312 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); 1312 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1313} 1313}
1314 1314
1315static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) 1315static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1316{ 1316{
1317 struct sdhci_pci_slot *slot = sdhci_priv(host); 1317 struct sdhci_pci_slot *slot = sdhci_priv(host);
1318 struct pci_dev *pdev = slot->chip->pdev; 1318 struct pci_dev *pdev = slot->chip->pdev;
@@ -1351,6 +1351,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
1351 return 0; 1351 return 0;
1352} 1352}
1353 1353
1354static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1355{
1356 struct sdhci_host *host = mmc_priv(mmc);
1357
1358 /* AMD requires custom HS200 tuning */
1359 if (host->timing == MMC_TIMING_MMC_HS200)
1360 return amd_execute_tuning_hs200(host, opcode);
1361
1362 /* Otherwise perform standard SDHCI tuning */
1363 return sdhci_execute_tuning(mmc, opcode);
1364}
1365
1366static int amd_probe_slot(struct sdhci_pci_slot *slot)
1367{
1368 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1369
1370 ops->execute_tuning = amd_execute_tuning;
1371
1372 return 0;
1373}
1374
1354static int amd_probe(struct sdhci_pci_chip *chip) 1375static int amd_probe(struct sdhci_pci_chip *chip)
1355{ 1376{
1356 struct pci_dev *smbus_dev; 1377 struct pci_dev *smbus_dev;
@@ -1385,12 +1406,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
1385 .set_bus_width = sdhci_set_bus_width, 1406 .set_bus_width = sdhci_set_bus_width,
1386 .reset = sdhci_reset, 1407 .reset = sdhci_reset,
1387 .set_uhs_signaling = sdhci_set_uhs_signaling, 1408 .set_uhs_signaling = sdhci_set_uhs_signaling,
1388 .platform_execute_tuning = amd_execute_tuning,
1389}; 1409};
1390 1410
1391static const struct sdhci_pci_fixes sdhci_amd = { 1411static const struct sdhci_pci_fixes sdhci_amd = {
1392 .probe = amd_probe, 1412 .probe = amd_probe,
1393 .ops = &amd_sdhci_pci_ops, 1413 .ops = &amd_sdhci_pci_ops,
1414 .probe_slot = amd_probe_slot,
1394}; 1415};
1395 1416
1396static const struct pci_device_id pci_ids[] = { 1417static const struct pci_device_id pci_ids[] = {
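The AMD tuning rework moves the hook from the SDHCI ops to mmc_host_ops at probe_slot time, so the driver can run its custom HS200 sequence and still fall through to the generic sdhci_execute_tuning() for every other timing. A small function-pointer sketch of that dispatch; the types and names here are illustrative:

#include <stdio.h>

enum timing { TIMING_LEGACY, TIMING_HS200 };

struct host {
	enum timing timing;
	int (*execute_tuning)(struct host *, unsigned int);
};

static int generic_tuning(struct host *h, unsigned int op)
{
	(void)h;
	printf("generic tuning, op %u\n", op);
	return 0;
}

static int hs200_tuning(struct host *h, unsigned int op)
{
	(void)h;
	printf("vendor HS200 tuning, op %u\n", op);
	return 0;
}

/* Only HS200 needs the vendor sequence; everything else takes the
 * standard path, exactly the shape of amd_execute_tuning() above. */
static int vendor_execute_tuning(struct host *h, unsigned int op)
{
	if (h->timing == TIMING_HS200)
		return hs200_tuning(h, op);
	return generic_tuning(h, op);
}

int main(void)
{
	struct host h = { TIMING_HS200, vendor_execute_tuning };

	return h.execute_tuning(&h, 21);
}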
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index d4c07b85f18e..f5695be14499 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
45#define I82802AB 0x00ad 45#define I82802AB 0x00ad
46#define I82802AC 0x00ac 46#define I82802AC 0x00ac
47#define PF38F4476 0x881c 47#define PF38F4476 0x881c
48#define M28F00AP30 0x8963
48/* STMicroelectronics chips */ 49/* STMicroelectronics chips */
49#define M50LPW080 0x002F 50#define M50LPW080 0x002F
50#define M50FLW080A 0x0080 51#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
375 extp->MinorVersion = '1'; 376 extp->MinorVersion = '1';
376} 377}
377 378
379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
380{
381 /*
 382 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
 383 * w.r.t. Erase Suspend for their small Erase Blocks (0x8000)
384 */
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
386 return 1;
387 return 0;
388}
389
378static inline struct cfi_pri_intelext * 390static inline struct cfi_pri_intelext *
379read_pri_intelext(struct map_info *map, __u16 adr) 391read_pri_intelext(struct map_info *map, __u16 adr)
380{ 392{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
831 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 843 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
832 goto sleep; 844 goto sleep;
833 845
 846 /* Do not allow suspend if reading/writing the erase-block address */
847 if ((adr & chip->in_progress_block_mask) ==
848 chip->in_progress_block_addr)
849 goto sleep;
850
851 /* do not suspend small EBs, buggy Micron Chips */
852 if (cfi_is_micron_28F00AP30(cfi, chip) &&
853 (chip->in_progress_block_mask == ~(0x8000-1)))
854 goto sleep;
834 855
835 /* Erase suspend */ 856 /* Erase suspend */
836 map_write(map, CMD(0xB0), adr); 857 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
837 858
838 /* If the flash has finished erasing, then 'erase suspend' 859 /* If the flash has finished erasing, then 'erase suspend'
839 * appears to make some (28F320) flash devices switch to 860 * appears to make some (28F320) flash devices switch to
840 * 'read' mode. Make sure that we switch to 'read status' 861 * 'read' mode. Make sure that we switch to 'read status'
841 * mode so we get the right data. --rmk 862 * mode so we get the right data. --rmk
842 */ 863 */
843 map_write(map, CMD(0x70), adr); 864 map_write(map, CMD(0x70), chip->in_progress_block_addr);
844 chip->oldstate = FL_ERASING; 865 chip->oldstate = FL_ERASING;
845 chip->state = FL_ERASE_SUSPENDING; 866 chip->state = FL_ERASE_SUSPENDING;
846 chip->erase_suspended = 1; 867 chip->erase_suspended = 1;
847 for (;;) { 868 for (;;) {
848 status = map_read(map, adr); 869 status = map_read(map, chip->in_progress_block_addr);
849 if (map_word_andequal(map, status, status_OK, status_OK)) 870 if (map_word_andequal(map, status, status_OK, status_OK))
850 break; 871 break;
851 872
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1041 sending the 0x70 (Read Status) command to an erasing 1062 sending the 0x70 (Read Status) command to an erasing
1042 chip and expecting it to be ignored, that's what we 1063 chip and expecting it to be ignored, that's what we
1043 do. */ 1064 do. */
1044 map_write(map, CMD(0xd0), adr); 1065 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1045 map_write(map, CMD(0x70), adr); 1066 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1046 chip->oldstate = FL_READY; 1067 chip->oldstate = FL_READY;
1047 chip->state = FL_ERASING; 1068 chip->state = FL_ERASING;
1048 break; 1069 break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1933 map_write(map, CMD(0xD0), adr); 1954 map_write(map, CMD(0xD0), adr);
1934 chip->state = FL_ERASING; 1955 chip->state = FL_ERASING;
1935 chip->erase_suspended = 0; 1956 chip->erase_suspended = 0;
1957 chip->in_progress_block_addr = adr;
1958 chip->in_progress_block_mask = ~(len - 1);
1936 1959
1937 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1960 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1938 adr, len, 1961 adr, len,
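Both CFI hunks rely on the same trick: with in_progress_block_mask set to ~(len - 1) at erase start, a single AND tells whether a later access falls inside the block being erased, in which case suspend is refused. The arithmetic as a runnable sketch; the block size must be a power of two, as it is for these chips:

#include <stdio.h>
#include <stdint.h>

/* An access hits the erasing block iff masking it with ~(len - 1)
 * gives that block's base address. */
static int in_erasing_block(uint32_t adr, uint32_t block_addr, uint32_t len)
{
	uint32_t mask = ~(len - 1);

	return (adr & mask) == block_addr;
}

int main(void)
{
	/* 128KiB block at 0x20000: 0x2abcd hits it, 0x40000 does not */
	printf("%d %d\n",
	       in_erasing_block(0x2abcd, 0x20000, 0x20000),
	       in_erasing_block(0x40000, 0x20000, 0x20000));
	return 0;
}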
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 668e2cbc155b..692902df2598 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
817 goto sleep; 817 goto sleep;
818 818
 819 /* We could check to see if we're trying to access the sector 819 /* Do not allow suspend if reading/writing the erase-block address */
820 * that is currently being erased. However, no user will try 820 if ((adr & chip->in_progress_block_mask) ==
821 * anything like that so we just wait for the timeout. */ 821 chip->in_progress_block_addr)
822 goto sleep;
822 823
823 /* Erase suspend */ 824 /* Erase suspend */
824 /* It's harmless to issue the Erase-Suspend and Erase-Resume 825 /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2267 chip->state = FL_ERASING; 2268 chip->state = FL_ERASING;
2268 chip->erase_suspended = 0; 2269 chip->erase_suspended = 0;
2269 chip->in_progress_block_addr = adr; 2270 chip->in_progress_block_addr = adr;
2271 chip->in_progress_block_mask = ~(map->size - 1);
2270 2272
2271 INVALIDATE_CACHE_UDELAY(map, chip, 2273 INVALIDATE_CACHE_UDELAY(map, chip,
2272 adr, map->size, 2274 adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2356 chip->state = FL_ERASING; 2358 chip->state = FL_ERASING;
2357 chip->erase_suspended = 0; 2359 chip->erase_suspended = 0;
2358 chip->in_progress_block_addr = adr; 2360 chip->in_progress_block_addr = adr;
2361 chip->in_progress_block_mask = ~(len - 1);
2359 2362
2360 INVALIDATE_CACHE_UDELAY(map, chip, 2363 INVALIDATE_CACHE_UDELAY(map, chip,
2361 adr, len, 2364 adr, len,
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index d0cd6f8635d7..9c9f8936b63b 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
162 ret = nanddev_erase(nand, &pos); 162 ret = nanddev_erase(nand, &pos);
163 if (ret) { 163 if (ret) {
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); 164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165 einfo->state = MTD_ERASE_FAILED;
166 165
167 return ret; 166 return ret;
168 } 167 }
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
170 nanddev_pos_next_eraseblock(nand, &pos); 169 nanddev_pos_next_eraseblock(nand, &pos);
171 } 170 }
172 171
173 einfo->state = MTD_ERASE_DONE;
174
175 return 0; 172 return 0;
176} 173}
177EXPORT_SYMBOL_GPL(nanddev_mtd_erase); 174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 10e953218948..1d779a35ac8e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2299,29 +2299,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2299 /* 2299 /*
2300 * The legacy "num-cs" property indicates the number of CS on the only 2300 * The legacy "num-cs" property indicates the number of CS on the only
 2301 * chip connected to the controller (legacy bindings do not support 2301 * chip connected to the controller (legacy bindings do not support
 2302 * more than one chip). CS are only incremented one by one while the RB 2302 * more than one chip). The CS and RB pins are always #0.
2303 * pin is always the #0.
2304 * 2303 *
2305 * When not using legacy bindings, a couple of "reg" and "nand-rb" 2304 * When not using legacy bindings, a couple of "reg" and "nand-rb"
2306 * properties must be filled. For each chip, expressed as a subnode, 2305 * properties must be filled. For each chip, expressed as a subnode,
2307 * "reg" points to the CS lines and "nand-rb" to the RB line. 2306 * "reg" points to the CS lines and "nand-rb" to the RB line.
2308 */ 2307 */
2309 if (pdata) { 2308 if (pdata || nfc->caps->legacy_of_bindings) {
2310 nsels = 1; 2309 nsels = 1;
2311 } else if (nfc->caps->legacy_of_bindings && 2310 } else {
2312 !of_get_property(np, "num-cs", &nsels)) { 2311 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2313 dev_err(dev, "missing num-cs property\n"); 2312 if (nsels <= 0) {
2314 return -EINVAL; 2313 dev_err(dev, "missing/invalid reg property\n");
2315 } else if (!of_get_property(np, "reg", &nsels)) { 2314 return -EINVAL;
2316 dev_err(dev, "missing reg property\n"); 2315 }
2317 return -EINVAL;
2318 }
2319
2320 if (!pdata)
2321 nsels /= sizeof(u32);
2322 if (!nsels) {
2323 dev_err(dev, "invalid reg property size\n");
2324 return -EINVAL;
2325 } 2316 }
2326 2317
2327 /* Alloc the nand chip structure */ 2318 /* Alloc the nand chip structure */
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f54518ffb36a..f2052fae21c7 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
645 645
646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); 646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
647 647
648 clk = clk_get(&pdev->dev, NULL); 648 clk = devm_clk_get(&pdev->dev, NULL);
649 if (IS_ERR(clk)) 649 if (IS_ERR(clk))
650 return PTR_ERR(clk); 650 return PTR_ERR(clk);
651 651
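
Switching to devm_clk_get() ties the clock reference to the device lifetime:
the reference is released automatically when the device unbinds, so later
error paths need no matching clk_put(). The pattern in isolation (a sketch,
not the complete probe):

	clk = devm_clk_get(&pdev->dev, NULL);  /* managed: auto clk_put() */
	if (IS_ERR(clk))
		return PTR_ERR(clk);           /* safe: nothing to unwind */

Any explicit clk_put() elsewhere in the driver becomes unnecessary once this
conversion lands.
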
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 4b8e9183489a..5872f31eaa60 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
501 void __iomem *reg_base = cqspi->iobase; 501 void __iomem *reg_base = cqspi->iobase;
502 void __iomem *ahb_base = cqspi->ahb_base; 502 void __iomem *ahb_base = cqspi->ahb_base;
503 unsigned int remaining = n_rx; 503 unsigned int remaining = n_rx;
504 unsigned int mod_bytes = n_rx % 4;
504 unsigned int bytes_to_read = 0; 505 unsigned int bytes_to_read = 0;
506 u8 *rxbuf_end = rxbuf + n_rx;
505 int ret = 0; 507 int ret = 0;
506 508
507 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 509 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
530 } 532 }
531 533
532 while (bytes_to_read != 0) { 534 while (bytes_to_read != 0) {
535 unsigned int word_remain = round_down(remaining, 4);
536
533 bytes_to_read *= cqspi->fifo_width; 537 bytes_to_read *= cqspi->fifo_width;
534 bytes_to_read = bytes_to_read > remaining ? 538 bytes_to_read = bytes_to_read > remaining ?
535 remaining : bytes_to_read; 539 remaining : bytes_to_read;
536 ioread32_rep(ahb_base, rxbuf, 540 bytes_to_read = round_down(bytes_to_read, 4);
537 DIV_ROUND_UP(bytes_to_read, 4)); 541 /* Read 4 byte word chunks then single bytes */
542 if (bytes_to_read) {
543 ioread32_rep(ahb_base, rxbuf,
544 (bytes_to_read / 4));
545 } else if (!word_remain && mod_bytes) {
546 unsigned int temp = ioread32(ahb_base);
547
548 bytes_to_read = mod_bytes;
549 memcpy(rxbuf, &temp, min((unsigned int)
550 (rxbuf_end - rxbuf),
551 bytes_to_read));
552 }
538 rxbuf += bytes_to_read; 553 rxbuf += bytes_to_read;
539 remaining -= bytes_to_read; 554 remaining -= bytes_to_read;
540 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 555 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
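
The reworked loop exists because ioread32_rep() always stores whole 32-bit
words, which overruns the destination buffer when the transfer length is not a
multiple of four; the fix drains full words first and copies only the
surviving tail bytes out of the last FIFO word. A self-contained userspace
model of that tail handling (a plain array stands in for the FIFO):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t fifo[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t out[6];                        /* 6 bytes: one word + 2 tail bytes */
	unsigned int n = sizeof(out);
	unsigned int words = n / 4, tail = n % 4;

	memcpy(out, fifo, words * 4);          /* whole 4-byte words first */
	if (tail) {
		uint32_t last;

		memcpy(&last, fifo + words * 4, sizeof(last)); /* full FIFO word */
		memcpy(out + words * 4, &last, tail);          /* keep only the tail */
	}
	for (unsigned int i = 0; i < n; i++)
		printf("%u ", out[i]);          /* prints 1 2 3 4 5 6 */
	printf("\n");
	return 0;
}
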
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7b113018853..718e4914e3a0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1660 } /* switch(bond_mode) */ 1660 } /* switch(bond_mode) */
1661 1661
1662#ifdef CONFIG_NET_POLL_CONTROLLER 1662#ifdef CONFIG_NET_POLL_CONTROLLER
1663 slave_dev->npinfo = bond->dev->npinfo; 1663 if (bond->dev->npinfo) {
1664 if (slave_dev->npinfo) {
1665 if (slave_enable_netpoll(new_slave)) { 1664 if (slave_enable_netpoll(new_slave)) {
1666 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); 1665 netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1667 res = -EBUSY; 1666 res = -EBUSY;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index ac7694c71266..a036c490b7ce 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
285 struct sk_buff_head *rxq) 285 struct sk_buff_head *rxq)
286{ 286{
287 u16 buf[4] = { 0 }, status, seq_id; 287 u16 buf[4] = { 0 }, status, seq_id;
288 u64 ns, timelo, timehi;
289 struct skb_shared_hwtstamps *shwt; 288 struct skb_shared_hwtstamps *shwt;
289 struct sk_buff_head received;
290 u64 ns, timelo, timehi;
291 unsigned long flags;
290 int err; 292 int err;
291 293
294 /* The latched timestamp belongs to one of the received frames. */
295 __skb_queue_head_init(&received);
296 spin_lock_irqsave(&rxq->lock, flags);
297 skb_queue_splice_tail_init(rxq, &received);
298 spin_unlock_irqrestore(&rxq->lock, flags);
299
292 mutex_lock(&chip->reg_lock); 300 mutex_lock(&chip->reg_lock);
293 err = mv88e6xxx_port_ptp_read(chip, ps->port_id, 301 err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
294 reg, buf, ARRAY_SIZE(buf)); 302 reg, buf, ARRAY_SIZE(buf));
@@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
311 /* Since the device can only handle one time stamp at a time, 319 /* Since the device can only handle one time stamp at a time,
312 * we purge any extra frames from the queue. 320 * we purge any extra frames from the queue.
313 */ 321 */
314 for ( ; skb; skb = skb_dequeue(rxq)) { 322 for ( ; skb; skb = __skb_dequeue(&received)) {
315 if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { 323 if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
316 ns = timehi << 16 | timelo; 324 ns = timehi << 16 | timelo;
317 325
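
The splice takes the producer's queue lock only long enough to move every
pending skb onto a private list, which is then walked with the lock-free
__skb_dequeue(); frames arriving concurrently simply wait for the next pass.
A condensed sketch of the idiom (process_skb() is a hypothetical consumer):

	struct sk_buff_head received;
	unsigned long flags;

	__skb_queue_head_init(&received);
	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);  /* move all skbs at once */
	spin_unlock_irqrestore(&rxq->lock, flags);

	while ((skb = __skb_dequeue(&received)))     /* private list: no lock */
		process_skb(skb);
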
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 7ea72ef11a55..d272dc6984ac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1321,6 +1321,10 @@
1321#define MDIO_VEND2_AN_STAT 0x8002 1321#define MDIO_VEND2_AN_STAT 0x8002
1322#endif 1322#endif
1323 1323
1324#ifndef MDIO_VEND2_PMA_CDR_CONTROL
1325#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
1326#endif
1327
1324#ifndef MDIO_CTRL1_SPEED1G 1328#ifndef MDIO_CTRL1_SPEED1G
1325#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 1329#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
1326#endif 1330#endif
@@ -1369,6 +1373,10 @@
1369#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 1373#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
1370#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 1374#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
1371 1375
1376#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01
1377#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
1378#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
1379
1372/* Bit setting and getting macros 1380/* Bit setting and getting macros
1373 * The get macro will extract the current bit field value from within 1381 * The get macro will extract the current bit field value from within
1374 * the variable 1382 * the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 7d128be61310..b91143947ed2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
519 "debugfs_create_file failed\n"); 519 "debugfs_create_file failed\n");
520 } 520 }
521 521
522 if (pdata->vdata->an_cdr_workaround) {
523 pfile = debugfs_create_bool("an_cdr_workaround", 0600,
524 pdata->xgbe_debugfs,
525 &pdata->debugfs_an_cdr_workaround);
526 if (!pfile)
527 netdev_err(pdata->netdev,
528 "debugfs_create_bool failed\n");
529
530 pfile = debugfs_create_bool("an_cdr_track_early", 0600,
531 pdata->xgbe_debugfs,
532 &pdata->debugfs_an_cdr_track_early);
533 if (!pfile)
534 netdev_err(pdata->netdev,
535 "debugfs_create_bool failed\n");
536 }
537
522 kfree(buf); 538 kfree(buf);
523} 539}
524 540
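
Both knobs are plain bools published through debugfs_create_bool(), which on
this kernel returns the new dentry (or NULL on failure), hence the error
reporting in the hunk above. A minimal sketch of the call (the names here are
hypothetical):

	static bool my_flag;                   /* toggled from userspace */

	pfile = debugfs_create_bool("my_flag", 0600, parent_dir, &my_flag);
	if (!pfile)
		pr_err("debugfs_create_bool failed\n");
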
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 795e556d4a3f..441d0973957b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 349 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
350 350
351 /* Call MDIO/PHY initialization routine */ 351 /* Call MDIO/PHY initialization routine */
352 pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
352 ret = pdata->phy_if.phy_init(pdata); 353 ret = pdata->phy_if.phy_init(pdata);
353 if (ret) 354 if (ret)
354 return ret; 355 return ret;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 072b9f664597..1b45cd73a258 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
432 xgbe_an73_set(pdata, false, false); 432 xgbe_an73_set(pdata, false, false);
433 xgbe_an73_disable_interrupts(pdata); 433 xgbe_an73_disable_interrupts(pdata);
434 434
435 pdata->an_start = 0;
436
435 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); 437 netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
436} 438}
437 439
438static void xgbe_an_restart(struct xgbe_prv_data *pdata) 440static void xgbe_an_restart(struct xgbe_prv_data *pdata)
439{ 441{
442 if (pdata->phy_if.phy_impl.an_pre)
443 pdata->phy_if.phy_impl.an_pre(pdata);
444
440 switch (pdata->an_mode) { 445 switch (pdata->an_mode) {
441 case XGBE_AN_MODE_CL73: 446 case XGBE_AN_MODE_CL73:
442 case XGBE_AN_MODE_CL73_REDRV: 447 case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
453 458
454static void xgbe_an_disable(struct xgbe_prv_data *pdata) 459static void xgbe_an_disable(struct xgbe_prv_data *pdata)
455{ 460{
461 if (pdata->phy_if.phy_impl.an_post)
462 pdata->phy_if.phy_impl.an_post(pdata);
463
456 switch (pdata->an_mode) { 464 switch (pdata->an_mode) {
457 case XGBE_AN_MODE_CL73: 465 case XGBE_AN_MODE_CL73:
458 case XGBE_AN_MODE_CL73_REDRV: 466 case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
505 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, 513 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
506 reg); 514 reg);
507 515
508 if (pdata->phy_if.phy_impl.kr_training_post)
509 pdata->phy_if.phy_impl.kr_training_post(pdata);
510
511 netif_dbg(pdata, link, pdata->netdev, 516 netif_dbg(pdata, link, pdata->netdev,
512 "KR training initiated\n"); 517 "KR training initiated\n");
518
519 if (pdata->phy_if.phy_impl.kr_training_post)
520 pdata->phy_if.phy_impl.kr_training_post(pdata);
513 } 521 }
514 522
515 return XGBE_AN_PAGE_RECEIVED; 523 return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
637 return XGBE_AN_NO_LINK; 645 return XGBE_AN_NO_LINK;
638 } 646 }
639 647
640 xgbe_an73_disable(pdata); 648 xgbe_an_disable(pdata);
641 649
642 xgbe_switch_mode(pdata); 650 xgbe_switch_mode(pdata);
643 651
644 xgbe_an73_restart(pdata); 652 xgbe_an_restart(pdata);
645 653
646 return XGBE_AN_INCOMPAT_LINK; 654 return XGBE_AN_INCOMPAT_LINK;
647} 655}
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
820 pdata->an_result = pdata->an_state; 828 pdata->an_result = pdata->an_state;
821 pdata->an_state = XGBE_AN_READY; 829 pdata->an_state = XGBE_AN_READY;
822 830
831 if (pdata->phy_if.phy_impl.an_post)
832 pdata->phy_if.phy_impl.an_post(pdata);
833
823 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", 834 netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
824 xgbe_state_as_string(pdata->an_result)); 835 xgbe_state_as_string(pdata->an_result));
825 } 836 }
@@ -903,6 +914,9 @@ again:
903 pdata->kx_state = XGBE_RX_BPA; 914 pdata->kx_state = XGBE_RX_BPA;
904 pdata->an_start = 0; 915 pdata->an_start = 0;
905 916
917 if (pdata->phy_if.phy_impl.an_post)
918 pdata->phy_if.phy_impl.an_post(pdata);
919
906 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", 920 netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
907 xgbe_state_as_string(pdata->an_result)); 921 xgbe_state_as_string(pdata->an_result));
908 } 922 }
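
an_pre and an_post are optional members of the PHY implementation interface,
so every call site checks the pointer before invoking it. The guard idiom
reduces to a few lines of plain C:

#include <stdio.h>

struct phy_impl {
	void (*an_pre)(void *);                /* optional: may stay NULL */
	void (*an_post)(void *);               /* optional: may stay NULL */
};

static void my_an_post(void *data)
{
	(void)data;
	printf("an_post ran\n");
}

int main(void)
{
	struct phy_impl impl = { .an_post = my_an_post };

	if (impl.an_pre)                       /* skipped: not provided */
		impl.an_pre(NULL);
	if (impl.an_post)                      /* provided: runs */
		impl.an_post(NULL);
	return 0;
}
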
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index eb23f9ba1a9a..82d1f416ee2a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
456 .irq_reissue_support = 1, 456 .irq_reissue_support = 1,
457 .tx_desc_prefetch = 5, 457 .tx_desc_prefetch = 5,
458 .rx_desc_prefetch = 5, 458 .rx_desc_prefetch = 5,
459 .an_cdr_workaround = 1,
459}; 460};
460 461
461static const struct xgbe_version_data xgbe_v2b = { 462static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
470 .irq_reissue_support = 1, 471 .irq_reissue_support = 1,
471 .tx_desc_prefetch = 5, 472 .tx_desc_prefetch = 5,
472 .rx_desc_prefetch = 5, 473 .rx_desc_prefetch = 5,
474 .an_cdr_workaround = 1,
473}; 475};
474 476
475static const struct pci_device_id xgbe_pci_table[] = { 477static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3304a291aa96..aac884314000 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -147,6 +147,14 @@
147/* Rate-change complete wait/retry count */ 147/* Rate-change complete wait/retry count */
148#define XGBE_RATECHANGE_COUNT 500 148#define XGBE_RATECHANGE_COUNT 500
149 149
150/* CDR delay values for KR support (in usec) */
151#define XGBE_CDR_DELAY_INIT 10000
152#define XGBE_CDR_DELAY_INC 10000
153#define XGBE_CDR_DELAY_MAX 100000
154
155/* RRC frequency during link status check */
156#define XGBE_RRC_FREQUENCY 10
157
150enum xgbe_port_mode { 158enum xgbe_port_mode {
151 XGBE_PORT_MODE_RSVD = 0, 159 XGBE_PORT_MODE_RSVD = 0,
152 XGBE_PORT_MODE_BACKPLANE, 160 XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
245#define XGBE_SFP_BASE_VENDOR_SN 4 253#define XGBE_SFP_BASE_VENDOR_SN 4
246#define XGBE_SFP_BASE_VENDOR_SN_LEN 16 254#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
247 255
256#define XGBE_SFP_EXTD_OPT1 1
257#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
258#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)
259
248#define XGBE_SFP_EXTD_DIAG 28 260#define XGBE_SFP_EXTD_DIAG 28
249#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) 261#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
250 262
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
324 336
325 unsigned int sfp_gpio_address; 337 unsigned int sfp_gpio_address;
326 unsigned int sfp_gpio_mask; 338 unsigned int sfp_gpio_mask;
339 unsigned int sfp_gpio_inputs;
327 unsigned int sfp_gpio_rx_los; 340 unsigned int sfp_gpio_rx_los;
328 unsigned int sfp_gpio_tx_fault; 341 unsigned int sfp_gpio_tx_fault;
329 unsigned int sfp_gpio_mod_absent; 342 unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
355 unsigned int redrv_addr; 368 unsigned int redrv_addr;
356 unsigned int redrv_lane; 369 unsigned int redrv_lane;
357 unsigned int redrv_model; 370 unsigned int redrv_model;
371
372 /* KR AN support */
373 unsigned int phy_cdr_notrack;
374 unsigned int phy_cdr_delay;
358}; 375};
359 376
360/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ 377/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
974 phy_data->sfp_phy_avail = 1; 991 phy_data->sfp_phy_avail = 1;
975} 992}
976 993
994static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
995{
996 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
997
998 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
999 return false;
1000
1001 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
1002 return false;
1003
1004 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
1005 return true;
1006
1007 return false;
1008}
1009
1010static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
1011{
1012 u8 *sfp_extd = phy_data->sfp_eeprom.extd;
1013
1014 if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
1015 return false;
1016
1017 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
1018 return false;
1019
1020 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
1021 return true;
1022
1023 return false;
1024}
1025
1026static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
1027{
1028 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
1029 return false;
1030
1031 if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
1032 return true;
1033
1034 return false;
1035}
1036
977static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) 1037static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
978{ 1038{
979 struct xgbe_phy_data *phy_data = pdata->phy_data; 1039 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
1019 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) 1079 if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
1020 return; 1080 return;
1021 1081
1082 /* Update transceiver signals (eeprom extd/options) */
1083 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
1084 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
1085
1022 if (xgbe_phy_sfp_parse_quirks(pdata)) 1086 if (xgbe_phy_sfp_parse_quirks(pdata))
1023 return; 1087 return;
1024 1088
@@ -1184,7 +1248,6 @@ put:
1184static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) 1248static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1185{ 1249{
1186 struct xgbe_phy_data *phy_data = pdata->phy_data; 1250 struct xgbe_phy_data *phy_data = pdata->phy_data;
1187 unsigned int gpio_input;
1188 u8 gpio_reg, gpio_ports[2]; 1251 u8 gpio_reg, gpio_ports[2];
1189 int ret; 1252 int ret;
1190 1253
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
1199 return; 1262 return;
1200 } 1263 }
1201 1264
1202 gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; 1265 phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
1203
1204 if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
1205 /* No GPIO, just assume the module is present for now */
1206 phy_data->sfp_mod_absent = 0;
1207 } else {
1208 if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
1209 phy_data->sfp_mod_absent = 0;
1210 }
1211
1212 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
1213 (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
1214 phy_data->sfp_rx_los = 1;
1215 1266
1216 if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && 1267 phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
1217 (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
1218 phy_data->sfp_tx_fault = 1;
1219} 1268}
1220 1269
1221static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) 1270static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
2361 return 1; 2410 return 1;
2362 2411
2363 /* No link, attempt a receiver reset cycle */ 2412 /* No link, attempt a receiver reset cycle */
2364 if (phy_data->rrc_count++) { 2413 if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
2365 phy_data->rrc_count = 0; 2414 phy_data->rrc_count = 0;
2366 xgbe_phy_rrc(pdata); 2415 xgbe_phy_rrc(pdata);
2367 } 2416 }
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
2669 return true; 2718 return true;
2670} 2719}
2671 2720
2721static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
2722{
2723 struct xgbe_phy_data *phy_data = pdata->phy_data;
2724
2725 if (!pdata->debugfs_an_cdr_workaround)
2726 return;
2727
2728 if (!phy_data->phy_cdr_notrack)
2729 return;
2730
2731 usleep_range(phy_data->phy_cdr_delay,
2732 phy_data->phy_cdr_delay + 500);
2733
2734 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2735 XGBE_PMA_CDR_TRACK_EN_MASK,
2736 XGBE_PMA_CDR_TRACK_EN_ON);
2737
2738 phy_data->phy_cdr_notrack = 0;
2739}
2740
2741static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
2742{
2743 struct xgbe_phy_data *phy_data = pdata->phy_data;
2744
2745 if (!pdata->debugfs_an_cdr_workaround)
2746 return;
2747
2748 if (phy_data->phy_cdr_notrack)
2749 return;
2750
2751 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
2752 XGBE_PMA_CDR_TRACK_EN_MASK,
2753 XGBE_PMA_CDR_TRACK_EN_OFF);
2754
2755 xgbe_phy_rrc(pdata);
2756
2757 phy_data->phy_cdr_notrack = 1;
2758}
2759
2760static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
2761{
2762 if (!pdata->debugfs_an_cdr_track_early)
2763 xgbe_phy_cdr_track(pdata);
2764}
2765
2766static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
2767{
2768 if (pdata->debugfs_an_cdr_track_early)
2769 xgbe_phy_cdr_track(pdata);
2770}
2771
2772static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
2773{
2774 struct xgbe_phy_data *phy_data = pdata->phy_data;
2775
2776 switch (pdata->an_mode) {
2777 case XGBE_AN_MODE_CL73:
2778 case XGBE_AN_MODE_CL73_REDRV:
2779 if (phy_data->cur_mode != XGBE_MODE_KR)
2780 break;
2781
2782 xgbe_phy_cdr_track(pdata);
2783
2784 switch (pdata->an_result) {
2785 case XGBE_AN_READY:
2786 case XGBE_AN_COMPLETE:
2787 break;
2788 default:
2789 if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
2790 phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
2791 else
2792 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
2793 break;
2794 }
2795 break;
2796 default:
2797 break;
2798 }
2799}
2800
2801static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
2802{
2803 struct xgbe_phy_data *phy_data = pdata->phy_data;
2804
2805 switch (pdata->an_mode) {
2806 case XGBE_AN_MODE_CL73:
2807 case XGBE_AN_MODE_CL73_REDRV:
2808 if (phy_data->cur_mode != XGBE_MODE_KR)
2809 break;
2810
2811 xgbe_phy_cdr_notrack(pdata);
2812 break;
2813 default:
2814 break;
2815 }
2816}
2817
2672static void xgbe_phy_stop(struct xgbe_prv_data *pdata) 2818static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2673{ 2819{
2674 struct xgbe_phy_data *phy_data = pdata->phy_data; 2820 struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
2680 xgbe_phy_sfp_reset(phy_data); 2826 xgbe_phy_sfp_reset(phy_data);
2681 xgbe_phy_sfp_mod_absent(pdata); 2827 xgbe_phy_sfp_mod_absent(pdata);
2682 2828
2829 /* Reset CDR support */
2830 xgbe_phy_cdr_track(pdata);
2831
2683 /* Power off the PHY */ 2832 /* Power off the PHY */
2684 xgbe_phy_power_off(pdata); 2833 xgbe_phy_power_off(pdata);
2685 2834
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
2712 /* Start in highest supported mode */ 2861 /* Start in highest supported mode */
2713 xgbe_phy_set_mode(pdata, phy_data->start_mode); 2862 xgbe_phy_set_mode(pdata, phy_data->start_mode);
2714 2863
2864 /* Reset CDR support */
2865 xgbe_phy_cdr_track(pdata);
2866
2715 /* After starting the I2C controller, we can check for an SFP */ 2867 /* After starting the I2C controller, we can check for an SFP */
2716 switch (phy_data->port_mode) { 2868 switch (phy_data->port_mode) {
2717 case XGBE_PORT_MODE_SFP: 2869 case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
3019 } 3171 }
3020 } 3172 }
3021 3173
3174 phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
3175
3022 /* Register for driving external PHYs */ 3176 /* Register for driving external PHYs */
3023 mii = devm_mdiobus_alloc(pdata->dev); 3177 mii = devm_mdiobus_alloc(pdata->dev);
3024 if (!mii) { 3178 if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
3071 phy_impl->an_advertising = xgbe_phy_an_advertising; 3225 phy_impl->an_advertising = xgbe_phy_an_advertising;
3072 3226
3073 phy_impl->an_outcome = xgbe_phy_an_outcome; 3227 phy_impl->an_outcome = xgbe_phy_an_outcome;
3228
3229 phy_impl->an_pre = xgbe_phy_an_pre;
3230 phy_impl->an_post = xgbe_phy_an_post;
3231
3232 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
3233 phy_impl->kr_training_post = xgbe_phy_kr_training_post;
3074} 3234}
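
In xgbe_phy_an_post(), every auto-negotiation that ends in a state other than
ready/complete lengthens the CDR re-enable delay by XGBE_CDR_DELAY_INC until
XGBE_CDR_DELAY_MAX is reached, then wraps back to XGBE_CDR_DELAY_INIT, so the
workaround sweeps the whole 10-100 ms range rather than committing to one
guess. The escalation in isolation:

#include <stdio.h>

#define CDR_DELAY_INIT 10000               /* usec, values from the patch */
#define CDR_DELAY_INC  10000
#define CDR_DELAY_MAX  100000

int main(void)
{
	unsigned int delay = CDR_DELAY_INIT;

	for (int failed_an = 0; failed_an < 12; failed_an++) {
		printf("attempt %2d: delay %u us\n", failed_an, delay);
		if (delay < CDR_DELAY_MAX)
			delay += CDR_DELAY_INC;    /* back off further */
		else
			delay = CDR_DELAY_INIT;    /* wrap and sweep again */
	}
	return 0;
}
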
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad102c8bac7b..95d4b56448c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
833/* This structure represents implementation specific routines for an 833/* This structure represents implementation specific routines for an
834 * implementation of a PHY. All routines are required unless noted below. 834 * implementation of a PHY. All routines are required unless noted below.
835 * Optional routines: 835 * Optional routines:
836 * an_pre, an_post
836 * kr_training_pre, kr_training_post 837 * kr_training_pre, kr_training_post
837 */ 838 */
838struct xgbe_phy_impl_if { 839struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
875 /* Process results of auto-negotiation */ 876 /* Process results of auto-negotiation */
876 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); 877 enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
877 878
879 /* Pre/Post auto-negotiation support */
880 void (*an_pre)(struct xgbe_prv_data *);
881 void (*an_post)(struct xgbe_prv_data *);
882
878 /* Pre/Post KR training enablement support */ 883 /* Pre/Post KR training enablement support */
879 void (*kr_training_pre)(struct xgbe_prv_data *); 884 void (*kr_training_pre)(struct xgbe_prv_data *);
880 void (*kr_training_post)(struct xgbe_prv_data *); 885 void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
989 unsigned int irq_reissue_support; 994 unsigned int irq_reissue_support;
990 unsigned int tx_desc_prefetch; 995 unsigned int tx_desc_prefetch;
991 unsigned int rx_desc_prefetch; 996 unsigned int rx_desc_prefetch;
997 unsigned int an_cdr_workaround;
992}; 998};
993 999
994struct xgbe_vxlan_data { 1000struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
1257 unsigned int debugfs_xprop_reg; 1263 unsigned int debugfs_xprop_reg;
1258 1264
1259 unsigned int debugfs_xi2c_reg; 1265 unsigned int debugfs_xi2c_reg;
1266
1267 bool debugfs_an_cdr_workaround;
1268 bool debugfs_an_cdr_track_early;
1260}; 1269};
1261 1270
1262/* Function prototypes*/ 1271/* Function prototypes*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1f622ca2a64f..8ba14ae00e8f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
1927 return retval; 1927 return retval;
1928} 1928}
1929 1929
1930static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) 1930static void bnxt_get_pkgver(struct net_device *dev)
1931{ 1931{
1932 struct bnxt *bp = netdev_priv(dev);
1932 u16 index = 0; 1933 u16 index = 0;
1933 u32 datalen; 1934 char *pkgver;
1935 u32 pkglen;
1936 u8 *pkgbuf;
1937 int len;
1934 1938
1935 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 1939 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1936 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 1940 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1937 &index, NULL, &datalen) != 0) 1941 &index, NULL, &pkglen) != 0)
1938 return NULL; 1942 return;
1939 1943
1940 memset(buf, 0, buflen); 1944 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
1941 if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) 1945 if (!pkgbuf) {
1942 return NULL; 1946 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
1947 pkglen);
1948 return;
1949 }
1950
1951 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
1952 goto err;
1943 1953
1944 return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, 1954 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
1945 datalen); 1955 pkglen);
1956 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
1957 len = strlen(bp->fw_ver_str);
1958 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
1959 "/pkg %s", pkgver);
1960 }
1961err:
1962 kfree(pkgbuf);
1946} 1963}
1947 1964
1948static int bnxt_get_eeprom(struct net_device *dev, 1965static int bnxt_get_eeprom(struct net_device *dev,
@@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
2615 struct hwrm_selftest_qlist_input req = {0}; 2632 struct hwrm_selftest_qlist_input req = {0};
2616 struct bnxt_test_info *test_info; 2633 struct bnxt_test_info *test_info;
2617 struct net_device *dev = bp->dev; 2634 struct net_device *dev = bp->dev;
2618 char *pkglog;
2619 int i, rc; 2635 int i, rc;
2620 2636
2621 pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); 2637 bnxt_get_pkgver(dev);
2622 if (pkglog) {
2623 char *pkgver;
2624 int len;
2625 2638
2626 pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
2627 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
2628 len = strlen(bp->fw_ver_str);
2629 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2630 "/pkg %s", pkgver);
2631 }
2632 kfree(pkglog);
2633 }
2634 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) 2639 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
2635 return; 2640 return;
2636 2641
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 73f2249555b5..83444811d3c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
59#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) 59#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
60#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) 60#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
61 61
62#define BNX_PKG_LOG_MAX_LENGTH 4096
63
64enum bnxnvm_pkglog_field_index { 62enum bnxnvm_pkglog_field_index {
65 BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, 63 BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
66 BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, 64 BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 3e62692af011..fa5b30f547f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -87,7 +87,7 @@ do { \
87 87
88#define HNAE_AE_REGISTER 0x1 88#define HNAE_AE_REGISTER 0x1
89 89
90#define RCB_RING_NAME_LEN 16 90#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
91 91
92#define HNAE_LOWEST_LATENCY_COAL_PARAM 30 92#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
93#define HNAE_LOW_LATENCY_COAL_PARAM 80 93#define HNAE_LOW_LATENCY_COAL_PARAM 80
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index aad5658d79d5..6e8d6a6f6aaf 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev)
794{ 794{
795 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 795 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
796 unsigned long timeout = msecs_to_jiffies(30000); 796 unsigned long timeout = msecs_to_jiffies(30000);
797 struct device *dev = &adapter->vdev->dev; 797 int retry_count = 0;
798 int rc; 798 int rc;
799 799
800 do { 800 do {
801 if (adapter->renegotiate) { 801 if (retry_count > IBMVNIC_MAX_QUEUES) {
802 adapter->renegotiate = false; 802 netdev_warn(netdev, "Login attempts exceeded\n");
803 return -1;
804 }
805
806 adapter->init_done_rc = 0;
807 reinit_completion(&adapter->init_done);
808 rc = send_login(adapter);
809 if (rc) {
810 netdev_warn(netdev, "Unable to login\n");
811 return rc;
812 }
813
814 if (!wait_for_completion_timeout(&adapter->init_done,
815 timeout)) {
816 netdev_warn(netdev, "Login timed out\n");
817 return -1;
818 }
819
820 if (adapter->init_done_rc == PARTIALSUCCESS) {
821 retry_count++;
803 release_sub_crqs(adapter, 1); 822 release_sub_crqs(adapter, 1);
804 823
824 adapter->init_done_rc = 0;
805 reinit_completion(&adapter->init_done); 825 reinit_completion(&adapter->init_done);
806 send_cap_queries(adapter); 826 send_cap_queries(adapter);
807 if (!wait_for_completion_timeout(&adapter->init_done, 827 if (!wait_for_completion_timeout(&adapter->init_done,
808 timeout)) { 828 timeout)) {
809 dev_err(dev, "Capabilities query timeout\n"); 829 netdev_warn(netdev,
830 "Capabilities query timed out\n");
810 return -1; 831 return -1;
811 } 832 }
833
812 rc = init_sub_crqs(adapter); 834 rc = init_sub_crqs(adapter);
813 if (rc) { 835 if (rc) {
814 dev_err(dev, 836 netdev_warn(netdev,
815 "Initialization of SCRQ's failed\n"); 837 "SCRQ initialization failed\n");
816 return -1; 838 return -1;
817 } 839 }
840
818 rc = init_sub_crq_irqs(adapter); 841 rc = init_sub_crq_irqs(adapter);
819 if (rc) { 842 if (rc) {
820 dev_err(dev, 843 netdev_warn(netdev,
821 "Initialization of SCRQ's irqs failed\n"); 844 "SCRQ irq initialization failed\n");
822 return -1; 845 return -1;
823 } 846 }
824 } 847 } else if (adapter->init_done_rc) {
825 848 netdev_warn(netdev, "Adapter login failed\n");
826 reinit_completion(&adapter->init_done);
827 rc = send_login(adapter);
828 if (rc) {
829 dev_err(dev, "Unable to attempt device login\n");
830 return rc;
831 } else if (!wait_for_completion_timeout(&adapter->init_done,
832 timeout)) {
833 dev_err(dev, "Login timeout\n");
834 return -1; 849 return -1;
835 } 850 }
836 } while (adapter->renegotiate); 851 } while (adapter->init_done_rc == PARTIALSUCCESS);
837 852
838 /* handle pending MAC address changes after successful login */ 853 /* handle pending MAC address changes after successful login */
839 if (adapter->mac_change_pending) { 854 if (adapter->mac_change_pending) {
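
The rewritten login loop retries only while the firmware reports partial
success and gives up after a fixed number of attempts, replacing the old
open-ended renegotiate flag. A userspace reduction of that control flow (the
constants and try_login() are stand-ins):

#include <stdio.h>

#define MAX_RETRIES 16                     /* stands in for IBMVNIC_MAX_QUEUES */
#define PARTIAL     2                      /* stands in for PARTIALSUCCESS */

static int try_login(int attempt)          /* hypothetical: succeeds on 3rd try */
{
	return attempt < 2 ? PARTIAL : 0;
}

int main(void)
{
	int rc, retries = 0;

	do {
		if (retries > MAX_RETRIES) {
			fprintf(stderr, "login attempts exceeded\n");
			return 1;
		}
		rc = try_login(retries);
		if (rc == PARTIAL) {
			retries++;         /* renegotiate capabilities, retry */
			printf("partial success, retrying (%d)\n", retries);
		} else if (rc) {
			fprintf(stderr, "login failed\n");
			return 1;
		}
	} while (rc == PARTIAL);

	printf("logged in after %d retries\n", retries);
	return 0;
}
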
@@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev)
1034 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1049 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1035 if (prev_state == VNIC_CLOSED) 1050 if (prev_state == VNIC_CLOSED)
1036 enable_irq(adapter->rx_scrq[i]->irq); 1051 enable_irq(adapter->rx_scrq[i]->irq);
1037 else 1052 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1038 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1039 } 1053 }
1040 1054
1041 for (i = 0; i < adapter->req_tx_queues; i++) { 1055 for (i = 0; i < adapter->req_tx_queues; i++) {
1042 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1056 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1043 if (prev_state == VNIC_CLOSED) 1057 if (prev_state == VNIC_CLOSED)
1044 enable_irq(adapter->tx_scrq[i]->irq); 1058 enable_irq(adapter->tx_scrq[i]->irq);
1045 else 1059 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1046 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1047 } 1060 }
1048 1061
1049 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); 1062 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1115,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1115 if (!adapter->rx_pool) 1128 if (!adapter->rx_pool)
1116 return; 1129 return;
1117 1130
1118 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 1131 rx_scrqs = adapter->num_active_rx_pools;
1119 rx_entries = adapter->req_rx_add_entries_per_subcrq; 1132 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1120 1133
1121 /* Free any remaining skbs in the rx buffer pools */ 1134 /* Free any remaining skbs in the rx buffer pools */
@@ -1164,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1164 if (!adapter->tx_pool || !adapter->tso_pool) 1177 if (!adapter->tx_pool || !adapter->tso_pool)
1165 return; 1178 return;
1166 1179
1167 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 1180 tx_scrqs = adapter->num_active_tx_pools;
1168 1181
1169 /* Free any remaining skbs in the tx buffer pools */ 1182 /* Free any remaining skbs in the tx buffer pools */
1170 for (i = 0; i < tx_scrqs; i++) { 1183 for (i = 0; i < tx_scrqs; i++) {
@@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1184 if (adapter->tx_scrq[i]->irq) { 1197 if (adapter->tx_scrq[i]->irq) {
1185 netdev_dbg(netdev, 1198 netdev_dbg(netdev,
1186 "Disabling tx_scrq[%d] irq\n", i); 1199 "Disabling tx_scrq[%d] irq\n", i);
1200 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1187 disable_irq(adapter->tx_scrq[i]->irq); 1201 disable_irq(adapter->tx_scrq[i]->irq);
1188 } 1202 }
1189 } 1203 }
@@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1193 if (adapter->rx_scrq[i]->irq) { 1207 if (adapter->rx_scrq[i]->irq) {
1194 netdev_dbg(netdev, 1208 netdev_dbg(netdev,
1195 "Disabling rx_scrq[%d] irq\n", i); 1209 "Disabling rx_scrq[%d] irq\n", i);
1210 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1196 disable_irq(adapter->rx_scrq[i]->irq); 1211 disable_irq(adapter->rx_scrq[i]->irq);
1197 } 1212 }
1198 } 1213 }
@@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1828 for (i = 0; i < adapter->req_rx_queues; i++) 1843 for (i = 0; i < adapter->req_rx_queues; i++)
1829 napi_schedule(&adapter->napi[i]); 1844 napi_schedule(&adapter->napi[i]);
1830 1845
1831 if (adapter->reset_reason != VNIC_RESET_FAILOVER) 1846 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
1847 adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
1832 netdev_notify_peers(netdev); 1848 netdev_notify_peers(netdev);
1833 1849
1834 netif_carrier_on(netdev); 1850 netif_carrier_on(netdev);
@@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2601{ 2617{
2602 struct device *dev = &adapter->vdev->dev; 2618 struct device *dev = &adapter->vdev->dev;
2603 unsigned long rc; 2619 unsigned long rc;
2620 u64 val;
2604 2621
2605 if (scrq->hw_irq > 0x100000000ULL) { 2622 if (scrq->hw_irq > 0x100000000ULL) {
2606 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2607 return 1; 2624 return 1;
2608 } 2625 }
2609 2626
2627 val = (0xff000000) | scrq->hw_irq;
2628 rc = plpar_hcall_norets(H_EOI, val);
2629 if (rc)
2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2631 val, rc);
2632
2610 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2611 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2612 if (rc) 2635 if (rc)
@@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
3170struct vnic_login_client_data { 3193struct vnic_login_client_data {
3171 u8 type; 3194 u8 type;
3172 __be16 len; 3195 __be16 len;
3173 char name; 3196 char name[];
3174} __packed; 3197} __packed;
3175 3198
3176static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 3199static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
@@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3199 vlcd->type = 1; 3222 vlcd->type = 1;
3200 len = strlen(os_name) + 1; 3223 len = strlen(os_name) + 1;
3201 vlcd->len = cpu_to_be16(len); 3224 vlcd->len = cpu_to_be16(len);
3202 strncpy(&vlcd->name, os_name, len); 3225 strncpy(vlcd->name, os_name, len);
3203 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3226 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3204 3227
3205 /* Type 2 - LPAR name */ 3228 /* Type 2 - LPAR name */
3206 vlcd->type = 2; 3229 vlcd->type = 2;
3207 len = strlen(utsname()->nodename) + 1; 3230 len = strlen(utsname()->nodename) + 1;
3208 vlcd->len = cpu_to_be16(len); 3231 vlcd->len = cpu_to_be16(len);
3209 strncpy(&vlcd->name, utsname()->nodename, len); 3232 strncpy(vlcd->name, utsname()->nodename, len);
3210 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3233 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3211 3234
3212 /* Type 3 - device name */ 3235 /* Type 3 - device name */
3213 vlcd->type = 3; 3236 vlcd->type = 3;
3214 len = strlen(adapter->netdev->name) + 1; 3237 len = strlen(adapter->netdev->name) + 1;
3215 vlcd->len = cpu_to_be16(len); 3238 vlcd->len = cpu_to_be16(len);
3216 strncpy(&vlcd->name, adapter->netdev->name, len); 3239 strncpy(vlcd->name, adapter->netdev->name, len);
3217} 3240}
3218 3241
3219static int send_login(struct ibmvnic_adapter *adapter) 3242static int send_login(struct ibmvnic_adapter *adapter)
@@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3942 * to resend the login buffer with fewer queues requested. 3965 * to resend the login buffer with fewer queues requested.
3943 */ 3966 */
3944 if (login_rsp_crq->generic.rc.code) { 3967 if (login_rsp_crq->generic.rc.code) {
3945 adapter->renegotiate = true; 3968 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
3946 complete(&adapter->init_done); 3969 complete(&adapter->init_done);
3947 return 0; 3970 return 0;
3948 } 3971 }
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c0b58c2c39..22391e8805f6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1035,7 +1035,6 @@ struct ibmvnic_adapter {
1035 1035
1036 struct ibmvnic_sub_crq_queue **tx_scrq; 1036 struct ibmvnic_sub_crq_queue **tx_scrq;
1037 struct ibmvnic_sub_crq_queue **rx_scrq; 1037 struct ibmvnic_sub_crq_queue **rx_scrq;
1038 bool renegotiate;
1039 1038
1040 /* rx structs */ 1039 /* rx structs */
1041 struct napi_struct *napi; 1040 struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5b13ca1bd85f..7dc5f045e969 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3 586#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) 587#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
588 588
589 /* Action type = 5 - Large Action */ 589 /* Action type = 5 - Generic Value */
590#define ICE_LG_ACT_GENERIC 0x5 590#define ICE_LG_ACT_GENERIC 0x5
591#define ICE_LG_ACT_GENERIC_VALUE_S 3 591#define ICE_LG_ACT_GENERIC_VALUE_S 3
592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) 592#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 21977ec984c4..71d032cc5fa7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
78 struct ice_aq_desc desc; 78 struct ice_aq_desc desc;
79 enum ice_status status; 79 enum ice_status status;
80 u16 flags; 80 u16 flags;
81 u8 i;
81 82
82 cmd = &desc.params.mac_read; 83 cmd = &desc.params.mac_read;
83 84
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
98 return ICE_ERR_CFG; 99 return ICE_ERR_CFG;
99 } 100 }
100 101
101 ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); 102 /* A single port can report up to two (LAN and WoL) addresses */
102 ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); 103 for (i = 0; i < cmd->num_addr; i++)
104 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
105 ether_addr_copy(hw->port_info->mac.lan_addr,
106 resp[i].mac_addr);
107 ether_addr_copy(hw->port_info->mac.perm_addr,
108 resp[i].mac_addr);
109 break;
110 }
111
103 return 0; 112 return 0;
104} 113}
105 114
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
464 if (status) 473 if (status)
465 goto err_unroll_sched; 474 goto err_unroll_sched;
466 475
467 /* Get port MAC information */ 476 /* Get MAC information */
468 mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); 477 /* A single port can report up to two (LAN and WoL) addresses */
469 mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); 478 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
479 sizeof(struct ice_aqc_manage_mac_read_resp),
480 GFP_KERNEL);
481 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
470 482
471 if (!mac_buf) { 483 if (!mac_buf) {
472 status = ICE_ERR_NO_MEMORY; 484 status = ICE_ERR_NO_MEMORY;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 1b9e2ef48a9d..499904874b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,8 +121,6 @@
121#define PFINT_FW_CTL_CAUSE_ENA_S 30 121#define PFINT_FW_CTL_CAUSE_ENA_S 30
122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) 122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
123#define PFINT_OICR 0x0016CA00 123#define PFINT_OICR 0x0016CA00
124#define PFINT_OICR_INTEVENT_S 0
125#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
126#define PFINT_OICR_HLP_RDY_S 14 124#define PFINT_OICR_HLP_RDY_S 14
127#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) 125#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
128#define PFINT_OICR_CPM_RDY_S 15 126#define PFINT_OICR_CPM_RDY_S 15
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 210b7910f1cd..5299caf55a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1722 oicr = rd32(hw, PFINT_OICR); 1722 oicr = rd32(hw, PFINT_OICR);
1723 ena_mask = rd32(hw, PFINT_OICR_ENA); 1723 ena_mask = rd32(hw, PFINT_OICR_ENA);
1724 1724
1725 if (!(oicr & PFINT_OICR_INTEVENT_M))
1726 goto ena_intr;
1727
1728 if (oicr & PFINT_OICR_GRST_M) { 1725 if (oicr & PFINT_OICR_GRST_M) {
1729 u32 reset; 1726 u32 reset;
1730 /* we have a reset warning */ 1727 /* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1782 } 1779 }
1783 ret = IRQ_HANDLED; 1780 ret = IRQ_HANDLED;
1784 1781
1785ena_intr:
1786 /* re-enable interrupt causes that are not handled during this pass */ 1782 /* re-enable interrupt causes that are not handled during this pass */
1787 wr32(hw, PFINT_OICR_ENA, ena_mask); 1783 wr32(hw, PFINT_OICR_ENA, ena_mask);
1788 if (!test_bit(__ICE_DOWN, pf->state)) { 1784 if (!test_bit(__ICE_DOWN, pf->state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f16ff3e4a840..2e6c1d92cc88 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
751 u16 num_added = 0; 751 u16 num_added = 0;
752 u32 temp; 752 u32 temp;
753 753
754 *num_nodes_added = 0;
755
754 if (!num_nodes) 756 if (!num_nodes)
755 return status; 757 return status;
756 758
757 if (!parent || layer < hw->sw_entry_point_layer) 759 if (!parent || layer < hw->sw_entry_point_layer)
758 return ICE_ERR_PARAM; 760 return ICE_ERR_PARAM;
759 761
760 *num_nodes_added = 0;
761
762 /* max children per node per layer */ 762 /* max children per node per layer */
763 max_child_nodes = 763 max_child_nodes =
764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); 764 le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1c0bc30a16d..cce7ada89255 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1700 WARN_ON(hw->mac.type != e1000_i210); 1700 WARN_ON(hw->mac.type != e1000_i210);
1701 WARN_ON(queue < 0 || queue > 1); 1701 WARN_ON(queue < 0 || queue > 1);
1702 1702
1703 if (enable) { 1703 if (enable || queue == 0) {
1704 /* i210 does not allow the queue 0 to be in the Strict
1705 * Priority mode while the Qav mode is enabled, so,
1706 * instead of disabling strict priority mode, we give
1707 * queue 0 the maximum of credits possible.
1708 *
1709 * See section 8.12.19 of the i210 datasheet, "Note:
1710 * Queue0 QueueMode must be set to 1b when
1711 * TransmitMode is set to Qav."
1712 */
1713 if (queue == 0 && !enable) {
1714 /* max "linkspeed" idleslope in kbps */
1715 idleslope = 1000000;
1716 hicredit = ETH_FRAME_LEN;
1717 }
1718
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); 1719 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); 1720 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1706 1721
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3d9033f26eff..e3d04f226d57 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3420 if (!err) 3420 if (!err)
3421 continue; 3421 continue;
3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); 3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3423 break; 3423 goto err_setup_tx;
3424 } 3424 }
3425 3425
3426 return 0; 3426 return 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 54a038943c06..4202f9b5b966 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -663,7 +663,7 @@ enum mvpp2_tag_type {
663#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 663#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
664#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ 664#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) 665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
666#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) 666#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
667#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 667#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
668#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 668#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
669#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 669#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@@ -916,6 +916,8 @@ static struct {
916 916
917#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) 917#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
918 918
919#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
920
919/* Definitions */ 921/* Definitions */
920 922
921/* Shared Packet Processor resources */ 923/* Shared Packet Processor resources */
@@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1429 if (port->priv->hw_version == MVPP21) 1431 if (port->priv->hw_version == MVPP21)
1430 return tx_desc->pp21.buf_dma_addr; 1432 return tx_desc->pp21.buf_dma_addr;
1431 else 1433 else
1432 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); 1434 return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
1433} 1435}
1434 1436
1435static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, 1437static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1447 } else { 1449 } else {
1448 u64 val = (u64)addr; 1450 u64 val = (u64)addr;
1449 1451
1450 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); 1452 tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
1451 tx_desc->pp22.buf_dma_addr_ptp |= val; 1453 tx_desc->pp22.buf_dma_addr_ptp |= val;
1452 tx_desc->pp22.packet_offset = offset; 1454 tx_desc->pp22.packet_offset = offset;
1453 } 1455 }
@@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1507 if (port->priv->hw_version == MVPP21) 1509 if (port->priv->hw_version == MVPP21)
1508 return rx_desc->pp21.buf_dma_addr; 1510 return rx_desc->pp21.buf_dma_addr;
1509 else 1511 else
1510 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); 1512 return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
1511} 1513}
1512 1514
1513static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, 1515static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
@@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1516 if (port->priv->hw_version == MVPP21) 1518 if (port->priv->hw_version == MVPP21)
1517 return rx_desc->pp21.buf_cookie; 1519 return rx_desc->pp21.buf_cookie;
1518 else 1520 else
1519 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); 1521 return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
1520} 1522}
1521 1523
1522static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, 1524static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
@@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev)
8789 } 8791 }
8790 8792
8791 if (priv->hw_version == MVPP22) { 8793 if (priv->hw_version == MVPP22) {
8792 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); 8794 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
8793 if (err) 8795 if (err)
8794 goto err_mg_clk; 8796 goto err_mg_clk;
8795 /* Sadly, the BM pools all share the same register to 8797 /* Sadly, the BM pools all share the same register to
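
Centralizing the mask in MVPP2_DESC_DMA_MASK also quietly fixes an off-by-one:
GENMASK_ULL(40, 0) sets bits 0 through 40, i.e. 41 bits, while DMA_BIT_MASK(40)
is the 40-bit mask the hardware actually uses. Easy to confirm in userspace
(macro bodies copied locally just for the comparison):

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define DMA_BIT_MASK(n)   (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("GENMASK_ULL(40, 0) = %#llx (41 bits set)\n",
	       (unsigned long long)GENMASK_ULL(40, 0));
	printf("DMA_BIT_MASK(40)   = %#llx (40 bits set)\n",
	       (unsigned long long)DMA_BIT_MASK(40));
	return 0;
}
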
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index 3735c09d2112..577659f332e4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
258 case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: 258 case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
259 nfp_tunnel_keep_alive(app, skb); 259 nfp_tunnel_keep_alive(app, skb);
260 break; 260 break;
261 case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
262 /* Acks from the NFP that the route is added - ignore. */
263 break;
264 default: 261 default:
265 nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", 262 nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
266 type); 263 type);
@@ -275,18 +272,49 @@ out:
275 272
276void nfp_flower_cmsg_process_rx(struct work_struct *work) 273void nfp_flower_cmsg_process_rx(struct work_struct *work)
277{ 274{
275 struct sk_buff_head cmsg_joined;
278 struct nfp_flower_priv *priv; 276 struct nfp_flower_priv *priv;
279 struct sk_buff *skb; 277 struct sk_buff *skb;
280 278
281 priv = container_of(work, struct nfp_flower_priv, cmsg_work); 279 priv = container_of(work, struct nfp_flower_priv, cmsg_work);
280 skb_queue_head_init(&cmsg_joined);
281
282 spin_lock_bh(&priv->cmsg_skbs_high.lock);
283 skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
284 spin_unlock_bh(&priv->cmsg_skbs_high.lock);
282 285
283 while ((skb = skb_dequeue(&priv->cmsg_skbs))) 286 spin_lock_bh(&priv->cmsg_skbs_low.lock);
287 skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
288 spin_unlock_bh(&priv->cmsg_skbs_low.lock);
289
290 while ((skb = __skb_dequeue(&cmsg_joined)))
284 nfp_flower_cmsg_process_one_rx(priv->app, skb); 291 nfp_flower_cmsg_process_one_rx(priv->app, skb);
285} 292}
286 293
287void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) 294static void
295nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
288{ 296{
289 struct nfp_flower_priv *priv = app->priv; 297 struct nfp_flower_priv *priv = app->priv;
298 struct sk_buff_head *skb_head;
299
300 if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
301 type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
302 skb_head = &priv->cmsg_skbs_high;
303 else
304 skb_head = &priv->cmsg_skbs_low;
305
306 if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
307 nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
308 dev_kfree_skb_any(skb);
309 return;
310 }
311
312 skb_queue_tail(skb_head, skb);
313 schedule_work(&priv->cmsg_work);
314}
315
316void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
317{
290 struct nfp_flower_cmsg_hdr *cmsg_hdr; 318 struct nfp_flower_cmsg_hdr *cmsg_hdr;
291 319
292 cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); 320 cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
306 nfp_flower_process_mtu_ack(app, skb)) { 334 nfp_flower_process_mtu_ack(app, skb)) {
307 /* Handle MTU acks outside wq to prevent RTNL conflict. */ 335 /* Handle MTU acks outside wq to prevent RTNL conflict. */
308 dev_consume_skb_any(skb); 336 dev_consume_skb_any(skb);
337 } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
338 /* Acks from the NFP that the route is added - ignore. */
339 dev_consume_skb_any(skb);
309 } else { 340 } else {
310 skb_queue_tail(&priv->cmsg_skbs, skb); 341 nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
311 schedule_work(&priv->cmsg_work);
312 } 342 }
313} 343}
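
The rework above replaces the single cmsg_skbs queue with a high/low pair and drains both, each under its own lock, into a private list, so PORT_REIFY and PORT_MOD acks cannot be starved behind a backlog of bulk messages and the actual processing runs lock-free. A minimal stand-alone sketch of the same splice-then-process idiom (names hypothetical, assuming only <linux/skbuff.h>):

#include <linux/skbuff.h>

/* Drain two spinlock-protected queues, highest priority first, into a
 * local list, then process without holding either lock.
 */
static void drain_and_process(struct sk_buff_head *high,
			      struct sk_buff_head *low)
{
	struct sk_buff_head joined;
	struct sk_buff *skb;

	skb_queue_head_init(&joined);

	spin_lock_bh(&high->lock);
	skb_queue_splice_tail_init(high, &joined);
	spin_unlock_bh(&high->lock);

	spin_lock_bh(&low->lock);
	skb_queue_splice_tail_init(low, &joined);
	spin_unlock_bh(&low->lock);

	while ((skb = __skb_dequeue(&joined)))
		consume_skb(skb);	/* stand-in for the real handler */
}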
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 96bc0e33980c..b6c0fd053a50 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -108,6 +108,8 @@
108#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) 108#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
109#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) 109#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
110 110
111#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
112
111#define nfp_flower_cmsg_warn(app, fmt, args...) \ 113#define nfp_flower_cmsg_warn(app, fmt, args...) \
112 do { \ 114 do { \
113 if (net_ratelimit()) \ 115 if (net_ratelimit()) \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 6357e0720f43..ad02592a82b7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app)
519 519
520 app->priv = app_priv; 520 app->priv = app_priv;
521 app_priv->app = app; 521 app_priv->app = app;
522 skb_queue_head_init(&app_priv->cmsg_skbs); 522 skb_queue_head_init(&app_priv->cmsg_skbs_high);
523 skb_queue_head_init(&app_priv->cmsg_skbs_low);
523 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); 524 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
524 init_waitqueue_head(&app_priv->reify_wait_queue); 525 init_waitqueue_head(&app_priv->reify_wait_queue);
525 526
@@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app)
549{ 550{
550 struct nfp_flower_priv *app_priv = app->priv; 551 struct nfp_flower_priv *app_priv = app->priv;
551 552
552 skb_queue_purge(&app_priv->cmsg_skbs); 553 skb_queue_purge(&app_priv->cmsg_skbs_high);
554 skb_queue_purge(&app_priv->cmsg_skbs_low);
553 flush_work(&app_priv->cmsg_work); 555 flush_work(&app_priv->cmsg_work);
554 556
555 nfp_flower_metadata_cleanup(app); 557 nfp_flower_metadata_cleanup(app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e030b3ce4510..c67e1b54c614 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -107,7 +107,10 @@ struct nfp_mtu_conf {
107 * @mask_table: Hash table used to store masks 107 * @mask_table: Hash table used to store masks
108 * @flow_table: Hash table used to store flower rules 108 * @flow_table: Hash table used to store flower rules
109 * @cmsg_work: Workqueue for control messages processing 109 * @cmsg_work: Workqueue for control messages processing
110 * @cmsg_skbs: List of skbs for control message processing 110 * @cmsg_skbs_high: List of higher priority skbs for control message
111 * processing
112 * @cmsg_skbs_low: List of lower priority skbs for control message
113 * processing
111 * @nfp_mac_off_list: List of MAC addresses to offload 114 * @nfp_mac_off_list: List of MAC addresses to offload
112 * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs 115 * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
113 * @nfp_ipv4_off_list: List of IPv4 addresses to offload 116 * @nfp_ipv4_off_list: List of IPv4 addresses to offload
@@ -136,7 +139,8 @@ struct nfp_flower_priv {
136 DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); 139 DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
137 DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); 140 DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
138 struct work_struct cmsg_work; 141 struct work_struct cmsg_work;
139 struct sk_buff_head cmsg_skbs; 142 struct sk_buff_head cmsg_skbs_high;
143 struct sk_buff_head cmsg_skbs_low;
140 struct list_head nfp_mac_off_list; 144 struct list_head nfp_mac_off_list;
141 struct list_head nfp_mac_index_list; 145 struct list_head nfp_mac_index_list;
142 struct list_head nfp_ipv4_off_list; 146 struct list_head nfp_ipv4_off_list;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index f7b958181126..cb28ac03e4ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
211 break; 211 break;
212 212
213 err = msleep_interruptible(timeout_ms); 213 err = msleep_interruptible(timeout_ms);
214 if (err != 0) 214 if (err != 0) {
215 nfp_info(mutex->cpp,
216 "interrupted waiting for NFP mutex\n");
215 return -ERESTARTSYS; 217 return -ERESTARTSYS;
218 }
216 219
217 if (time_is_before_eq_jiffies(warn_at)) { 220 if (time_is_before_eq_jiffies(warn_at)) {
218 warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; 221 warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 99bb679a9801..2abee0fe3a7c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
281 if ((*reg & mask) == val) 281 if ((*reg & mask) == val)
282 return 0; 282 return 0;
283 283
284 if (msleep_interruptible(25)) 284 msleep(25);
285 return -ERESTARTSYS;
286 285
287 if (time_after(start_time, wait_until)) 286 if (time_after(start_time, wait_until))
288 return -ETIMEDOUT; 287 return -ETIMEDOUT;
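
Dropping msleep_interruptible() here matters because a pending signal makes it return immediately without sleeping, so a polling loop built on it either degenerates into a busy spin or bails out with -ERESTARTSYS that the NSP callers cannot restart. The fixed idiom is an uninterruptible bounded poll, sketched below with hypothetical names:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int poll_until(bool (*done)(void *), void *ctx,
		      unsigned long timeout_jiffies)
{
	unsigned long deadline = jiffies + timeout_jiffies;

	for (;;) {
		if (done(ctx))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(25);	/* cannot be cut short by signals */
	}
}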
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index d33988570217..5f4e447c5dce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
350 350
351 real_dev = priv->real_dev; 351 real_dev = priv->real_dev;
352 352
353 if (!rmnet_is_real_dev_registered(real_dev))
354 return -ENODEV;
355
356 if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) 353 if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
357 goto nla_put_failure; 354 goto nla_put_failure;
358 355
359 port = rmnet_get_port_rtnl(real_dev); 356 if (rmnet_is_real_dev_registered(real_dev)) {
357 port = rmnet_get_port_rtnl(real_dev);
358 f.flags = port->data_format;
359 } else {
360 f.flags = 0;
361 }
360 362
361 f.flags = port->data_format;
362 f.mask = ~0; 363 f.mask = ~0;
363 364
364 if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) 365 if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
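
The rmnet change makes .fill_info degrade instead of fail: when the real device is no longer registered, a link dump now reports zeroed flags rather than returning -ENODEV, which would abort the whole netlink dump. A minimal sketch of a fill callback shaped the same way (function name and default value are illustrative):

#include <net/netlink.h>
#include <uapi/linux/if_link.h>

static int example_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	u16 mux_id = 0;	/* safe default when backing state is gone */

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, mux_id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}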
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 50daad0a1482..63036d9bf3e6 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
3999 atomic_set(&efx->active_queues, 0); 3999 atomic_set(&efx->active_queues, 0);
4000} 4000}
4001 4001
4002static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
4003 const struct efx_filter_spec *right)
4004{
4005 if ((left->match_flags ^ right->match_flags) |
4006 ((left->flags ^ right->flags) &
4007 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
4008 return false;
4009
4010 return memcmp(&left->outer_vid, &right->outer_vid,
4011 sizeof(struct efx_filter_spec) -
4012 offsetof(struct efx_filter_spec, outer_vid)) == 0;
4013}
4014
4015static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
4016{
4017 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
4018 return jhash2((const u32 *)&spec->outer_vid,
4019 (sizeof(struct efx_filter_spec) -
4020 offsetof(struct efx_filter_spec, outer_vid)) / 4,
4021 0);
4022 /* XXX should we randomise the initval? */
4023}
4024
4025/* Decide whether a filter should be exclusive or else should allow 4002/* Decide whether a filter should be exclusive or else should allow
4026 * delivery to additional recipients. Currently we decide that 4003 * delivery to additional recipients. Currently we decide that
4027 * filters for specific local unicast MAC and IP addresses are 4004 * filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4346 goto out_unlock; 4323 goto out_unlock;
4347 match_pri = rc; 4324 match_pri = rc;
4348 4325
4349 hash = efx_ef10_filter_hash(spec); 4326 hash = efx_filter_spec_hash(spec);
4350 is_mc_recip = efx_filter_is_mc_recipient(spec); 4327 is_mc_recip = efx_filter_is_mc_recipient(spec);
4351 if (is_mc_recip) 4328 if (is_mc_recip)
4352 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4329 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4378 if (!saved_spec) { 4355 if (!saved_spec) {
4379 if (ins_index < 0) 4356 if (ins_index < 0)
4380 ins_index = i; 4357 ins_index = i;
4381 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4358 } else if (efx_filter_spec_equal(spec, saved_spec)) {
4382 if (spec->priority < saved_spec->priority && 4359 if (spec->priority < saved_spec->priority &&
4383 spec->priority != EFX_FILTER_PRI_AUTO) { 4360 spec->priority != EFX_FILTER_PRI_AUTO) {
4384 rc = -EPERM; 4361 rc = -EPERM;
@@ -4762,28 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4762static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4739static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4763 unsigned int filter_idx) 4740 unsigned int filter_idx)
4764{ 4741{
4742 struct efx_filter_spec *spec, saved_spec;
4765 struct efx_ef10_filter_table *table; 4743 struct efx_ef10_filter_table *table;
4766 struct efx_filter_spec *spec; 4744 struct efx_arfs_rule *rule = NULL;
4767 bool ret; 4745 bool ret = true, force = false;
4746 u16 arfs_id;
4768 4747
4769 down_read(&efx->filter_sem); 4748 down_read(&efx->filter_sem);
4770 table = efx->filter_state; 4749 table = efx->filter_state;
4771 down_write(&table->lock); 4750 down_write(&table->lock);
4772 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4751 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4773 4752
4774 if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { 4753 if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
4775 ret = true;
4776 goto out_unlock; 4754 goto out_unlock;
4777 }
4778 4755
4779 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 4756 spin_lock_bh(&efx->rps_hash_lock);
4780 flow_id, filter_idx)) { 4757 if (!efx->rps_hash_table) {
4781 ret = false; 4758 /* In the absence of the table, we always return 0 to ARFS. */
4782 goto out_unlock; 4759 arfs_id = 0;
4760 } else {
4761 rule = efx_rps_hash_find(efx, spec);
4762 if (!rule)
4763 /* ARFS table doesn't know of this filter, so remove it */
4764 goto expire;
4765 arfs_id = rule->arfs_id;
4766 ret = efx_rps_check_rule(rule, filter_idx, &force);
4767 if (force)
4768 goto expire;
4769 if (!ret) {
4770 spin_unlock_bh(&efx->rps_hash_lock);
4771 goto out_unlock;
4772 }
4783 } 4773 }
4784 4774 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
4775 ret = false;
4776 else if (rule)
4777 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
4778expire:
4779 saved_spec = *spec; /* remove operation will kfree spec */
4780 spin_unlock_bh(&efx->rps_hash_lock);
4781 /* At this point (since we dropped the lock), another thread might queue
4782 * up a fresh insertion request (but the actual insertion will be held
4783 * up by our possession of the filter table lock). In that case, it
4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4785 * the rule is not removed by efx_rps_hash_del() below.
4786 */
4785 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4787 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4786 filter_idx, true) == 0; 4788 filter_idx, true) == 0;
4789 /* While we can't safely dereference rule (we dropped the lock), we can
4790 * still test it for NULL.
4791 */
4792 if (ret && rule) {
4793 /* Expiring, so remove entry from ARFS table */
4794 spin_lock_bh(&efx->rps_hash_lock);
4795 efx_rps_hash_del(efx, &saved_spec);
4796 spin_unlock_bh(&efx->rps_hash_lock);
4797 }
4787out_unlock: 4798out_unlock:
4788 up_write(&table->lock); 4799 up_write(&table->lock);
4789 up_read(&efx->filter_sem); 4800 up_read(&efx->filter_sem);
@@ -5265,7 +5276,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5265 ids = vlan->uc; 5276 ids = vlan->uc;
5266 } 5277 }
5267 5278
5268 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5279 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5269 5280
5270 /* Insert/renew filters */ 5281 /* Insert/renew filters */
5271 for (i = 0; i < addr_count; i++) { 5282 for (i = 0; i < addr_count; i++) {
@@ -5334,7 +5345,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5334 int rc; 5345 int rc;
5335 u16 *id; 5346 u16 *id;
5336 5347
5337 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5348 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
5338 5349
5339 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5350 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5340 5351
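
efx_ef10_filter_hash() and efx_ef10_filter_equal() move to common code (efx.c, below) so the ARFS bookkeeping shared by EF10 and Falcon-architecture NICs hashes and compares specs the same way. The jhash2() call is why the BUILD_BUG_ON exists: it consumes u32 words, so the hashed tail must start 4-byte aligned and span a multiple of 4 bytes. A self-contained illustration with a made-up struct:

#include <linux/build_bug.h>
#include <linux/jhash.h>
#include <linux/stddef.h>

struct spec {
	u32 head_only;		/* excluded from the hash */
	u16 outer_vid;		/* hash covers from here to the end */
	u16 tail[5];
};

static u32 spec_hash(const struct spec *s)
{
	BUILD_BUG_ON(offsetof(struct spec, outer_vid) & 3);
	return jhash2((const u32 *)&s->outer_vid,
		      (sizeof(*s) - offsetof(struct spec, outer_vid)) / 4,
		      0);
}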
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 692dd729ee2a..a4ebd8715494 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx,
3027 mutex_init(&efx->mac_lock); 3027 mutex_init(&efx->mac_lock);
3028#ifdef CONFIG_RFS_ACCEL 3028#ifdef CONFIG_RFS_ACCEL
3029 mutex_init(&efx->rps_mutex); 3029 mutex_init(&efx->rps_mutex);
3030 spin_lock_init(&efx->rps_hash_lock);
3031 /* Failure to allocate is not fatal, but may degrade ARFS performance */
3032 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3033 sizeof(*efx->rps_hash_table), GFP_KERNEL);
3030#endif 3034#endif
3031 efx->phy_op = &efx_dummy_phy_operations; 3035 efx->phy_op = &efx_dummy_phy_operations;
3032 efx->mdio.dev = net_dev; 3036 efx->mdio.dev = net_dev;
@@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx)
3070{ 3074{
3071 int i; 3075 int i;
3072 3076
3077#ifdef CONFIG_RFS_ACCEL
3078 kfree(efx->rps_hash_table);
3079#endif
3080
3073 for (i = 0; i < EFX_MAX_CHANNELS; i++) 3081 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3074 kfree(efx->channel[i]); 3082 kfree(efx->channel[i]);
3075 3083
@@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3092 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); 3100 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3093} 3101}
3094 3102
3103bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3104 const struct efx_filter_spec *right)
3105{
3106 if ((left->match_flags ^ right->match_flags) |
3107 ((left->flags ^ right->flags) &
3108 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3109 return false;
3110
3111 return memcmp(&left->outer_vid, &right->outer_vid,
3112 sizeof(struct efx_filter_spec) -
3113 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3114}
3115
3116u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3117{
3118 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3119 return jhash2((const u32 *)&spec->outer_vid,
3120 (sizeof(struct efx_filter_spec) -
3121 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3122 0);
3123}
3124
3125#ifdef CONFIG_RFS_ACCEL
3126bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3127 bool *force)
3128{
3129 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3130 /* ARFS is currently updating this entry, leave it */
3131 return false;
3132 }
3133 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3134 /* ARFS tried and failed to update this, so it's probably out
3135 * of date. Remove the filter and the ARFS rule entry.
3136 */
3137 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3138 *force = true;
3139 return true;
3140 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3141 /* ARFS has moved on, so old filter is not needed. Since we did
3142 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3143 * not be removed by efx_rps_hash_del() subsequently.
3144 */
3145 *force = true;
3146 return true;
3147 }
3148 /* Remove it iff ARFS wants to. */
3149 return true;
3150}
3151
3152struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3153 const struct efx_filter_spec *spec)
3154{
3155 u32 hash = efx_filter_spec_hash(spec);
3156
3157 WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3158 if (!efx->rps_hash_table)
3159 return NULL;
3160 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3161}
3162
3163struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3164 const struct efx_filter_spec *spec)
3165{
3166 struct efx_arfs_rule *rule;
3167 struct hlist_head *head;
3168 struct hlist_node *node;
3169
3170 head = efx_rps_hash_bucket(efx, spec);
3171 if (!head)
3172 return NULL;
3173 hlist_for_each(node, head) {
3174 rule = container_of(node, struct efx_arfs_rule, node);
3175 if (efx_filter_spec_equal(spec, &rule->spec))
3176 return rule;
3177 }
3178 return NULL;
3179}
3180
3181struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3182 const struct efx_filter_spec *spec,
3183 bool *new)
3184{
3185 struct efx_arfs_rule *rule;
3186 struct hlist_head *head;
3187 struct hlist_node *node;
3188
3189 head = efx_rps_hash_bucket(efx, spec);
3190 if (!head)
3191 return NULL;
3192 hlist_for_each(node, head) {
3193 rule = container_of(node, struct efx_arfs_rule, node);
3194 if (efx_filter_spec_equal(spec, &rule->spec)) {
3195 *new = false;
3196 return rule;
3197 }
3198 }
3199 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3200 *new = true;
3201 if (rule) {
3202 memcpy(&rule->spec, spec, sizeof(rule->spec));
3203 hlist_add_head(&rule->node, head);
3204 }
3205 return rule;
3206}
3207
3208void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3209{
3210 struct efx_arfs_rule *rule;
3211 struct hlist_head *head;
3212 struct hlist_node *node;
3213
3214 head = efx_rps_hash_bucket(efx, spec);
3215 if (WARN_ON(!head))
3216 return;
3217 hlist_for_each(node, head) {
3218 rule = container_of(node, struct efx_arfs_rule, node);
3219 if (efx_filter_spec_equal(spec, &rule->spec)) {
3220 /* Someone already reused the entry. We know that if
3221 * this check doesn't fire (i.e. filter_id == REMOVING)
3222 * then the REMOVING mark was put there by our caller,
 3223 * because the caller is holding a lock on the filter table and
3224 * only holders of that lock set REMOVING.
3225 */
3226 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3227 return;
3228 hlist_del(node);
3229 kfree(rule);
3230 return;
3231 }
3232 }
3233 /* We didn't find it. */
3234 WARN_ON(1);
3235}
3236#endif
3237
3095/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because 3238/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
3096 * (a) this is an infrequent control-plane operation and (b) n is small (max 64) 3239 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3097 */ 3240 */
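
The new ARFS helpers are a plain fixed-size chained hash table: efx_filter_spec_hash() selects one of EFX_ARFS_HASH_TABLE_SIZE buckets and lookups walk the chain with hlist_for_each() plus container_of(). The pattern in isolation (hypothetical entry type):

#include <linux/list.h>

struct bucket_entry {
	struct hlist_node node;
	u32 key;
};

static struct bucket_entry *table_find(struct hlist_head *table,
				       unsigned int nbuckets, u32 key)
{
	struct hlist_head *head = &table[key % nbuckets];
	struct hlist_node *n;
	struct bucket_entry *e;

	hlist_for_each(n, head) {
		e = container_of(n, struct bucket_entry, node);
		if (e->key == key)
			return e;
	}
	return NULL;
}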
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a3140e16fcef..3f759ebdcf10 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
186#endif 186#endif
187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); 187bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
188 188
189bool efx_filter_spec_equal(const struct efx_filter_spec *left,
190 const struct efx_filter_spec *right);
191u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
192
193#ifdef CONFIG_RFS_ACCEL
194bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
195 bool *force);
196
197struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
198 const struct efx_filter_spec *spec);
199
200/* @new is written to indicate if entry was newly added (true) or if an old
201 * entry was found and returned (false).
202 */
203struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
204 const struct efx_filter_spec *spec,
205 bool *new);
206
207void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
208#endif
209
189/* RSS contexts */ 210/* RSS contexts */
190struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); 211struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
191struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); 212struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4a19c7efdf8d..c72adf8b52ea 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2905{ 2905{
2906 struct efx_farch_filter_state *state = efx->filter_state; 2906 struct efx_farch_filter_state *state = efx->filter_state;
2907 struct efx_farch_filter_table *table; 2907 struct efx_farch_filter_table *table;
2908 bool ret = false; 2908 bool ret = false, force = false;
2909 u16 arfs_id;
2909 2910
2910 down_write(&state->lock); 2911 down_write(&state->lock);
2912 spin_lock_bh(&efx->rps_hash_lock);
2911 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2912 if (test_bit(index, table->used_bitmap) && 2914 if (test_bit(index, table->used_bitmap) &&
2913 table->spec[index].priority == EFX_FILTER_PRI_HINT && 2915 table->spec[index].priority == EFX_FILTER_PRI_HINT) {
2914 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, 2916 struct efx_arfs_rule *rule = NULL;
2915 flow_id, index)) { 2917 struct efx_filter_spec spec;
2916 efx_farch_filter_table_clear_entry(efx, table, index); 2918
2917 ret = true; 2919 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2920 if (!efx->rps_hash_table) {
2921 /* In the absence of the table, we always returned 0 to
 2922 * ARFS, so use the same value to query it.
2923 */
2924 arfs_id = 0;
2925 } else {
2926 rule = efx_rps_hash_find(efx, &spec);
2927 if (!rule) {
2928 /* ARFS table doesn't know of this filter, remove it */
2929 force = true;
2930 } else {
2931 arfs_id = rule->arfs_id;
2932 if (!efx_rps_check_rule(rule, index, &force))
2933 goto out_unlock;
2934 }
2935 }
2936 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2937 flow_id, arfs_id)) {
2938 if (rule)
2939 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
2940 efx_rps_hash_del(efx, &spec);
2941 efx_farch_filter_table_clear_entry(efx, table, index);
2942 ret = true;
2943 }
2918 } 2944 }
2919 2945out_unlock:
2946 spin_unlock_bh(&efx->rps_hash_lock);
2920 up_write(&state->lock); 2947 up_write(&state->lock);
2921 return ret; 2948 return ret;
2922} 2949}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 5e379a83c729..65568925c3ef 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -733,6 +733,56 @@ struct efx_rss_context {
733 u32 rx_indir_table[128]; 733 u32 rx_indir_table[128];
734}; 734};
735 735
736#ifdef CONFIG_RFS_ACCEL
737/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
 738 * is used to test whether the filter does or will exist.
739 */
740#define EFX_ARFS_FILTER_ID_PENDING -1
741#define EFX_ARFS_FILTER_ID_ERROR -2
742#define EFX_ARFS_FILTER_ID_REMOVING -3
743/**
744 * struct efx_arfs_rule - record of an ARFS filter and its IDs
745 * @node: linkage into hash table
746 * @spec: details of the filter (used as key for hash table). Use efx->type to
747 * determine which member to use.
748 * @rxq_index: channel to which the filter will steer traffic.
749 * @arfs_id: filter ID which was returned to ARFS
750 * @filter_id: index in software filter table. May be
751 * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
752 * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
753 * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
754 */
755struct efx_arfs_rule {
756 struct hlist_node node;
757 struct efx_filter_spec spec;
758 u16 rxq_index;
759 u16 arfs_id;
760 s32 filter_id;
761};
762
763/* Size chosen so that the table is one page (4kB) */
764#define EFX_ARFS_HASH_TABLE_SIZE 512
765
766/**
767 * struct efx_async_filter_insertion - Request to asynchronously insert a filter
768 * @net_dev: Reference to the netdevice
769 * @spec: The filter to insert
770 * @work: Workitem for this request
771 * @rxq_index: Identifies the channel for which this request was made
772 * @flow_id: Identifies the kernel-side flow for which this request was made
773 */
774struct efx_async_filter_insertion {
775 struct net_device *net_dev;
776 struct efx_filter_spec spec;
777 struct work_struct work;
778 u16 rxq_index;
779 u32 flow_id;
780};
781
782/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
783#define EFX_RPS_MAX_IN_FLIGHT 8
784#endif /* CONFIG_RFS_ACCEL */
785
736/** 786/**
737 * struct efx_nic - an Efx NIC 787 * struct efx_nic - an Efx NIC
738 * @name: Device name (net device name or bus id before net device registered) 788 * @name: Device name (net device name or bus id before net device registered)
@@ -850,6 +900,12 @@ struct efx_rss_context {
850 * @rps_expire_channel: Next channel to check for expiry 900 * @rps_expire_channel: Next channel to check for expiry
851 * @rps_expire_index: Next index to check for expiry in 901 * @rps_expire_index: Next index to check for expiry in
852 * @rps_expire_channel's @rps_flow_id 902 * @rps_expire_channel's @rps_flow_id
903 * @rps_slot_map: bitmap of in-flight entries in @rps_slot
904 * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
905 * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
906 * @rps_next_id).
907 * @rps_hash_table: Mapping between ARFS filters and their various IDs
908 * @rps_next_id: next arfs_id for an ARFS filter
853 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 909 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
854 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 910 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
855 * Decremented when the efx_flush_rx_queue() is called. 911 * Decremented when the efx_flush_rx_queue() is called.
@@ -1004,6 +1060,11 @@ struct efx_nic {
1004 struct mutex rps_mutex; 1060 struct mutex rps_mutex;
1005 unsigned int rps_expire_channel; 1061 unsigned int rps_expire_channel;
1006 unsigned int rps_expire_index; 1062 unsigned int rps_expire_index;
1063 unsigned long rps_slot_map;
1064 struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
1065 spinlock_t rps_hash_lock;
1066 struct hlist_head *rps_hash_table;
1067 u32 rps_next_id;
1007#endif 1068#endif
1008 1069
1009 atomic_t active_queues; 1070 atomic_t active_queues;
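
The ordering note on the EFX_ARFS_FILTER_ID_* constants above is load-bearing: with PENDING = -1, ERROR = -2 and REMOVING = -3, a single comparison separates "filter exists or is being inserted" from "failed or being torn down". Sketch:

static bool arfs_rule_live(const struct efx_arfs_rule *rule)
{
	/* >= 0: inserted (value is the filter index);
	 * == EFX_ARFS_FILTER_ID_PENDING: insertion in flight;
	 * ERROR and REMOVING sort below PENDING and test false here.
	 */
	return rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING;
}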
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 95682831484e..64a94f242027 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -827,31 +827,36 @@ MODULE_PARM_DESC(rx_refill_threshold,
827 827
828#ifdef CONFIG_RFS_ACCEL 828#ifdef CONFIG_RFS_ACCEL
829 829
830/**
831 * struct efx_async_filter_insertion - Request to asynchronously insert a filter
832 * @net_dev: Reference to the netdevice
833 * @spec: The filter to insert
834 * @work: Workitem for this request
835 * @rxq_index: Identifies the channel for which this request was made
836 * @flow_id: Identifies the kernel-side flow for which this request was made
837 */
838struct efx_async_filter_insertion {
839 struct net_device *net_dev;
840 struct efx_filter_spec spec;
841 struct work_struct work;
842 u16 rxq_index;
843 u32 flow_id;
844};
845
846static void efx_filter_rfs_work(struct work_struct *data) 830static void efx_filter_rfs_work(struct work_struct *data)
847{ 831{
848 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, 832 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
849 work); 833 work);
850 struct efx_nic *efx = netdev_priv(req->net_dev); 834 struct efx_nic *efx = netdev_priv(req->net_dev);
851 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 835 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
836 int slot_idx = req - efx->rps_slot;
837 struct efx_arfs_rule *rule;
838 u16 arfs_id = 0;
852 int rc; 839 int rc;
853 840
854 rc = efx->type->filter_insert(efx, &req->spec, false); 841 rc = efx->type->filter_insert(efx, &req->spec, true);
842 if (efx->rps_hash_table) {
843 spin_lock_bh(&efx->rps_hash_lock);
844 rule = efx_rps_hash_find(efx, &req->spec);
845 /* The rule might have already gone, if someone else's request
846 * for the same spec was already worked and then expired before
847 * we got around to our work. In that case we have nothing
848 * tying us to an arfs_id, meaning that as soon as the filter
849 * is considered for expiry it will be removed.
850 */
851 if (rule) {
852 if (rc < 0)
853 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
854 else
855 rule->filter_id = rc;
856 arfs_id = rule->arfs_id;
857 }
858 spin_unlock_bh(&efx->rps_hash_lock);
859 }
855 if (rc >= 0) { 860 if (rc >= 0) {
856 /* Remember this so we can check whether to expire the filter 861 /* Remember this so we can check whether to expire the filter
857 * later. 862 * later.
@@ -863,23 +868,23 @@ static void efx_filter_rfs_work(struct work_struct *data)
863 868
864 if (req->spec.ether_type == htons(ETH_P_IP)) 869 if (req->spec.ether_type == htons(ETH_P_IP))
865 netif_info(efx, rx_status, efx->net_dev, 870 netif_info(efx, rx_status, efx->net_dev,
866 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", 871 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
867 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 872 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
868 req->spec.rem_host, ntohs(req->spec.rem_port), 873 req->spec.rem_host, ntohs(req->spec.rem_port),
869 req->spec.loc_host, ntohs(req->spec.loc_port), 874 req->spec.loc_host, ntohs(req->spec.loc_port),
870 req->rxq_index, req->flow_id, rc); 875 req->rxq_index, req->flow_id, rc, arfs_id);
871 else 876 else
872 netif_info(efx, rx_status, efx->net_dev, 877 netif_info(efx, rx_status, efx->net_dev,
873 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", 878 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
874 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", 879 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
875 req->spec.rem_host, ntohs(req->spec.rem_port), 880 req->spec.rem_host, ntohs(req->spec.rem_port),
876 req->spec.loc_host, ntohs(req->spec.loc_port), 881 req->spec.loc_host, ntohs(req->spec.loc_port),
877 req->rxq_index, req->flow_id, rc); 882 req->rxq_index, req->flow_id, rc, arfs_id);
878 } 883 }
879 884
880 /* Release references */ 885 /* Release references */
886 clear_bit(slot_idx, &efx->rps_slot_map);
881 dev_put(req->net_dev); 887 dev_put(req->net_dev);
882 kfree(req);
883} 888}
884 889
885int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 890int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -887,23 +892,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
887{ 892{
888 struct efx_nic *efx = netdev_priv(net_dev); 893 struct efx_nic *efx = netdev_priv(net_dev);
889 struct efx_async_filter_insertion *req; 894 struct efx_async_filter_insertion *req;
895 struct efx_arfs_rule *rule;
890 struct flow_keys fk; 896 struct flow_keys fk;
897 int slot_idx;
898 bool new;
899 int rc;
891 900
892 if (flow_id == RPS_FLOW_ID_INVALID) 901 /* find a free slot */
893 return -EINVAL; 902 for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
903 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
904 break;
905 if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
906 return -EBUSY;
894 907
895 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) 908 if (flow_id == RPS_FLOW_ID_INVALID) {
896 return -EPROTONOSUPPORT; 909 rc = -EINVAL;
910 goto out_clear;
911 }
897 912
898 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) 913 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
899 return -EPROTONOSUPPORT; 914 rc = -EPROTONOSUPPORT;
900 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) 915 goto out_clear;
901 return -EPROTONOSUPPORT; 916 }
902 917
903 req = kmalloc(sizeof(*req), GFP_ATOMIC); 918 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
904 if (!req) 919 rc = -EPROTONOSUPPORT;
905 return -ENOMEM; 920 goto out_clear;
921 }
922 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
923 rc = -EPROTONOSUPPORT;
924 goto out_clear;
925 }
906 926
927 req = efx->rps_slot + slot_idx;
907 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, 928 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
908 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, 929 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
909 rxq_index); 930 rxq_index);
@@ -927,12 +948,45 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
927 req->spec.rem_port = fk.ports.src; 948 req->spec.rem_port = fk.ports.src;
928 req->spec.loc_port = fk.ports.dst; 949 req->spec.loc_port = fk.ports.dst;
929 950
951 if (efx->rps_hash_table) {
952 /* Add it to ARFS hash table */
953 spin_lock(&efx->rps_hash_lock);
954 rule = efx_rps_hash_add(efx, &req->spec, &new);
955 if (!rule) {
956 rc = -ENOMEM;
957 goto out_unlock;
958 }
959 if (new)
960 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
961 rc = rule->arfs_id;
962 /* Skip if existing or pending filter already does the right thing */
963 if (!new && rule->rxq_index == rxq_index &&
964 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
965 goto out_unlock;
966 rule->rxq_index = rxq_index;
967 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
968 spin_unlock(&efx->rps_hash_lock);
969 } else {
970 /* Without an ARFS hash table, we just use arfs_id 0 for all
971 * filters. This means if multiple flows hash to the same
972 * flow_id, all but the most recently touched will be eligible
973 * for expiry.
974 */
975 rc = 0;
976 }
977
978 /* Queue the request */
930 dev_hold(req->net_dev = net_dev); 979 dev_hold(req->net_dev = net_dev);
931 INIT_WORK(&req->work, efx_filter_rfs_work); 980 INIT_WORK(&req->work, efx_filter_rfs_work);
932 req->rxq_index = rxq_index; 981 req->rxq_index = rxq_index;
933 req->flow_id = flow_id; 982 req->flow_id = flow_id;
934 schedule_work(&req->work); 983 schedule_work(&req->work);
935 return 0; 984 return rc;
985out_unlock:
986 spin_unlock(&efx->rps_hash_lock);
987out_clear:
988 clear_bit(slot_idx, &efx->rps_slot_map);
989 return rc;
936} 990}
937 991
938bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) 992bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
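
efx_filter_rfs() runs in atomic context, so the GFP_ATOMIC kmalloc() of each request is replaced by a small preallocated pool (rps_slot) claimed through an atomic bitmap; failing with -EBUSY also caps the ARFS work in flight at EFX_RPS_MAX_IN_FLIGHT. The claim/release pattern in isolation:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Returns a slot index now owned by the caller, or -EBUSY. Safe from
 * atomic context; test_and_set_bit() is the only synchronization.
 */
static int claim_slot(unsigned long *map, unsigned int nr_slots)
{
	unsigned int i;

	for (i = 0; i < nr_slots; i++)
		if (!test_and_set_bit(i, map))
			return i;
	return -EBUSY;
}

static void release_slot(unsigned long *map, unsigned int i)
{
	clear_bit(i, map);	/* pairs with test_and_set_bit() above */
}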
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index c7bff596c665..dedd40613090 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -347,7 +347,7 @@ enum power_event {
347#define MTL_RX_OVERFLOW_INT BIT(16) 347#define MTL_RX_OVERFLOW_INT BIT(16)
348 348
349/* Default operating mode of the MAC */ 349/* Default operating mode of the MAC */
350#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ 350#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
351 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) 351 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
352 352
353/* To dump the core regs excluding the Address Registers */ 353/* To dump the core regs excluding the Address Registers */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a3af92ebbca8..517b1f6736a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
31 31
32 value |= GMAC_CORE_INIT; 32 value |= GMAC_CORE_INIT;
33 33
34 /* Clear ACS bit because Ethernet switch tagging formats such as
35 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
36 * hardware to truncate packets on reception.
37 */
38 if (netdev_uses_dsa(dev))
39 value &= ~GMAC_CONFIG_ACS;
40
41 if (mtu > 1500) 34 if (mtu > 1500)
42 value |= GMAC_CONFIG_2K; 35 value |= GMAC_CONFIG_2K;
43 if (mtu > 2000) 36 if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a16931ce39d..b65e2d144698 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3495 3495
3496 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3496 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3497 * Type frames (LLC/LLC-SNAP) 3497 * Type frames (LLC/LLC-SNAP)
3498 *
3499 * llc_snap is never checked in GMAC >= 4, so this ACS
3500 * feature is always disabled and packets need to be
3501 * stripped manually.
3498 */ 3502 */
3499 if (unlikely(status != llc_snap)) 3503 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3504 unlikely(status != llc_snap))
3500 frame_len -= ETH_FCS_LEN; 3505 frame_len -= ETH_FCS_LEN;
3501 3506
3502 if (netif_msg_rx_status(priv)) { 3507 if (netif_msg_rx_status(priv)) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 30371274409d..74f828412055 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,7 +129,7 @@ do { \
129 129
130#define RX_PRIORITY_MAPPING 0x76543210 130#define RX_PRIORITY_MAPPING 0x76543210
131#define TX_PRIORITY_MAPPING 0x33221100 131#define TX_PRIORITY_MAPPING 0x33221100
132#define CPDMA_TX_PRIORITY_MAP 0x01234567 132#define CPDMA_TX_PRIORITY_MAP 0x76543210
133 133
134#define CPSW_VLAN_AWARE BIT(1) 134#define CPSW_VLAN_AWARE BIT(1)
135#define CPSW_RX_VLAN_ENCAP BIT(2) 135#define CPSW_RX_VLAN_ENCAP BIT(2)
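
The cpsw fix corrects a transposed TX priority map: assuming each 4-bit nibble of the register holds the mapped priority for input priority n (nibble 0 = priority 0), 0x76543210 is the identity mapping consistent with RX_PRIORITY_MAPPING, whereas 0x01234567 inverted it. The identity value can be derived mechanically:

static u32 identity_priority_map(void)
{
	u32 map = 0;
	int n;

	for (n = 0; n < 8; n++)
		map |= (u32)n << (4 * n);	/* nibble n holds n */
	return map;	/* = 0x76543210 */
}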
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9cbb0c8a896a..7de88b33d5b9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3277 3277
3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3278 err = netdev_upper_dev_link(real_dev, dev, extack);
3279 if (err < 0) 3279 if (err < 0)
3280 goto put_dev; 3280 goto unregister;
3281 3281
3282 /* need to be already registered so that ->init has run and 3282 /* need to be already registered so that ->init has run and
3283 * the MAC addr is set 3283 * the MAC addr is set
@@ -3316,8 +3316,7 @@ del_dev:
3316 macsec_del_dev(macsec); 3316 macsec_del_dev(macsec);
3317unlink: 3317unlink:
3318 netdev_upper_dev_unlink(real_dev, dev); 3318 netdev_upper_dev_unlink(real_dev, dev);
3319put_dev: 3319unregister:
3320 dev_put(real_dev);
3321 unregister_netdevice(dev); 3320 unregister_netdevice(dev);
3322 return err; 3321 return err;
3323} 3322}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c22e8e383247..25e2a099b71c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1393 if (err < 0) 1393 if (err < 0)
1394 goto error; 1394 goto error;
1395 1395
 1396 /* If a WOL event happened once, the LED[2] interrupt pin
 1397 * will not be cleared unless we read the interrupt status
1398 * register. If interrupts are in use, the normal interrupt
1399 * handling will clear the WOL event. Clear the WOL event
1400 * before enabling it if !phy_interrupt_is_valid()
1401 */
1402 if (!phy_interrupt_is_valid(phydev))
1403 phy_read(phydev, MII_M1011_IEVENT);
1404
1396 /* Enable the WOL interrupt */ 1405 /* Enable the WOL interrupt */
1397 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1406 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
1398 MII_88E1318S_PHY_CSIER_WOL_EIE); 1407 MII_88E1318S_PHY_CSIER_WOL_EIE);
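
The Marvell WOL fix deals with a latched, read-to-clear status bit: if nothing ever reads MII_M1011_IEVENT, a stale WOL event keeps the LED[2]/interrupt pin asserted. When an interrupt handler is attached, it performs that read as part of normal handling; otherwise the driver must do it by hand, as sketched below:

/* Read-to-clear: discard the value, the side effect is the point. */
static void clear_latched_event(struct phy_device *phydev, u32 status_reg)
{
	if (!phy_interrupt_is_valid(phydev))
		phy_read(phydev, status_reg);
}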
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0f293ef28935..a97ac8c12c4c 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -20,6 +20,7 @@
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/phy.h> 21#include <linux/phy.h>
22#include <linux/microchipphy.h> 22#include <linux/microchipphy.h>
23#include <linux/delay.h>
23 24
24#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 25#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
25#define DRIVER_DESC "Microchip LAN88XX PHY driver" 26#define DRIVER_DESC "Microchip LAN88XX PHY driver"
@@ -30,6 +31,16 @@ struct lan88xx_priv {
30 __u32 wolopts; 31 __u32 wolopts;
31}; 32};
32 33
34static int lan88xx_read_page(struct phy_device *phydev)
35{
36 return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
37}
38
39static int lan88xx_write_page(struct phy_device *phydev, int page)
40{
41 return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
42}
43
33static int lan88xx_phy_config_intr(struct phy_device *phydev) 44static int lan88xx_phy_config_intr(struct phy_device *phydev)
34{ 45{
35 int rc; 46 int rc;
@@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev)
66 return 0; 77 return 0;
67} 78}
68 79
80static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
81 u32 data)
82{
83 int val, save_page, ret = 0;
84 u16 buf;
85
86 /* Save current page */
87 save_page = phy_save_page(phydev);
88 if (save_page < 0) {
89 pr_warn("Failed to get current page\n");
90 goto err;
91 }
92
93 /* Switch to TR page */
94 lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
95
96 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
97 (data & 0xFFFF));
98 if (ret < 0) {
99 pr_warn("Failed to write TR low data\n");
100 goto err;
101 }
102
103 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
104 (data & 0x00FF0000) >> 16);
105 if (ret < 0) {
106 pr_warn("Failed to write TR high data\n");
107 goto err;
108 }
109
110 /* Config control bits [15:13] of register */
 111 buf = (regaddr & ~(0x3 << 13)); /* Clear [14:13] to write data in reg */
112 buf |= 0x8000; /* Set [15] to Packet transmit */
113
114 ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
115 if (ret < 0) {
116 pr_warn("Failed to write data in reg\n");
117 goto err;
118 }
119
 120 usleep_range(1000, 2000); /* Wait for data to be written */
121 val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
122 if (!(val & 0x8000))
123 pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
124err:
125 return phy_restore_page(phydev, save_page, ret);
126}
127
128static void lan88xx_config_TR_regs(struct phy_device *phydev)
129{
130 int err;
131
 132 * Get access to Channel 0x1, Node 0xF, Register 0x01.
133 * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
134 * MrvlTrFix1000Kp, MasterEnableTR bits.
135 */
136 err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
137 if (err < 0)
138 pr_warn("Failed to Set Register[0x0F82]\n");
139
140 /* Get access to Channel b'10, Node b'1101, Register 0x06.
141 * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
142 * SSTrKp1000Mas bits.
143 */
144 err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
145 if (err < 0)
146 pr_warn("Failed to Set Register[0x168C]\n");
147
148 /* Get access to Channel b'10, Node b'1111, Register 0x11.
149 * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
150 * bits
151 */
152 err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
153 if (err < 0)
154 pr_warn("Failed to Set Register[0x17A2]\n");
155
156 /* Get access to Channel b'10, Node b'1101, Register 0x10.
157 * Write 24-bit value 0xEEFFDD to register. Setting
158 * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
159 * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
160 */
161 err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
162 if (err < 0)
163 pr_warn("Failed to Set Register[0x16A0]\n");
164
165 /* Get access to Channel b'10, Node b'1101, Register 0x13.
166 * Write 24-bit value 0x071448 to register. Setting
167 * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
168 */
169 err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
170 if (err < 0)
171 pr_warn("Failed to Set Register[0x16A6]\n");
172
173 /* Get access to Channel b'10, Node b'1101, Register 0x12.
174 * Write 24-bit value 0x13132F to register. Setting
175 * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
176 */
177 err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
178 if (err < 0)
179 pr_warn("Failed to Set Register[0x16A4]\n");
180
181 /* Get access to Channel b'10, Node b'1101, Register 0x14.
182 * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
183 * eee_TrKf_freeze_delay bits.
184 */
185 err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
186 if (err < 0)
187 pr_warn("Failed to Set Register[0x16A8]\n");
188
189 /* Get access to Channel b'01, Node b'1111, Register 0x34.
190 * Write 24-bit value 0x91B06C to register. Setting
191 * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
192 * FastMseSearchUpdGain1000 bits.
193 */
194 err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
195 if (err < 0)
196 pr_warn("Failed to Set Register[0x0FE8]\n");
197
198 /* Get access to Channel b'01, Node b'1111, Register 0x3E.
199 * Write 24-bit value 0xC0A028 to register. Setting
200 * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
201 * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
202 */
203 err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
204 if (err < 0)
205 pr_warn("Failed to Set Register[0x0FFC]\n");
206
207 /* Get access to Channel b'01, Node b'1111, Register 0x35.
208 * Write 24-bit value 0x041600 to register. Setting
209 * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
210 * FastMsePhChangeDelay1000 bits.
211 */
212 err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
213 if (err < 0)
214 pr_warn("Failed to Set Register[0x0FEA]\n");
215
216 /* Get access to Channel b'10, Node b'1101, Register 0x03.
217 * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
218 */
219 err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
220 if (err < 0)
221 pr_warn("Failed to Set Register[0x1686]\n");
222}
223
69static int lan88xx_probe(struct phy_device *phydev) 224static int lan88xx_probe(struct phy_device *phydev)
70{ 225{
71 struct device *dev = &phydev->mdio.dev; 226 struct device *dev = &phydev->mdio.dev;
@@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
132 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); 287 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
133} 288}
134 289
290static int lan88xx_config_init(struct phy_device *phydev)
291{
292 int val;
293
294 genphy_config_init(phydev);
 295 /* Zerodetect delay enable */
296 val = phy_read_mmd(phydev, MDIO_MMD_PCS,
297 PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
298 val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
299
300 phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
301 val);
302
303 /* Config DSP registers */
304 lan88xx_config_TR_regs(phydev);
305
306 return 0;
307}
308
135static int lan88xx_config_aneg(struct phy_device *phydev) 309static int lan88xx_config_aneg(struct phy_device *phydev)
136{ 310{
137 lan88xx_set_mdix(phydev); 311 lan88xx_set_mdix(phydev);
@@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = {
151 .probe = lan88xx_probe, 325 .probe = lan88xx_probe,
152 .remove = lan88xx_remove, 326 .remove = lan88xx_remove,
153 327
154 .config_init = genphy_config_init, 328 .config_init = lan88xx_config_init,
155 .config_aneg = lan88xx_config_aneg, 329 .config_aneg = lan88xx_config_aneg,
156 330
157 .ack_interrupt = lan88xx_phy_ack_interrupt, 331 .ack_interrupt = lan88xx_phy_ack_interrupt,
@@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = {
160 .suspend = lan88xx_suspend, 334 .suspend = lan88xx_suspend,
161 .resume = genphy_resume, 335 .resume = genphy_resume,
162 .set_wol = lan88xx_set_wol, 336 .set_wol = lan88xx_set_wol,
337 .read_page = lan88xx_read_page,
338 .write_page = lan88xx_write_page,
163} }; 339} };
164 340
165module_phy_driver(microchip_phy_driver); 341module_phy_driver(microchip_phy_driver);
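
The TR register writes above rely on the phylib paged-access contract: phy_save_page() takes the MDIO bus lock and returns the current page, __phy_read()/__phy_write() then run with the lock held, and phy_restore_page() switches back, drops the lock and propagates the first error (including a negative oldpage). A minimal paged read in that shape (helper name hypothetical):

static int paged_read(struct phy_device *phydev, int page, u32 reg)
{
	int oldpage, ret = 0;

	oldpage = phy_save_page(phydev);	/* takes the mdio bus lock */
	if (oldpage >= 0) {
		ret = __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
		if (ret == 0)
			ret = __phy_read(phydev, reg);
	}
	return phy_restore_page(phydev, oldpage, ret);
}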
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1483bc7b01e1..7df07337d69c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
620 lock_sock(sk); 620 lock_sock(sk);
621 621
622 error = -EINVAL; 622 error = -EINVAL;
623
624 if (sockaddr_len != sizeof(struct sockaddr_pppox))
625 goto end;
626
623 if (sp->sa_protocol != PX_PROTO_OE) 627 if (sp->sa_protocol != PX_PROTO_OE)
624 goto end; 628 goto end;
625 629
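
The pppoe check closes a classic connect() hole: sockaddr_len is caller-controlled, so protocol fields past the supplied length must not be read until the length has been validated. The general shape every *_connect() needs (sketch, illustrative names):

#include <linux/if_pppox.h>

static int example_connect(struct socket *sock, struct sockaddr *uaddr,
			   int sockaddr_len)
{
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uaddr;

	if (sockaddr_len != sizeof(struct sockaddr_pppox))
		return -EINVAL;	/* reject before reading sp->sa_protocol */

	if (sp->sa_protocol != PX_PROTO_OE)
		return -EINVAL;

	return 0;	/* real handler would proceed to bind the session */
}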
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a6c6ce19eeee..ddb6bf85a59c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
261 } 261 }
262} 262}
263 263
264static bool __team_option_inst_tmp_find(const struct list_head *opts,
265 const struct team_option_inst *needle)
266{
267 struct team_option_inst *opt_inst;
268
269 list_for_each_entry(opt_inst, opts, tmp_list)
270 if (opt_inst == needle)
271 return true;
272 return false;
273}
274
264static int __team_options_register(struct team *team, 275static int __team_options_register(struct team *team,
265 const struct team_option *option, 276 const struct team_option *option,
266 size_t option_count) 277 size_t option_count)
@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
1061} 1072}
1062 1073
1063#ifdef CONFIG_NET_POLL_CONTROLLER 1074#ifdef CONFIG_NET_POLL_CONTROLLER
1064static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1075static int __team_port_enable_netpoll(struct team_port *port)
1065{ 1076{
1066 struct netpoll *np; 1077 struct netpoll *np;
1067 int err; 1078 int err;
1068 1079
1069 if (!team->dev->npinfo)
1070 return 0;
1071
1072 np = kzalloc(sizeof(*np), GFP_KERNEL); 1080 np = kzalloc(sizeof(*np), GFP_KERNEL);
1073 if (!np) 1081 if (!np)
1074 return -ENOMEM; 1082 return -ENOMEM;
@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1082 return err; 1090 return err;
1083} 1091}
1084 1092
1093static int team_port_enable_netpoll(struct team_port *port)
1094{
1095 if (!port->team->dev->npinfo)
1096 return 0;
1097
1098 return __team_port_enable_netpoll(port);
1099}
1100
1085static void team_port_disable_netpoll(struct team_port *port) 1101static void team_port_disable_netpoll(struct team_port *port)
1086{ 1102{
1087 struct netpoll *np = port->np; 1103 struct netpoll *np = port->np;
@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
1096 kfree(np); 1112 kfree(np);
1097} 1113}
1098#else 1114#else
1099static int team_port_enable_netpoll(struct team *team, struct team_port *port) 1115static int team_port_enable_netpoll(struct team_port *port)
1100{ 1116{
1101 return 0; 1117 return 0;
1102} 1118}
@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1210 goto err_vids_add; 1226 goto err_vids_add;
1211 } 1227 }
1212 1228
1213 err = team_port_enable_netpoll(team, port); 1229 err = team_port_enable_netpoll(port);
1214 if (err) { 1230 if (err) {
1215 netdev_err(dev, "Failed to enable netpoll on device %s\n", 1231 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1216 portname); 1232 portname);
@@ -1907,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
1907 1923
1908 mutex_lock(&team->lock); 1924 mutex_lock(&team->lock);
1909 list_for_each_entry(port, &team->port_list, list) { 1925 list_for_each_entry(port, &team->port_list, list) {
1910 err = team_port_enable_netpoll(team, port); 1926 err = __team_port_enable_netpoll(port);
1911 if (err) { 1927 if (err) {
1912 __team_netpoll_cleanup(team); 1928 __team_netpoll_cleanup(team);
1913 break; 1929 break;
@@ -2568,6 +2584,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2568 if (err) 2584 if (err)
2569 goto team_put; 2585 goto team_put;
2570 opt_inst->changed = true; 2586 opt_inst->changed = true;
2587
2588 /* dumb/evil user-space can send us duplicate opt,
2589 * keep only the last one
2590 */
2591 if (__team_option_inst_tmp_find(&opt_inst_list,
2592 opt_inst))
2593 continue;
2594
2571 list_add(&opt_inst->tmp_list, &opt_inst_list); 2595 list_add(&opt_inst->tmp_list, &opt_inst_list);
2572 } 2596 }
2573 if (!opt_found) { 2597 if (!opt_found) {
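
The team fix guards list_add(): linking a node that is already on a list corrupts both lists, and a buggy or malicious netlink message can name the same option twice. Scanning the temporary list first is O(n^2), but n is bounded by the attributes in one message. The membership check in isolation:

#include <linux/list.h>

static bool on_list(const struct list_head *head,
		    const struct list_head *node)
{
	struct list_head *pos;

	list_for_each(pos, head)
		if (pos == node)
			return true;
	return false;
}

/* Usage: if (!on_list(&tmp_head, &item->tmp_list))
 *                list_add(&item->tmp_list, &tmp_head);
 */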
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28583aa0c17d..ef33950a45d9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1102 goto drop; 1102 goto drop;
1103 1103
1104 len = run_ebpf_filter(tun, skb, len); 1104 len = run_ebpf_filter(tun, skb, len);
1105 1105 if (len == 0 || pskb_trim(skb, len))
1106 /* Trim extra bytes since we may insert vlan proto & TCI
1107 * in tun_put_user().
1108 */
1109 len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
1110 if (len <= 0 || pskb_trim(skb, len))
1111 goto drop; 1106 goto drop;
1112 1107
1113 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1108 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
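
After the tun change, the eBPF verdict is applied directly: run_ebpf_filter() returns the number of bytes to keep, 0 means drop, and pskb_trim() shortens the skb (a no-op when the verdict is not smaller than skb->len, failing only if reallocating nonlinear data fails). The verdict-application step in isolation (hypothetical helper):

#include <linux/skbuff.h>

static int apply_len_verdict(struct sk_buff *skb, unsigned int keep)
{
	if (keep == 0)
		return -EPERM;		/* filter said drop */
	if (pskb_trim(skb, keep))
		return -ENOMEM;		/* nonlinear trim failed */
	return 0;			/* skb->len is now <= keep */
}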
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ca066b785e9f..c853e7410f5a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ 1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ 1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ 1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
1110 {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
1110 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 1111 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
1111 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 1112 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
1112 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ 1113 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b187ec7411e..770422e953f7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -147,6 +147,17 @@ struct receive_queue {
147 struct xdp_rxq_info xdp_rxq; 147 struct xdp_rxq_info xdp_rxq;
148}; 148};
149 149
150/* Control VQ buffers: protected by the rtnl lock */
151struct control_buf {
152 struct virtio_net_ctrl_hdr hdr;
153 virtio_net_ctrl_ack status;
154 struct virtio_net_ctrl_mq mq;
155 u8 promisc;
156 u8 allmulti;
157 __virtio16 vid;
158 __virtio64 offloads;
159};
160
150struct virtnet_info { 161struct virtnet_info {
151 struct virtio_device *vdev; 162 struct virtio_device *vdev;
152 struct virtqueue *cvq; 163 struct virtqueue *cvq;
@@ -192,14 +203,7 @@ struct virtnet_info {
192 struct hlist_node node; 203 struct hlist_node node;
193 struct hlist_node node_dead; 204 struct hlist_node node_dead;
194 205
195 /* Control VQ buffers: protected by the rtnl lock */ 206 struct control_buf *ctrl;
196 struct virtio_net_ctrl_hdr ctrl_hdr;
197 virtio_net_ctrl_ack ctrl_status;
198 struct virtio_net_ctrl_mq ctrl_mq;
199 u8 ctrl_promisc;
200 u8 ctrl_allmulti;
201 u16 ctrl_vid;
202 u64 ctrl_offloads;
203 207
204 /* Ethtool settings */ 208 /* Ethtool settings */
205 u8 duplex; 209 u8 duplex;
@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct receive_queue *rq =
 		container_of(napi, struct receive_queue, napi);
-	unsigned int received;
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	struct send_queue *sq;
+	unsigned int received, qp;
 	bool xdp_xmit = false;
 
 	virtnet_poll_cleantx(rq);
@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	if (received < budget)
 		virtqueue_napi_complete(napi, rq->vq, received);
 
-	if (xdp_xmit)
+	if (xdp_xmit) {
+		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
+		     smp_processor_id();
+		sq = &vi->sq[qp];
+		virtqueue_kick(sq->vq);
 		xdp_do_flush_map();
+	}
 
 	return received;
 }
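
Note: the kick added above must target the XDP transmit queue owned by the executing CPU. In this driver's layout the last xdp_queue_pairs of the curr_queue_pairs queue pairs are per-CPU XDP TX queues, so the index is derived from the counts plus the CPU id. That arithmetic, reduced to a standalone sketch (function name illustrative):

	/* CPU n owns queue index (curr - xdp) + n, because the XDP TX
	 * queues sit after the regular queue pairs.
	 */
	static unsigned int xdp_txq_index(unsigned int curr_queue_pairs,
					  unsigned int xdp_queue_pairs,
					  unsigned int cpu)
	{
		return curr_queue_pairs - xdp_queue_pairs + cpu;
	}
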
@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
-	vi->ctrl_status = ~0;
-	vi->ctrl_hdr.class = class;
-	vi->ctrl_hdr.cmd = cmd;
+	vi->ctrl->status = ~0;
+	vi->ctrl->hdr.class = class;
+	vi->ctrl->hdr.cmd = cmd;
 	/* Add header */
-	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
+	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
 	sgs[out_num++] = &hdr;
 
 	if (out)
 		sgs[out_num++] = out;
 
 	/* Add return status. */
-	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
+	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
 	sgs[out_num] = &stat;
 
 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
 	if (unlikely(!virtqueue_kick(vi->cvq)))
-		return vi->ctrl_status == VIRTIO_NET_OK;
+		return vi->ctrl->status == VIRTIO_NET_OK;
 
 	/* Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	       !virtqueue_is_broken(vi->cvq))
 		cpu_relax();
 
-	return vi->ctrl_status == VIRTIO_NET_OK;
+	return vi->ctrl->status == VIRTIO_NET_OK;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
+	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
 		return;
 
-	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
-	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
+	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
-	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
+	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-			 vi->ctrl_promisc ? "en" : "dis");
+			 vi->ctrl->promisc ? "en" : "dis");
 
-	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
+	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-			 vi->ctrl_allmulti ? "en" : "dis");
+			 vi->ctrl->allmulti ? "en" : "dis");
 
 	uc_count = netdev_uc_count(dev);
 	mc_count = netdev_mc_count(dev);
@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
 
-	vi->ctrl_vid = vid;
-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
 
-	vi->ctrl_vid = vid;
-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
 	struct scatterlist sg;
-	vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
+	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
 
-	sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
+	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 
 	kfree(vi->rq);
 	kfree(vi->sq);
+	kfree(vi->ctrl);
 }
 
 static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 {
 	int i;
 
+	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+	if (!vi->ctrl)
+		goto err_ctrl;
 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
 	if (!vi->sq)
 		goto err_sq;
@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 err_rq:
 	kfree(vi->sq);
 err_sq:
+	kfree(vi->ctrl);
+err_ctrl:
 	return -ENOMEM;
 }
 
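
Note: two things happen in this file. The control-VQ buffers move out of struct virtnet_info into a separately kzalloc'ed struct control_buf, presumably so the memory handed to sg_init_one() is guaranteed to be kmalloc-style (the netdev private area may be vmalloc-backed); along the way the VLAN id becomes a __virtio16 written via cpu_to_virtio16(), which matters on big-endian hosts. The alloc/free paths gain a matching err_ctrl step. A runnable user-space sketch of that label-per-allocation unwind pattern, with illustrative names:

	#include <stdlib.h>

	struct queues { void *ctrl, *sq, *rq; };

	/* one allocation per step, one label per step; a failure jumps
	 * to the label that frees exactly what was already allocated
	 */
	static int queues_alloc(struct queues *q)
	{
		q->ctrl = calloc(1, 128);
		if (!q->ctrl)
			goto err_ctrl;
		q->sq = calloc(4, 256);
		if (!q->sq)
			goto err_sq;
		q->rq = calloc(4, 256);
		if (!q->rq)
			goto err_rq;
		return 0;

	err_rq:
		free(q->sq);
	err_sq:
		free(q->ctrl);
	err_ctrl:
		return -1;
	}

	int main(void)
	{
		struct queues q;

		if (queues_alloc(&q))
			return 1;
		free(q.rq);
		free(q.sq);
		free(q.ctrl);
		return 0;
	}
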
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e04937f44f33..9ebe2a689966 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
 	union {
 		void *ptr;
 		struct ethhdr *eth;
+		struct vlan_ethhdr *veth;
 		struct iphdr *ipv4;
 		struct ipv6hdr *ipv6;
 		struct tcphdr *tcp;
@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
 	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
 		return 0;
 
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
+		hlen = sizeof(struct vlan_ethhdr);
+	else
+		hlen = sizeof(struct ethhdr);
+
 	hdr.eth = eth_hdr(skb);
 	if (gdesc->rcd.v4) {
-		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
-		hdr.ptr += sizeof(struct ethhdr);
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
+		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
+		hdr.ptr += hlen;
 		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
 		hlen = hdr.ipv4->ihl << 2;
 		hdr.ptr += hdr.ipv4->ihl << 2;
 	} else if (gdesc->rcd.v6) {
-		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
-		hdr.ptr += sizeof(struct ethhdr);
+		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
+		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
+		hdr.ptr += hlen;
 		/* Use an estimated value, since we also need to handle
 		 * TSO case.
 		 */
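
Note: the hunk above teaches the header parser that a VLAN-tagged frame carries an 18-byte rather than a 14-byte L2 header in front of the IP header. The selection logic, reduced to a pure function (constants match <linux/if_ether.h> and <linux/if_vlan.h>):

	#include <stdint.h>

	#define ETH_HLEN	14	/* untagged Ethernet header */
	#define VLAN_ETH_HLEN	18	/* Ethernet header + one 802.1Q tag */
	#define ETH_P_8021Q	0x8100
	#define ETH_P_8021AD	0x88A8

	/* a single VLAN tag grows the L2 header by 4 bytes, so parsing
	 * must skip the larger header before reading the IP header
	 */
	static unsigned int l2_hdr_len(uint16_t ethertype)
	{
		if (ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD)
			return VLAN_ETH_HLEN;
		return ETH_HLEN;
	}
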
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 59ec34052a65..a3326463b71f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.13.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.14.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040d00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040e00
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 85997184e047..9d36473dc2a2 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -103,8 +103,7 @@ config NVDIMM_DAX
 	  Select Y if unsure
 
 config OF_PMEM
-	# FIXME: make tristate once OF_NUMA dependency removed
-	bool "Device-tree support for persistent memory regions"
+	tristate "Device-tree support for persistent memory regions"
 	depends on OF
 	default LIBNVDIMM
 	help
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index e00d45522b80..8d348b22ba45 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
 {
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+	int rc = validate_dimm(ndd), cmd_rc = 0;
 	struct nd_cmd_get_config_data_hdr *cmd;
 	struct nvdimm_bus_descriptor *nd_desc;
-	int rc = validate_dimm(ndd);
 	u32 max_cmd_size, config_size;
 	size_t offset;
 
@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
 		cmd->in_offset = offset;
 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
 				ND_CMD_GET_CONFIG_DATA, cmd,
-				cmd->in_length + sizeof(*cmd), NULL);
-		if (rc || cmd->status) {
-			rc = -ENXIO;
+				cmd->in_length + sizeof(*cmd), &cmd_rc);
+		if (rc < 0)
+			break;
+		if (cmd_rc < 0) {
+			rc = cmd_rc;
 			break;
 		}
 		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 		void *buf, size_t len)
 {
-	int rc = validate_dimm(ndd);
 	size_t max_cmd_size, buf_offset;
 	struct nd_cmd_set_config_hdr *cmd;
+	int rc = validate_dimm(ndd), cmd_rc = 0;
 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
 
@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 	for (buf_offset = 0; len; len -= cmd->in_length,
 			buf_offset += cmd->in_length) {
 		size_t cmd_size;
-		u32 *status;
 
 		cmd->in_offset = offset + buf_offset;
 		cmd->in_length = min(max_cmd_size, len);
@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
 
 		/* status is output in the last 4-bytes of the command buffer */
 		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
-		status = ((void *) cmd) + cmd_size - sizeof(u32);
 
 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
-				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
-		if (rc || *status) {
-			rc = rc ? rc : -ENXIO;
+				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+		if (rc < 0)
+			break;
+		if (cmd_rc < 0) {
+			rc = cmd_rc;
 			break;
 		}
 	}
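
Note: both config-data paths above switch from peeking at a raw status word (and collapsing every failure into -ENXIO) to the ndctl out-parameter cmd_rc, which arrives already translated into an errno. The resulting two-level check, sketched with a hypothetical transport call (issue_cmd is illustrative, not a real API):

	/* hypothetical transport; a real one would talk to firmware */
	static int issue_cmd(void *dev, void *cmd, int *cmd_rc)
	{
		(void)dev; (void)cmd;
		*cmd_rc = 0;
		return 0;
	}

	static int get_config_chunk(void *dev, void *cmd)
	{
		int cmd_rc = 0;
		int rc = issue_cmd(dev, cmd, &cmd_rc);

		if (rc < 0)
			return rc;	/* transport failure */
		if (cmd_rc < 0)
			return cmd_rc;	/* firmware failure, already an errno */
		return 0;		/* payload in cmd is valid */
	}
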
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 85013bad35de..0a701837dfc0 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
 	 */
 	memset(&ndr_desc, 0, sizeof(ndr_desc));
 	ndr_desc.attr_groups = region_attr_groups;
-	ndr_desc.numa_node = of_node_to_nid(np);
+	ndr_desc.numa_node = dev_to_node(&pdev->dev);
 	ndr_desc.res = &pdev->resource[i];
 	ndr_desc.of_node = np;
 	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index a6b88c7f6e3e..d2970a009eb5 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
 		return ret;
 
 	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
-						      "reset-gpio", 0);
+						      "reset-gpios", 0);
 	if (kirin_pcie->gpio_id_reset < 0)
 		return -ENODEV;
 
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..9abf549631b4 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -29,6 +29,7 @@
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT	5
 #define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE		(0 << 11)
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT	12
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ		0x2
 #define PCIE_CORE_LINK_CTRL_STAT_REG				0xd0
 #define     PCIE_CORE_LINK_L0S_ENTRY				BIT(0)
 #define     PCIE_CORE_LINK_TRAINING				BIT(5)
@@ -100,7 +101,8 @@
 #define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
 #define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
 #define     PCIE_ISR1_FLUSH			BIT(5)
-#define     PCIE_ISR1_ALL_MASK			GENMASK(5, 4)
+#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
+#define     PCIE_ISR1_ALL_MASK			GENMASK(11, 4)
 #define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
 #define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
 #define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
@@ -172,8 +174,6 @@
 #define PCIE_CONFIG_WR_TYPE0			0xa
 #define PCIE_CONFIG_WR_TYPE1			0xb
 
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev)				(dev << 4)
 #define PCIE_CONF_BUS(bus)			(((bus) & 0xff) << 20)
 #define PCIE_CONF_DEV(dev)			(((dev) & 0x1f) << 15)
 #define PCIE_CONF_FUNC(fun)			(((fun) & 0x7) << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
 		(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
 		PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
-		PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+		(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+		 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
 
 	/* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 	u32 reg;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0) {
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
 	advk_writel(pcie, reg, PIO_CTRL);
 
 	/* Program the address registers */
-	reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
 	advk_writel(pcie, reg, PIO_ADDR_LS);
 	advk_writel(pcie, 0, PIO_ADDR_MS);
 
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 	int offset;
 	int ret;
 
-	if (PCI_SLOT(devfn) != 0)
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 	u32 mask;
 
-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-	mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
-	advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
+	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
 }
 
 static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 	u32 mask;
 
-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-	mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
-	advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
+	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
 }
 
 static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 
 static void advk_pcie_handle_int(struct advk_pcie *pcie)
 {
-	u32 val, mask, status;
+	u32 isr0_val, isr0_mask, isr0_status;
+	u32 isr1_val, isr1_mask, isr1_status;
 	int i, virq;
 
-	val = advk_readl(pcie, PCIE_ISR0_REG);
-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-	status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
+	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
+
+	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
+	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
 
-	if (!status) {
-		advk_writel(pcie, val, PCIE_ISR0_REG);
+	if (!isr0_status && !isr1_status) {
+		advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
+		advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
 		return;
 	}
 
 	/* Process MSI interrupts */
-	if (status & PCIE_ISR0_MSI_INT_PENDING)
+	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
 		advk_pcie_handle_msi(pcie);
 
 	/* Process legacy interrupts */
 	for (i = 0; i < PCI_NUM_INTX; i++) {
-		if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
 			continue;
 
-		advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
-			    PCIE_ISR0_REG);
+		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
+			    PCIE_ISR1_REG);
 
 		virq = irq_find_mapping(pcie->irq_domain, i);
 		generic_handle_irq(virq);
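
Note: after this change the INTx bits live in the ISR1 bank while MSI stays in ISR0, so the handler derives an active set per bank: pending AND NOT masked AND implemented. That filter as a pure helper:

	#include <stdint.h>

	/* an interrupt is acted on only if it is pending, not masked,
	 * and one of the bits the bank actually implements
	 */
	static uint32_t isr_active(uint32_t pending, uint32_t masked,
				   uint32_t all_mask)
	{
		return pending & ~masked & all_mask;
	}
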
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
 	 * devices should not be touched during freeze/thaw transitions,
 	 * however.
 	 */
-	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+	if (!dev_pm_smart_suspend_and_suspended(dev)) {
 		pm_runtime_resume(dev);
+		pci_dev->state_saved = false;
+	}
 
-	pci_dev->state_saved = false;
 	if (pm->freeze) {
 		int error;
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e597655a5643..a04197ce767d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5273,11 +5273,11 @@ void pcie_print_link_status(struct pci_dev *dev)
 	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
 
 	if (bw_avail >= bw_cap)
-		pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n",
+		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
 			 bw_cap / 1000, bw_cap % 1000,
 			 PCIE_SPEED2STR(speed_cap), width_cap);
 	else
-		pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
 			 bw_avail / 1000, bw_avail % 1000,
 			 PCIE_SPEED2STR(speed), width,
 			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
 	tx->callback = dma_xfer_callback;
 	tx->callback_param = req;
 
-	req->dmach = chan;
-	req->sync = sync;
 	req->status = DMA_IN_PROGRESS;
-	init_completion(&req->req_comp);
 	kref_get(&req->refcount);
 
 	cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	if (!req)
 		return -ENOMEM;
 
-	kref_init(&req->refcount);
-
 	ret = get_dma_channel(priv);
 	if (ret) {
 		kfree(req);
 		return ret;
 	}
+	chan = priv->dmach;
+
+	kref_init(&req->refcount);
+	init_completion(&req->req_comp);
+	req->dir = dir;
+	req->filp = filp;
+	req->priv = priv;
+	req->dmach = chan;
+	req->sync = sync;
 
 	/*
 	 * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 				xfer->offset, xfer->length);
 	}
 
-	req->dir = dir;
-	req->filp = filp;
-	req->priv = priv;
-	chan = priv->dmach;
-
 	nents = dma_map_sg(chan->device->dev,
 			   req->sgt.sgl, req->sgt.nents, dir);
 	if (nents == 0) {
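
Note: the reshuffle above initializes the refcounted request (kref, completion, channel, direction) immediately after the DMA channel is acquired and before any step that can fail, so every later error path can rely on one uniform drop-the-reference cleanup. A runnable toy model of that ordering (all names illustrative):

	#include <stdlib.h>

	struct req { int refs; void *chan; int dir; };

	/* the object becomes releasable (refcount held, fields set)
	 * before anything that can fail
	 */
	static struct req *req_create(void *chan, int dir)
	{
		struct req *r = calloc(1, sizeof(*r));

		if (!r)
			return NULL;
		r->refs = 1;	/* stand-in for kref_init() */
		r->chan = chan;
		r->dir = dir;
		return r;
	}

	static void req_put(struct req *r)
	{
		if (r && --r->refs == 0)
			free(r);
	}

	int main(void)
	{
		struct req *r = req_create((void *)0x1, 0);

		req_put(r);	/* any failure path may just drop the ref */
		return 0;
	}
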
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
 int dasd_alias_add_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct alias_lcu *lcu;
+	__u8 uaddr = private->uid.real_unit_addr;
+	struct alias_lcu *lcu = private->lcu;
 	unsigned long flags;
 	int rc;
 
-	lcu = private->lcu;
 	rc = 0;
 	spin_lock_irqsave(&lcu->lock, flags);
+	/*
+	 * Check if device and lcu type differ. If so, the uac data may be
+	 * outdated and needs to be updated.
+	 */
+	if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
+		lcu->flags |= UPDATE_PENDING;
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "uid type mismatch - trigger rescan");
+	}
 	if (!(lcu->flags & UPDATE_PENDING)) {
 		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f035c2f25d35..131f1989f6f3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -27,7 +27,6 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/vtoc.h>
-#include <asm/diag.h>
 
 #include "dasd_int.h"
 #include "dasd_diag.h"
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5f8d9ea69ebd..eceba3858cef 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -18,7 +18,7 @@ int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
  * Used to keep track of the size of the event masks. Qemu until version 2.11
  * only supports 4 and needs a workaround.
  */
-bool sclp_mask_compat_mode;
+bool sclp_mask_compat_mode __section(.data);
 
 void sclp_early_wait_irq(void)
 {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 {
+	struct channel_path *chp;
 	struct chp_link link;
 	struct chp_id chpid;
 	int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 	chpid.id = sei_area->rsid;
 	/* allocate a new channel path structure, if needed */
 	status = chp_get_status(chpid);
-	if (status < 0)
-		chp_new(chpid);
-	else if (!status)
+	if (!status)
 		return;
+
+	if (status < 0) {
+		chp_new(chpid);
+	} else {
+		chp = chpid_to_chp(chpid);
+		mutex_lock(&chp->lock);
+		chp_update_desc(chp);
+		mutex_unlock(&chp->lock);
+	}
 	memset(&link, 0, sizeof(struct chp_link));
 	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
 	int ccode;
 	__u8 lpm;
 	unsigned long flags;
+	int ret;
 
 	sch = private->sch;
 
 	spin_lock_irqsave(sch->lock, flags);
 	private->state = VFIO_CCW_STATE_BUSY;
-	spin_unlock_irqrestore(sch->lock, flags);
 
 	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
 
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
 		 * Initialize device status information
 		 */
 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
-		return 0;
+		ret = 0;
+		break;
 	case 1:		/* Status pending */
 	case 2:		/* Busy */
-		return -EBUSY;
+		ret = -EBUSY;
+		break;
 	case 3:		/* Device/path not operational */
 	{
 		lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
 			sch->lpm = 0;
 
 		if (cio_update_schib(sch))
-			return -ENODEV;
-
-		return sch->lpm ? -EACCES : -ENODEV;
+			ret = -ENODEV;
+		else
+			ret = sch->lpm ? -EACCES : -ENODEV;
+		break;
 	}
 	default:
-		return ccode;
+		ret = ccode;
 	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
 }
 
 static void fsm_notoper(struct vfio_ccw_private *private,
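
Note: the rewrite above converts every early return into "store the result and break", so the function has exactly one unlock and one return; the lock is now held across the whole operation and cannot be leaked on any exit. A compileable user-space sketch of the same single-exit shape:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* each case stores its result and breaks instead of returning,
	 * so there is exactly one unlock and one return
	 */
	static int start_io(int ccode)
	{
		int ret;

		pthread_mutex_lock(&lock);
		switch (ccode) {
		case 0:
			ret = 0;
			break;
		case 1:
		case 2:
			ret = -16;	/* -EBUSY */
			break;
		default:
			ret = ccode;
		}
		pthread_mutex_unlock(&lock);
		return ret;
	}
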
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 4326715dc13e..78b98b3e7efa 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
 enum qeth_cmd_buffer_state {
 	BUF_STATE_FREE,
 	BUF_STATE_LOCKED,
-	BUF_STATE_PROCESSED,
 };
 
 enum qeth_cq {
@@ -601,7 +600,6 @@ struct qeth_channel {
 	struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
 	atomic_t irq_pending;
 	int io_buf_no;
-	int buf_no;
 };
 
 /**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 04fefa5bb08d..dffd820731f2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
 		qeth_put_reply(reply);
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
-	atomic_set(&card->write.irq_pending, 0);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 
712 711
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
818 817
819 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) 818 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
820 qeth_release_buffer(channel, &channel->iob[cnt]); 819 qeth_release_buffer(channel, &channel->iob[cnt]);
821 channel->buf_no = 0;
822 channel->io_buf_no = 0; 820 channel->io_buf_no = 0;
823} 821}
824EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); 822EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
 			kfree(channel->iob[cnt].data);
 		return -ENOMEM;
 	}
-	channel->buf_no = 0;
 	channel->io_buf_no = 0;
 	atomic_set(&channel->irq_pending, 0);
 	spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 {
 	int rc;
 	int cstat, dstat;
-	struct qeth_cmd_buffer *buffer;
+	struct qeth_cmd_buffer *iob = NULL;
 	struct qeth_channel *channel;
 	struct qeth_card *card;
-	struct qeth_cmd_buffer *iob;
-	__u8 index;
-
-	if (__qeth_check_irb_error(cdev, intparm, irb))
-		return;
-	cstat = irb->scsw.cmd.cstat;
-	dstat = irb->scsw.cmd.dstat;
 
 	card = CARD_FROM_CDEV(cdev);
 	if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		channel = &card->data;
 		QETH_CARD_TEXT(card, 5, "data");
 	}
+
+	if (qeth_intparm_is_iob(intparm))
+		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+	if (__qeth_check_irb_error(cdev, intparm, irb)) {
+		/* IO was terminated, free its resources. */
+		if (iob)
+			qeth_release_buffer(iob->channel, iob);
+		atomic_set(&channel->irq_pending, 0);
+		wake_up(&card->wait_q);
+		return;
+	}
+
 	atomic_set(&channel->irq_pending, 0);
 
 	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		/* we don't have to handle this further */
 		intparm = 0;
 	}
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
 	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
 	    (dstat & DEV_STAT_UNIT_CHECK) ||
 	    (cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 			channel->state = CH_STATE_RCD_DONE;
 		goto out;
 	}
-	if (intparm) {
-		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
-		buffer->state = BUF_STATE_PROCESSED;
-	}
 	if (channel == &card->data)
 		return;
 	if (channel == &card->read &&
 	    channel->state == CH_STATE_UP)
 		__qeth_issue_next_read(card);
 
-	iob = channel->iob;
-	index = channel->buf_no;
-	while (iob[index].state == BUF_STATE_PROCESSED) {
-		if (iob[index].callback != NULL)
-			iob[index].callback(channel, iob + index);
+	if (iob && iob->callback)
+		iob->callback(iob->channel, iob);
 
-		index = (index + 1) % QETH_CMD_BUFFER_NO;
-	}
-	channel->buf_no = index;
 out:
 	wake_up(&card->wait_q);
 	return;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
 			atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-	rc = ccw_device_start(channel->ccwdev,
-			      &channel->ccw, (addr_t) iob, 0, 0);
+	rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 
 	if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
 	if (channel->state != CH_STATE_UP) {
 		rc = -ETIME;
 		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-		qeth_clear_cmd_buffers(channel);
 	} else
 		rc = 0;
 	return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
 			atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-	rc = ccw_device_start(channel->ccwdev,
-			      &channel->ccw, (addr_t) iob, 0, 0);
+	rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 
 	if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
 		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
 			dev_name(&channel->ccwdev->dev));
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
-		qeth_clear_cmd_buffers(channel);
 		return -ETIME;
 	}
 	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 
 	QETH_CARD_TEXT(card, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
-	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
-			      (addr_t) iob, 0, 0);
+	rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+				      (addr_t) iob, 0, 0, event_timeout);
 	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
 	if (rc) {
 		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 		}
 	}
 
-	if (reply->rc == -EIO)
-		goto error;
 	rc = reply->rc;
 	qeth_put_reply(reply);
 	return rc;
@@ -2211,10 +2204,6 @@ time_err:
 	list_del_init(&reply->list);
 	spin_unlock_irqrestore(&reply->card->lock, flags);
 	atomic_inc(&reply->received);
-error:
-	atomic_set(&card->write.irq_pending, 0);
-	qeth_release_buffer(iob->channel, iob);
-	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
 	rc = reply->rc;
 	qeth_put_reply(reply);
 	return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
 	return rc;
 }
 
-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
-		struct qeth_reply *reply, unsigned long data)
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
 {
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 4, "defadpcb");
-
-	cmd = (struct qeth_ipa_cmd *) data;
-	if (cmd->hdr.return_code == 0)
+	if (!cmd->hdr.return_code)
 		cmd->hdr.return_code =
 			cmd->data.setadapterparms.hdr.return_code;
-	return 0;
+	return cmd->hdr.return_code;
 }
 
 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 
 	QETH_CARD_TEXT(card, 3, "quyadpcb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
 
-	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
 		card->info.link_type =
 			cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
 	}
 	card->options.adp.supported_funcs =
 		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
-	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+	return 0;
 }
 
 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
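
Note: every setadapterparms callback in the remaining hunks is converted to the same shape: cast the payload once in the declaration, funnel the response through qeth_setadpparms_inspect_rc() (which folds the sub-header return code into the main one), and bail out early on error. The shared shape, as a sketch rather than an additional callback from the patch:

	static int example_setadp_cb(struct qeth_card *card,
				     struct qeth_reply *reply,
				     unsigned long data)
	{
		struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

		QETH_CARD_TEXT(card, 4, "examplcb");
		if (qeth_setadpparms_inspect_rc(cmd))
			return 0;	/* error already folded into cmd->hdr */

		/* success path: consume cmd->data.setadapterparms here */
		return 0;
	}
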
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
 				struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
-	struct qeth_switch_info *sw_info;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 	struct qeth_query_switch_attributes *attrs;
+	struct qeth_switch_info *sw_info;
 
 	QETH_CARD_TEXT(card, 2, "qswiatcb");
-	cmd = (struct qeth_ipa_cmd *) data;
-	sw_info = (struct qeth_switch_info *)reply->param;
-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
-		attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
-		sw_info->capabilities = attrs->capabilities;
-		sw_info->settings = attrs->settings;
-		QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
-			sw_info->settings);
-	}
-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
 
+	sw_info = (struct qeth_switch_info *)reply->param;
+	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+	sw_info->capabilities = attrs->capabilities;
+	sw_info->settings = attrs->settings;
+	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+			sw_info->settings);
 	return 0;
 }
 
@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 	struct qeth_ipacmd_setadpparms *setparms;
 
 	QETH_CARD_TEXT(card, 4, "prmadpcb");
 
-	cmd = (struct qeth_ipa_cmd *) data;
 	setparms = &(cmd->data.setadapterparms);
-
-	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
-	if (cmd->hdr.return_code) {
+	if (qeth_setadpparms_inspect_rc(cmd)) {
 		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
 		setparms->data.mode = SET_PROMISC_MODE_OFF;
 	}
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 
 	QETH_CARD_TEXT(card, 4, "chgmaccb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
 
-	cmd = (struct qeth_ipa_cmd *) data;
 	if (!card->options.layer2 ||
 	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
 		ether_addr_copy(card->dev->dev_addr,
 				cmd->data.setadapterparms.data.change_addr.addr);
 		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
 	}
-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
 	return 0;
 }
 
@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 	struct qeth_set_access_ctrl *access_ctrl_req;
 	int fallback = *(int *)reply->param;
 
 	QETH_CARD_TEXT(card, 4, "setaccb");
+	if (cmd->hdr.return_code)
+		return 0;
+	qeth_setadpparms_inspect_rc(cmd);
 
-	cmd = (struct qeth_ipa_cmd *) data;
 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
 	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 		card->options.isolation = card->options.prev_isolation;
 		break;
 	}
-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
 	return 0;
 }
 
@@ -4695,14 +4675,15 @@ out:
 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 	struct qeth_qoat_priv *priv;
 	char *resdata;
 	int resdatalen;
 
 	QETH_CARD_TEXT(card, 3, "qoatcb");
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
 
-	cmd = (struct qeth_ipa_cmd *)data;
 	priv = (struct qeth_qoat_priv *)reply->param;
 	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
 	resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
 static int qeth_query_card_info_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
 {
-	struct qeth_ipa_cmd *cmd;
+	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 	struct qeth_query_card_info *card_info;
-	struct carrier_info *carrier_info;
 
 	QETH_CARD_TEXT(card, 2, "qcrdincb");
-	carrier_info = (struct carrier_info *)reply->param;
-	cmd = (struct qeth_ipa_cmd *)data;
-	card_info = &cmd->data.setadapterparms.data.card_info;
-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
-		carrier_info->card_type = card_info->card_type;
-		carrier_info->port_mode = card_info->port_mode;
-		carrier_info->port_speed = card_info->port_speed;
-	}
+	if (qeth_setadpparms_inspect_rc(cmd))
+		return 0;
 
-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+	card_info = &cmd->data.setadapterparms.data.card_info;
+	carrier_info->card_type = card_info->card_type;
+	carrier_info->port_mode = card_info->port_mode;
+	carrier_info->port_speed = card_info->port_speed;
 	return 0;
 }
 
@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
 		goto out;
 	}
 
-	ccw_device_get_id(CARD_DDEV(card), &id);
+	ccw_device_get_id(CARD_RDEV(card), &id);
 	request->resp_buf_len = sizeof(*response);
 	request->resp_version = DIAG26C_VERSION2;
 	request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
 	mutex_init(&qeth_mod_mutex);
 
 	qeth_wq = create_singlethread_workqueue("qeth_wq");
+	if (!qeth_wq) {
+		rc = -ENOMEM;
+		goto out_err;
+	}
 
 	rc = qeth_register_dbf_views();
 	if (rc)
-		goto out_err;
+		goto dbf_err;
 	qeth_core_root_dev = root_device_register("qeth");
 	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
 	if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
 	root_device_unregister(qeth_core_root_dev);
 register_err:
 	qeth_unregister_dbf_views();
+dbf_err:
+	destroy_workqueue(qeth_wq);
 out_err:
 	pr_err("Initializing the qeth device driver failed\n");
 	return rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 619f897b4bb0..f4d1ec0b8f5a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
35#define QETH_HALT_CHANNEL_PARM -11 35#define QETH_HALT_CHANNEL_PARM -11
36#define QETH_RCD_PARM -12 36#define QETH_RCD_PARM -12
37 37
38static inline bool qeth_intparm_is_iob(unsigned long intparm)
39{
40 switch (intparm) {
41 case QETH_CLEAR_CHANNEL_PARM:
42 case QETH_HALT_CHANNEL_PARM:
43 case QETH_RCD_PARM:
44 case 0:
45 return false;
46 }
47 return true;
48}
49
38/*****************************************************************************/ 50/*****************************************************************************/
39/* IP Assist related definitions */ 51/* IP Assist related definitions */
40/*****************************************************************************/ 52/*****************************************************************************/
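The new qeth_intparm_is_iob() helper above encodes a convention: QETH_CLEAR_CHANNEL_PARM, QETH_HALT_CHANNEL_PARM, QETH_RCD_PARM and 0 are magic interruption-parameter values, and anything else is assumed to be the address of the command buffer that started the I/O. A hedged sketch of how an interrupt handler might use it; the cast and the function name are illustrative, not the driver's exact code:

static void qeth_irq_sketch(unsigned long intparm)
{
	struct qeth_cmd_buffer *iob = NULL;

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *)intparm;

	/* iob is now either NULL or the buffer to complete/retry */
}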
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 50a313806dde..b8079f2a65b3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,7 +21,6 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/hash.h> 22#include <linux/hash.h>
23#include <linux/hashtable.h> 23#include <linux/hashtable.h>
24#include <linux/string.h>
25#include <asm/setup.h> 24#include <asm/setup.h>
26#include "qeth_core.h" 25#include "qeth_core.h"
27#include "qeth_l2.h" 26#include "qeth_l2.h"
@@ -122,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
122 QETH_CARD_TEXT(card, 2, "L2Setmac"); 121 QETH_CARD_TEXT(card, 2, "L2Setmac");
123 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); 122 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
124 if (rc == 0) { 123 if (rc == 0) {
125 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
126 ether_addr_copy(card->dev->dev_addr, mac);
127 dev_info(&card->gdev->dev, 124 dev_info(&card->gdev->dev,
128 "MAC address %pM successfully registered on device %s\n", 125 "MAC address %pM successfully registered on device %s\n",
129 card->dev->dev_addr, card->dev->name);
126 mac, card->dev->name);
130 } else { 127 } else {
131 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
132 switch (rc) { 128 switch (rc) {
133 case -EEXIST: 129 case -EEXIST:
134 dev_warn(&card->gdev->dev, 130 dev_warn(&card->gdev->dev,
@@ -143,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
143 return rc; 139 return rc;
144} 140}
145 141
146static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
147{
148 int rc;
149
150 QETH_CARD_TEXT(card, 2, "L2Delmac");
151 if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
152 return 0;
153 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
154 if (rc == 0)
155 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
156 return rc;
157}
158
159static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 142static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
160{ 143{
161 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 144 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -520,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
520{ 503{
521 struct sockaddr *addr = p; 504 struct sockaddr *addr = p;
522 struct qeth_card *card = dev->ml_priv; 505 struct qeth_card *card = dev->ml_priv;
506 u8 old_addr[ETH_ALEN];
523 int rc = 0; 507 int rc = 0;
524 508
525 QETH_CARD_TEXT(card, 3, "setmac"); 509 QETH_CARD_TEXT(card, 3, "setmac");
@@ -531,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
531 return -EOPNOTSUPP; 515 return -EOPNOTSUPP;
532 } 516 }
533 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); 517 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
518 if (!is_valid_ether_addr(addr->sa_data))
519 return -EADDRNOTAVAIL;
520
534 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { 521 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
535 QETH_CARD_TEXT(card, 3, "setmcREC"); 522 QETH_CARD_TEXT(card, 3, "setmcREC");
536 return -ERESTARTSYS; 523 return -ERESTARTSYS;
537 } 524 }
538 rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
539 if (!rc || (rc == -ENOENT))
540 rc = qeth_l2_send_setmac(card, addr->sa_data);
541 return rc ? -EINVAL : 0;
525
526 if (!qeth_card_hw_is_reachable(card)) {
527 ether_addr_copy(dev->dev_addr, addr->sa_data);
528 return 0;
529 }
530
531 /* don't register the same address twice */
532 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
533 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
534 return 0;
535
536 /* add the new address, switch over, drop the old */
537 rc = qeth_l2_send_setmac(card, addr->sa_data);
538 if (rc)
539 return rc;
540 ether_addr_copy(old_addr, dev->dev_addr);
541 ether_addr_copy(dev->dev_addr, addr->sa_data);
542
543 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
544 qeth_l2_remove_mac(card, old_addr);
545 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
546 return 0;
542} 547}
543 548
544static void qeth_promisc_to_bridge(struct qeth_card *card) 549static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1068,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1068 goto out_remove; 1073 goto out_remove;
1069 } 1074 }
1070 1075
1071 if (card->info.type != QETH_CARD_TYPE_OSN)
1072 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
1076 if (card->info.type != QETH_CARD_TYPE_OSN &&
1077 !qeth_l2_send_setmac(card, card->dev->dev_addr))
1078 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
1073 1079
1074 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { 1080 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
1075 if (card->info.hwtrap && 1081 if (card->info.hwtrap &&
@@ -1339,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
1339 qeth_prepare_control_data(card, len, iob); 1345 qeth_prepare_control_data(card, len, iob);
1340 QETH_CARD_TEXT(card, 6, "osnoirqp"); 1346 QETH_CARD_TEXT(card, 6, "osnoirqp");
1341 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1347 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1342 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
1343 (addr_t) iob, 0, 0);
1348 rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
1349 (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
1344 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); 1350 spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
1345 if (rc) { 1351 if (rc) {
1346 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " 1352 QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
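The qeth_l2_set_mac_address() rewrite above validates the address, just records it while the card is unreachable, skips re-registering an identical address, and otherwise registers the new MAC before dropping the old one, so a failed switch leaves the old address fully working. A runnable userspace sketch of that make-before-break ordering, with invented hw_register()/hw_unregister() helpers standing in for the SETVMAC/DELVMAC commands:

#include <string.h>

#define ETH_ALEN 6

static int hw_register(const unsigned char *mac)    { (void)mac; return 0; }
static void hw_unregister(const unsigned char *mac) { (void)mac; }

static int change_mac(unsigned char dev_addr[ETH_ALEN],
		      const unsigned char *new_mac, int old_registered)
{
	unsigned char old[ETH_ALEN];
	int rc = hw_register(new_mac);     /* add the new address first */

	if (rc)
		return rc;                 /* old address stays active */
	memcpy(old, dev_addr, ETH_ALEN);
	memcpy(dev_addr, new_mac, ETH_ALEN);
	if (old_registered)
		hw_unregister(old);        /* drop the old one last */
	return 0;
}

int main(void)
{
	unsigned char addr[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
	unsigned char next[ETH_ALEN] = { 2, 0, 0, 0, 0, 2 };

	return change_mac(addr, next, 1);
}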
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3b0c8b8a7634..066b5c3aaae6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
176 176
177static void __exit smsg_exit(void) 177static void __exit smsg_exit(void)
178{ 178{
179 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 179 cpcmd("SET SMSG OFF", NULL, 0, NULL);
180 device_unregister(smsg_dev); 180 device_unregister(smsg_dev);
181 iucv_unregister(&smsg_handler, 1); 181 iucv_unregister(&smsg_handler, 1);
182 driver_unregister(&smsg_driver); 182 driver_unregister(&smsg_driver);
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index abddde11982b..98597b59c12a 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
296 "Number of Abort FW Timeouts: %lld\n" 296 "Number of Abort FW Timeouts: %lld\n"
297 "Number of Abort IO NOT Found: %lld\n" 297 "Number of Abort IO NOT Found: %lld\n"
298 298
299 "Abord issued times: \n" 299 "Abort issued times: \n"
300 " < 6 sec : %lld\n" 300 " < 6 sec : %lld\n"
301 " 6 sec - 20 sec : %lld\n" 301 " 6 sec - 20 sec : %lld\n"
302 " 20 sec - 30 sec : %lld\n" 302 " 20 sec - 30 sec : %lld\n"
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ce97cde3b41c..f4d988dd1e9d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1124 goto fail_fw_init; 1124 goto fail_fw_init;
1125 } 1125 }
1126 1126
1127 ret = 0;
1127 return 0;
1128 1128
1129fail_fw_init: 1129fail_fw_init:
1130 dev_err(&instance->pdev->dev, 1130 dev_err(&instance->pdev->dev,
1131 "Init cmd return status %s for SCSI host %d\n", 1131 "Init cmd return status FAILED for SCSI host %d\n",
1132 ret ? "FAILED" : "SUCCESS", instance->host->host_no); 1132 instance->host->host_no);
1133 1133
1134 return ret; 1134 return ret;
1135} 1135}
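The megasas_ioc_init_fusion() change above returns 0 directly on success, so the fail_fw_init label is reached only on error and the message can say FAILED unconditionally. A sketch of the simplified control flow (the condition is invented):

static int ioc_init_sketch(int fw_ok)
{
	int ret = -1;

	if (!fw_ok)
		goto fail_fw_init;
	return 0;                 /* success no longer falls into the label */

fail_fw_init:
	/* only real failures reach the error print now */
	return ret;
}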
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9ef5e3b810f6..656c98e116a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
234#define F_INV_OP 0x200 234#define F_INV_OP 0x200
235#define F_FAKE_RW 0x400 235#define F_FAKE_RW 0x400
236#define F_M_ACCESS 0x800 /* media access */ 236#define F_M_ACCESS 0x800 /* media access */
237#define F_LONG_DELAY 0x1000
237#define F_SSU_DELAY 0x1000
238#define F_SYNC_DELAY 0x2000
238 239
239#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 240#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW) 241#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241#define FF_SA (F_SA_HIGH | F_SA_LOW) 242#define FF_SA (F_SA_HIGH | F_SA_LOW)
243#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
242 244
243#define SDEBUG_MAX_PARTS 4 245#define SDEBUG_MAX_PARTS 4
244 246
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
510}; 512};
511 513
512static const struct opcode_info_t sync_cache_iarr[] = { 514static const struct opcode_info_t sync_cache_iarr[] = {
513 {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
515 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
514 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 516 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ 517 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
516}; 518};
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
553 resp_write_dt0, write_iarr, /* WRITE(16) */ 555 resp_write_dt0, write_iarr, /* WRITE(16) */
554 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 556 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, 557 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
556 {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
558 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
557 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 559 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
558 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, 560 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
559 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ 561 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
606 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ 608 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
607 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 609 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
608 0, 0, 0, 0, 0} }, 610 0, 0, 0, 0, 0} },
609 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
611 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
610 resp_sync_cache, sync_cache_iarr, 612 resp_sync_cache, sync_cache_iarr,
611 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 613 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ 614 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
667static bool sdebug_any_injecting_opt; 669static bool sdebug_any_injecting_opt;
668static bool sdebug_verbose; 670static bool sdebug_verbose;
669static bool have_dif_prot; 671static bool have_dif_prot;
672static bool write_since_sync;
670static bool sdebug_statistics = DEF_STATISTICS; 673static bool sdebug_statistics = DEF_STATISTICS;
671 674
672static unsigned int sdebug_store_sectors; 675static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1607{ 1610{
1608 unsigned char *cmd = scp->cmnd; 1611 unsigned char *cmd = scp->cmnd;
1609 int power_cond, stop; 1612 int power_cond, stop;
1613 bool changing;
1610 1614
1611 power_cond = (cmd[4] & 0xf0) >> 4; 1615 power_cond = (cmd[4] & 0xf0) >> 4;
1612 if (power_cond) { 1616 if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1614 return check_condition_result; 1618 return check_condition_result;
1615 } 1619 }
1616 stop = !(cmd[4] & 1); 1620 stop = !(cmd[4] & 1);
1621 changing = atomic_read(&devip->stopped) == !stop;
1617 atomic_xchg(&devip->stopped, stop); 1622 atomic_xchg(&devip->stopped, stop);
1618 return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
1623 if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
1624 return SDEG_RES_IMMED_MASK;
1625 else
1626 return 0;
1619} 1627}
1620 1628
1621static sector_t get_sdebug_capacity(void) 1629static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2473 if (do_write) { 2481 if (do_write) {
2474 sdb = scsi_out(scmd); 2482 sdb = scsi_out(scmd);
2475 dir = DMA_TO_DEVICE; 2483 dir = DMA_TO_DEVICE;
2484 write_since_sync = true;
2476 } else { 2485 } else {
2477 sdb = scsi_in(scmd); 2486 sdb = scsi_in(scmd);
2478 dir = DMA_FROM_DEVICE; 2487 dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
3583static int resp_sync_cache(struct scsi_cmnd *scp, 3592static int resp_sync_cache(struct scsi_cmnd *scp,
3584 struct sdebug_dev_info *devip) 3593 struct sdebug_dev_info *devip)
3585{ 3594{
3595 int res = 0;
3586 u64 lba; 3596 u64 lba;
3587 u32 num_blocks; 3597 u32 num_blocks;
3588 u8 *cmd = scp->cmnd; 3598 u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
3598 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 3608 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3599 return check_condition_result; 3609 return check_condition_result;
3600 } 3610 }
3601 return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
3611 if (!write_since_sync || cmd[1] & 0x2)
3612 res = SDEG_RES_IMMED_MASK;
3613 else /* delay if write_since_sync and IMMED clear */
3614 write_since_sync = false;
3615 return res;
3602} 3616}
3603 3617
3604#define RL_BUCKET_ELEMS 8 3618#define RL_BUCKET_ELEMS 8
@@ -5777,13 +5791,14 @@ fini:
5777 return schedule_resp(scp, devip, errsts, pfp, 0, 0); 5791 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5778 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) { 5792 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
5779 /* 5793 /*
5780 * If any delay is active, want F_LONG_DELAY to be at least 1
5794 * If any delay is active, for F_SSU_DELAY want at least 1
5781 * second and if sdebug_jdelay>0 want a long delay of that 5795 * second and if sdebug_jdelay>0 want a long delay of that
5782 * many seconds.
5796 * many seconds; for F_SYNC_DELAY want 1/20 of that.
5783 */ 5797 */
5784 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; 5798 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5799 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5785 5800
5786 jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
5801 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5787 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0); 5802 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5788 } else 5803 } else
5789 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay, 5804 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
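In the scsi_debug hunks above, F_LONG_DELAY is split so START STOP UNIT (F_SSU_DELAY) keeps the full sdebug_jdelay-second delay while SYNCHRONIZE CACHE (F_SYNC_DELAY) gets 1/20 of it, and both commands now complete immediately when nothing actually changed (no power-state transition, or no write since the last sync). A runnable sketch of the jiffies arithmetic; the macro has the same shape as the kernel's mult_frac(), which computes x * n / d without intermediate overflow, and the HZ values are made up:

#include <stdio.h>

#define mult_frac(x, n, d) ((x) / (d) * (n) + (x) % (d) * (n) / (d))

int main(void)
{
	const int USER_HZ = 100, HZ = 250;   /* illustrative values */
	int jdelay = 5;                      /* seconds, like sdebug_jdelay */
	int denom = 20;                      /* the F_SYNC_DELAY divisor */

	printf("ssu:  %d jiffies\n", mult_frac(USER_HZ * jdelay, HZ, USER_HZ));
	printf("sync: %d jiffies\n", mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ));
	return 0;
}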
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4b52b44b966..65f6c94f2e9b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
2322 return nlmsg_multicast(nls, skb, 0, group, gfp); 2322 return nlmsg_multicast(nls, skb, 0, group, gfp);
2323} 2323}
2324 2324
2325static int
2326iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
2327{
2328 return nlmsg_unicast(nls, skb, portid);
2329}
2330
2325int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 2331int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
2326 char *data, uint32_t data_size) 2332 char *data, uint32_t data_size)
2327{ 2333{
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
2524EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); 2530EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
2525 2531
2526static int 2532static int
2527iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2528 void *payload, int size)
2533iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
2529{ 2534{
2529{ 2534{
2530 struct sk_buff *skb; 2535 struct sk_buff *skb;
2531 struct nlmsghdr *nlh; 2536 struct nlmsghdr *nlh;
2532 int len = nlmsg_total_size(size); 2537 int len = nlmsg_total_size(size);
2533 int flags = multi ? NLM_F_MULTI : 0;
2534 int t = done ? NLMSG_DONE : type;
2535 2538
2536 skb = alloc_skb(len, GFP_ATOMIC); 2539 skb = alloc_skb(len, GFP_ATOMIC);
2537 if (!skb) { 2540 if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2539 return -ENOMEM; 2542 return -ENOMEM;
2540 } 2543 }
2541 2544
2542 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
2545 nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
2543 nlh->nlmsg_flags = flags;
2544 memcpy(nlmsg_data(nlh), payload, size); 2546 memcpy(nlmsg_data(nlh), payload, size);
2545 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
2547 return iscsi_unicast_skb(skb, portid);
2546} 2548}
2547 2549
2548static int 2550static int
@@ -3470,6 +3472,7 @@ static int
3470iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 3472iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3471{ 3473{
3472 int err = 0; 3474 int err = 0;
3475 u32 portid;
3473 struct iscsi_uevent *ev = nlmsg_data(nlh); 3476 struct iscsi_uevent *ev = nlmsg_data(nlh);
3474 struct iscsi_transport *transport = NULL; 3477 struct iscsi_transport *transport = NULL;
3475 struct iscsi_internal *priv; 3478 struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3490 if (!try_module_get(transport->owner)) 3493 if (!try_module_get(transport->owner))
3491 return -EINVAL; 3494 return -EINVAL;
3492 3495
3496 portid = NETLINK_CB(skb).portid;
3497
3493 switch (nlh->nlmsg_type) { 3498 switch (nlh->nlmsg_type) {
3494 case ISCSI_UEVENT_CREATE_SESSION: 3499 case ISCSI_UEVENT_CREATE_SESSION:
3495 err = iscsi_if_create_session(priv, ep, ev, 3500 err = iscsi_if_create_session(priv, ep, ev,
3496 NETLINK_CB(skb).portid,
3501 portid,
3497 ev->u.c_session.initial_cmdsn, 3502 ev->u.c_session.initial_cmdsn,
3498 ev->u.c_session.cmds_max, 3503 ev->u.c_session.cmds_max,
3499 ev->u.c_session.queue_depth); 3504 ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3506 } 3511 }
3507 3512
3508 err = iscsi_if_create_session(priv, ep, ev, 3513 err = iscsi_if_create_session(priv, ep, ev,
3509 NETLINK_CB(skb).portid,
3514 portid,
3510 ev->u.c_bound_session.initial_cmdsn, 3515 ev->u.c_bound_session.initial_cmdsn,
3511 ev->u.c_bound_session.cmds_max, 3516 ev->u.c_bound_session.cmds_max,
3512 ev->u.c_bound_session.queue_depth); 3517 ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3664static void 3669static void
3665iscsi_if_rx(struct sk_buff *skb) 3670iscsi_if_rx(struct sk_buff *skb)
3666{ 3671{
3672 u32 portid = NETLINK_CB(skb).portid;
3673
3667 mutex_lock(&rx_queue_mutex); 3674 mutex_lock(&rx_queue_mutex);
3668 while (skb->len >= NLMSG_HDRLEN) { 3675 while (skb->len >= NLMSG_HDRLEN) {
3669 int err; 3676 int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
3699 break; 3706 break;
3700 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) 3707 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
3701 break; 3708 break;
3702 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
3703 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
3709 err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
3710 ev, sizeof(*ev));
3704 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); 3711 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
3705 skb_pull(skb, rlen); 3712 skb_pull(skb, rlen);
3706 } 3713 }
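The scsi_transport_iscsi changes above stop broadcasting command replies to the whole netlink group and instead unicast them to the portid of the requesting process, captured once in iscsi_if_rx(); the NLM_F_MULTI/NLMSG_DONE framing goes away along with the multicast. A hedged kernel-style sketch of the resulting reply path (reply_sketch() is an invented name; nls is the driver's netlink socket, as in the code above):

static int reply_sketch(struct sk_buff *req, int type, void *payload, int size)
{
	struct sk_buff *skb = nlmsg_new(size, GFP_KERNEL);
	struct nlmsghdr *nlh;

	if (!skb)
		return -ENOMEM;
	nlh = nlmsg_put(skb, 0, 0, type, size, 0);
	memcpy(nlmsg_data(nlh), payload, size);

	/* deliver to the one socket that sent the request */
	return nlmsg_unicast(nls, skb, NETLINK_CB(req).portid);
}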
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a6201e696ab9..9421d9877730 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2121 break; /* standby */ 2121 break; /* standby */
2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2123 break; /* unavailable */ 2123 break; /* unavailable */
2124 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2125 break; /* sanitize in progress */
2124 /* 2126 /*
2125 * Issue command to spin up drive when not ready 2127 * Issue command to spin up drive when not ready
2126 */ 2128 */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 41df75eea57b..210407cd2341 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
400 * 400 *
401 * Check that all zones of the device are equal. The last zone can however 401 * Check that all zones of the device are equal. The last zone can however
402 * be smaller. The zone size must also be a power of two number of LBAs. 402 * be smaller. The zone size must also be a power of two number of LBAs.
403 *
404 * Returns the zone size in bytes upon success or an error code upon failure.
403 */ 405 */
404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
406static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
405{ 407{
406 u64 zone_blocks = 0; 408 u64 zone_blocks = 0;
407 sector_t block = 0; 409 sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
412 int ret; 414 int ret;
413 u8 same; 415 u8 same;
414 416
415 sdkp->zone_blocks = 0;
416
417 /* Get a buffer */ 417 /* Get a buffer */
418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
419 if (!buf) 419 if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
445 445
446 /* Parse zone descriptors */ 446 /* Parse zone descriptors */
447 while (rec < buf + buf_len) { 447 while (rec < buf + buf_len) {
448 zone_blocks = get_unaligned_be64(&rec[8]);
449 if (sdkp->zone_blocks == 0) {
450 sdkp->zone_blocks = zone_blocks;
451 } else if (zone_blocks != sdkp->zone_blocks &&
452 (block + zone_blocks < sdkp->capacity
453 || zone_blocks > sdkp->zone_blocks)) {
454 zone_blocks = 0;
448 u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
449
450 if (zone_blocks == 0) {
451 zone_blocks = this_zone_blocks;
452 } else if (this_zone_blocks != zone_blocks &&
453 (block + this_zone_blocks < sdkp->capacity
454 || this_zone_blocks > zone_blocks)) {
455 this_zone_blocks = 0;
455 goto out; 456 goto out;
456 } 457 }
457 block += zone_blocks;
458 block += this_zone_blocks;
458 rec += 64; 459 rec += 64;
459 } 460 }
460 461
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
467 468
468 } while (block < sdkp->capacity); 469 } while (block < sdkp->capacity);
469 470
470 zone_blocks = sdkp->zone_blocks;
471
472out: 471out:
473 if (!zone_blocks) { 472 if (!zone_blocks) {
474 if (sdkp->first_scan) 473 if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
488 "Zone size too large\n"); 487 "Zone size too large\n");
489 ret = -ENODEV; 488 ret = -ENODEV;
490 } else { 489 } else {
491 sdkp->zone_blocks = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
490 ret = zone_blocks;
493 } 491 }
494 492
495out_free: 493out_free:
@@ -500,15 +498,14 @@ out_free:
500 498
501/** 499/**
502 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone). 500 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
503 * @sdkp: The disk of the bitmap
501 * @nr_zones: Number of zones to allocate space for.
502 * @numa_node: NUMA node to allocate the memory from.
504 */ 503 */
505static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
504static inline unsigned long *
505sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
506{ 506{
507 struct request_queue *q = sdkp->disk->queue;
508
509 return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
510 * sizeof(unsigned long),
511 GFP_KERNEL, q->node);
507 return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
508 GFP_KERNEL, numa_node);
512} 509}
513 510
514/** 511/**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
516 * @sdkp: disk used 513 * @sdkp: disk used
517 * @buf: report reply buffer 514 * @buf: report reply buffer
518 * @buflen: length of @buf 515 * @buflen: length of @buf
516 * @zone_shift: logarithm base 2 of the number of blocks in a zone
519 * @seq_zones_bitmap: bitmap of sequential zones to set 517 * @seq_zones_bitmap: bitmap of sequential zones to set
520 * 518 *
521 * Parse reported zone descriptors in @buf to identify sequential zones and 519 * Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
525 * Return the LBA after the last zone reported. 523 * Return the LBA after the last zone reported.
526 */ 524 */
527static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, 525static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
528 unsigned int buflen,
526 unsigned int buflen, u32 zone_shift,
529 unsigned long *seq_zones_bitmap) 527 unsigned long *seq_zones_bitmap)
530{ 528{
531 sector_t lba, next_lba = sdkp->capacity; 529 sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
544 if (type != ZBC_ZONE_TYPE_CONV && 542 if (type != ZBC_ZONE_TYPE_CONV &&
545 cond != ZBC_ZONE_COND_READONLY && 543 cond != ZBC_ZONE_COND_READONLY &&
546 cond != ZBC_ZONE_COND_OFFLINE) 544 cond != ZBC_ZONE_COND_OFFLINE)
547 set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
545 set_bit(lba >> zone_shift, seq_zones_bitmap);
548 next_lba = lba + get_unaligned_be64(&rec[8]); 546 next_lba = lba + get_unaligned_be64(&rec[8]);
549 rec += 64; 547 rec += 64;
550 } 548 }
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
553} 551}
554 552
555/** 553/**
556 * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
554 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
557 * @sdkp: target disk 555 * @sdkp: target disk
556 * @zone_shift: logarithm base 2 of the number of blocks in a zone
557 * @nr_zones: number of zones to set up a seq zone bitmap for
558 * 558 *
559 * Allocate a zone bitmap and initialize it by identifying sequential zones. 559 * Allocate a zone bitmap and initialize it by identifying sequential zones.
560 */ 560 */
561static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
561static unsigned long *
562sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
563 u32 nr_zones)
562{ 564{
563 struct request_queue *q = sdkp->disk->queue; 565 struct request_queue *q = sdkp->disk->queue;
564 unsigned long *seq_zones_bitmap; 566 unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
566 unsigned char *buf; 568 unsigned char *buf;
567 int ret = -ENOMEM; 569 int ret = -ENOMEM;
568 570
569 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
571 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
570 if (!seq_zones_bitmap) 572 if (!seq_zones_bitmap)
571 return -ENOMEM;
573 return ERR_PTR(-ENOMEM);
572 574
573 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 575 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
574 if (!buf) 576 if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
579 if (ret) 581 if (ret)
580 goto out; 582 goto out;
581 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 583 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
582 seq_zones_bitmap);
584 zone_shift, seq_zones_bitmap);
583 } 585 }
584 586
585 if (lba != sdkp->capacity) { 587 if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
591 kfree(buf); 593 kfree(buf);
592 if (ret) { 594 if (ret) {
593 kfree(seq_zones_bitmap); 595 kfree(seq_zones_bitmap);
594 return ret;
596 return ERR_PTR(ret);
595 } 597 }
596
598 return seq_zones_bitmap;
597 q->seq_zones_bitmap = seq_zones_bitmap;
598
599 return 0;
600} 599}
601 600
602static void sd_zbc_cleanup(struct scsi_disk *sdkp) 601static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
612 q->nr_zones = 0; 611 q->nr_zones = 0;
613} 612}
614 613
615static int sd_zbc_setup(struct scsi_disk *sdkp)
614static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
616{ 615{
617 struct request_queue *q = sdkp->disk->queue; 616 struct request_queue *q = sdkp->disk->queue;
617 u32 zone_shift = ilog2(zone_blocks);
618 u32 nr_zones;
618 int ret; 619 int ret;
619 620
620 /* READ16/WRITE16 is mandatory for ZBC disks */
621 sdkp->device->use_16_for_rw = 1;
622 sdkp->device->use_10_for_rw = 0;
623
624 /* chunk_sectors indicates the zone size */ 621 /* chunk_sectors indicates the zone size */
625 blk_queue_chunk_sectors(sdkp->disk->queue,
626 logical_to_sectors(sdkp->device, sdkp->zone_blocks));
627 sdkp->nr_zones =
628 round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
622 blk_queue_chunk_sectors(q,
623 logical_to_sectors(sdkp->device, zone_blocks));
624 nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
625
629 625
630 /* 626 /*
631 * Initialize the device request queue information if the number 627 * Initialize the device request queue information if the number
632 * of zones changed. 628 * of zones changed.
633 */ 629 */
634 if (sdkp->nr_zones != q->nr_zones) {
630 if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
635
631 unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
636 sd_zbc_cleanup(sdkp);
632 size_t zone_bitmap_size;
637 633
638 q->nr_zones = sdkp->nr_zones;
639 if (sdkp->nr_zones) {
640 q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
634 if (nr_zones) {
635 seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
636 q->node);
641 if (!q->seq_zones_wlock) {
637 if (!seq_zones_wlock) {
642 ret = -ENOMEM; 638 ret = -ENOMEM;
643 goto err; 639 goto err;
644 } 640 }
645 641
646 ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
647 if (ret) {
648 sd_zbc_cleanup(sdkp);
642 seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
643 zone_shift, nr_zones);
644 if (IS_ERR(seq_zones_bitmap)) {
645 ret = PTR_ERR(seq_zones_bitmap);
646 kfree(seq_zones_wlock);
649 goto err; 647 goto err;
650 } 648 }
651 } 649 }
652
650 zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
651 sizeof(unsigned long);
652 blk_mq_freeze_queue(q);
653 if (q->nr_zones != nr_zones) {
654 /* READ16/WRITE16 is mandatory for ZBC disks */
655 sdkp->device->use_16_for_rw = 1;
656 sdkp->device->use_10_for_rw = 0;
657
658 sdkp->zone_blocks = zone_blocks;
659 sdkp->zone_shift = zone_shift;
660 sdkp->nr_zones = nr_zones;
661 q->nr_zones = nr_zones;
662 swap(q->seq_zones_wlock, seq_zones_wlock);
663 swap(q->seq_zones_bitmap, seq_zones_bitmap);
664 } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
665 zone_bitmap_size) != 0) {
666 memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
667 zone_bitmap_size);
668 }
669 blk_mq_unfreeze_queue(q);
670 kfree(seq_zones_wlock);
671 kfree(seq_zones_bitmap);
653 } 672 }
654 673
655 return 0; 674 return 0;
@@ -661,6 +680,7 @@ err:
661 680
662int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) 681int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
663{ 682{
683 int64_t zone_blocks;
664 int ret; 684 int ret;
665 685
666 if (!sd_is_zoned(sdkp)) 686 if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
697 * Check zone size: only devices with a constant zone size (except 717 * Check zone size: only devices with a constant zone size (except
698 * an eventual last runt zone) that is a power of 2 are supported. 718 * an eventual last runt zone) that is a power of 2 are supported.
699 */ 719 */
700 ret = sd_zbc_check_zone_size(sdkp);
701 if (ret)
720 zone_blocks = sd_zbc_check_zone_size(sdkp);
721 ret = -EFBIG;
722 if (zone_blocks != (u32)zone_blocks)
723 goto err;
724 ret = zone_blocks;
725 if (ret < 0)
702 goto err; 726 goto err;
703 727
704 /* The drive satisfies the kernel restrictions: set it up */ 728 /* The drive satisfies the kernel restrictions: set it up */
705 ret = sd_zbc_setup(sdkp);
729 ret = sd_zbc_setup(sdkp, zone_blocks);
706 if (ret) 730 if (ret)
707 goto err; 731 goto err;
708 732
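The sd_zbc rework above makes sd_zbc_check_zone_size() return the zone size (or a negative error, with an -EFBIG guard for sizes that do not fit in u32) instead of writing into the scsi_disk as a side effect, and has revalidation build the new wlock/seq bitmaps off to the side, publishing them only while the queue is frozen so no in-flight request ever sees half-updated zone data. A hedged sketch of that publish-under-freeze step (the helper name is invented; the fields are the real request_queue ones used above):

static void publish_zone_info(struct request_queue *q, u32 nr_zones,
			      unsigned long *wlock, unsigned long *bitmap)
{
	blk_mq_freeze_queue(q);            /* drains in-flight requests */
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, wlock);   /* swap in the new bitmaps */
	swap(q->seq_zones_bitmap, bitmap);
	blk_mq_unfreeze_queue(q);

	kfree(wlock);                      /* free the *old* bitmaps */
	kfree(bitmap);
}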
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5b1bf1cadcb..00e79057f870 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
276 *val = ' '; 276 *val = ' ';
277} 277}
278 278
279static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
280 const char *str)
281{
282 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
283
284 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
285}
286
287static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
288 const char *str)
289{
290 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
291
292 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
293}
294
295static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
296 const char *str)
297{
298 struct utp_task_req_desc *descp;
299 struct utp_upiu_task_req *task_req;
300 int off = (int)tag - hba->nutrs;
301
302 descp = &hba->utmrdl_base_addr[off];
303 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
304 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
305 &task_req->input_param1);
306}
307
279static void ufshcd_add_command_trace(struct ufs_hba *hba, 308static void ufshcd_add_command_trace(struct ufs_hba *hba,
280 unsigned int tag, const char *str) 309 unsigned int tag, const char *str)
281{ 310{
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
285 struct ufshcd_lrb *lrbp; 314 struct ufshcd_lrb *lrbp;
286 int transfer_len = -1; 315 int transfer_len = -1;
287 316
317 /* trace UPIU also */
318 ufshcd_add_cmd_upiu_trace(hba, tag, str);
319
288 if (!trace_ufshcd_command_enabled()) 320 if (!trace_ufshcd_command_enabled())
289 return; 321 return;
290 322
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2550 2582
2551 hba->dev_cmd.complete = &wait; 2583 hba->dev_cmd.complete = &wait;
2552 2584
2585 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2553 /* Make sure descriptors are ready before ringing the doorbell */ 2586 /* Make sure descriptors are ready before ringing the doorbell */
2554 wmb(); 2587 wmb();
2555 spin_lock_irqsave(hba->host->host_lock, flags); 2588 spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2559 2592
2560 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 2593 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2561 2594
2595 ufshcd_add_query_upiu_trace(hba, tag,
2596 err ? "query_complete_err" : "query_complete");
2597
2562out_put_tag: 2598out_put_tag:
2563 ufshcd_put_dev_cmd_tag(hba, tag); 2599 ufshcd_put_dev_cmd_tag(hba, tag);
2564 wake_up(&hba->dev_cmd.tag_wq); 2600 wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5443 5479
5444 spin_unlock_irqrestore(host->host_lock, flags); 5480 spin_unlock_irqrestore(host->host_lock, flags);
5445 5481
5482 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5483
5446 /* wait until the task management command is completed */ 5484 /* wait until the task management command is completed */
5447 err = wait_event_timeout(hba->tm_wq, 5485 err = wait_event_timeout(hba->tm_wq,
5448 test_bit(free_slot, &hba->tm_condition), 5486 test_bit(free_slot, &hba->tm_condition),
5449 msecs_to_jiffies(TM_CMD_TIMEOUT)); 5487 msecs_to_jiffies(TM_CMD_TIMEOUT));
5450 if (!err) { 5488 if (!err) {
5489 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5451 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 5490 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5452 __func__, tm_function); 5491 __func__, tm_function);
5453 if (ufshcd_clear_tm_cmd(hba, free_slot)) 5492 if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5456 err = -ETIMEDOUT; 5495 err = -ETIMEDOUT;
5457 } else { 5496 } else {
5458 err = ufshcd_task_req_compl(hba, free_slot, tm_response); 5497 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5498 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5459 } 5499 }
5460 5500
5461 clear_bit(free_slot, &hba->tm_condition); 5501 clear_bit(free_slot, &hba->tm_condition);
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
45struct rpi_power_domain_packet { 45struct rpi_power_domain_packet {
46 u32 domain; 46 u32 domain;
47 u32 on; 47 u32 on;
48} __packet;
48};
49 49
50/* 50/*
51 * Asks the firmware to enable or disable power on a specific power 51 * Asks the firmware to enable or disable power on a specific power
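The raspberrypi-power fix above is subtler than it looks: __packet is not the kernel's __packed attribute macro, so the original `} __packet;` simply declared a stray, unused variable of the struct type. Removing it changes nothing in the layout, since two naturally aligned u32 members carry no padding anyway. For contrast, the spelling that actually packs a struct:

#include <stdint.h>

/* __packed is the kernel shorthand for this attribute; note the 'd' */
struct rpi_power_domain_packet_sketch {
	uint32_t domain;
	uint32_t on;
} __attribute__((packed));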
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d99b242e82e..6cb933ecc084 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
890 bytes = min(bytes, data_len); 890 bytes = min(bytes, data_len);
891 891
892 if (!bio) { 892 if (!bio) {
893new_bio:
893 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 894 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
894 nr_pages -= nr_vecs; 895 nr_pages -= nr_vecs;
895 /* 896 /*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
931 * be allocated with pscsi_get_bio() above. 932 * be allocated with pscsi_get_bio() above.
932 */ 933 */
933 bio = NULL; 934 bio = NULL;
935 goto new_bio;
934 } 936 }
935 937
936 data_len -= bytes; 938 data_len -= bytes;
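The target_core_pscsi fix above adds the new_bio label and a goto so that, once the current bio is full and handed off, the loop jumps back to allocate a fresh bio for the same page instead of continuing with bio == NULL. A runnable userspace sketch of that loop shape, with invented capacities:

#include <stdio.h>

int main(void)
{
	int pages = 10, cap = 4;           /* cap plays the role of nr_vecs */
	int bios = 0, used = 0, have = 0;

	for (int i = 0; i < pages; i++) {
		if (!have) {
new_bio:
			bios++;            /* like pscsi_get_bio() */
			used = 0;
			have = 1;
		}
		if (used == cap) {         /* bio full: "submit" it ... */
			have = 0;
			goto new_bio;      /* ... and restart for this page */
		}
		used++;
	}
	printf("%d bios for %d pages\n", bios, pages);
	return 0;
}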
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index a5b8eb21201f..1abe4d021fd2 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
55#define WDT_CTRL_WDT_INTR BIT(2) 55#define WDT_CTRL_WDT_INTR BIT(2)
56#define WDT_CTRL_RESET_SYSTEM BIT(1) 56#define WDT_CTRL_RESET_SYSTEM BIT(1)
57#define WDT_CTRL_ENABLE BIT(0) 57#define WDT_CTRL_ENABLE BIT(0)
58#define WDT_TIMEOUT_STATUS 0x10
59#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
58 60
59/* 61/*
60 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if 62 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
192 struct device_node *np; 194 struct device_node *np;
193 const char *reset_type; 195 const char *reset_type;
194 u32 duration; 196 u32 duration;
197 u32 status;
195 int ret; 198 int ret;
196 199
197 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); 200 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
307 writel(duration - 1, wdt->base + WDT_RESET_WIDTH); 310 writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
308 } 311 }
309 312
313 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
314 if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
315 wdt->wdd.bootstatus = WDIOF_CARDRESET;
316
310 ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); 317 ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
311 if (ret) { 318 if (ret) {
312 dev_err(&pdev->dev, "failed to register\n"); 319 dev_err(&pdev->dev, "failed to register\n");
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 6b8c6ddfe30b..514db5cc1595 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
121} 121}
122 122
123static const struct watchdog_info rwdt_ident = { 123static const struct watchdog_info rwdt_ident = {
124 .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
124 .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
125 WDIOF_CARDRESET,
125 .identity = "Renesas WDT Watchdog", 126 .identity = "Renesas WDT Watchdog",
126}; 127};
127 128
@@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev)
197 return PTR_ERR(clk); 198 return PTR_ERR(clk);
198 199
199 pm_runtime_enable(&pdev->dev); 200 pm_runtime_enable(&pdev->dev);
200
201 pm_runtime_get_sync(&pdev->dev); 201 pm_runtime_get_sync(&pdev->dev);
202 priv->clk_rate = clk_get_rate(clk); 202 priv->clk_rate = clk_get_rate(clk);
203 priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
204 RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
203 pm_runtime_put(&pdev->dev); 205 pm_runtime_put(&pdev->dev);
204 206
205 if (!priv->clk_rate) { 207 if (!priv->clk_rate) {
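Both watchdog changes above (aspeed_wdt and renesas_wdt) follow the same pattern: probe reads a hardware flag recording whether the last reset was caused by the watchdog and latches it into wdd.bootstatus as WDIOF_CARDRESET, which userspace can then query via WDIOC_GETBOOTSTATUS. A hedged sketch with an illustrative register layout, not either driver's exact offsets:

static void latch_bootstatus_sketch(struct watchdog_device *wdd,
				    void __iomem *base)
{
	u32 status = readl(base + 0x10);   /* e.g. a timeout-status register */

	if (status & BIT(1))               /* "reset was ours" bit, illustrative */
		wdd->bootstatus |= WDIOF_CARDRESET;
}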
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 43d0cbb7ba0b..814cdf539b0f 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
299 if (sch311x_wdt_set_heartbeat(new_timeout)) 299 if (sch311x_wdt_set_heartbeat(new_timeout))
300 return -EINVAL; 300 return -EINVAL;
301 sch311x_wdt_keepalive(); 301 sch311x_wdt_keepalive();
302 /* Fall */
302 /* Fall through */
303 case WDIOC_GETTIMEOUT: 303 case WDIOC_GETTIMEOUT:
304 return put_user(timeout, p); 304 return put_user(timeout, p);
305 default: 305 default:
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 20e2bba10400..672b61a7f9a3 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
427 return -EINVAL; 427 return -EINVAL;
428 428
429 wdt_keepalive(); 429 wdt_keepalive();
430 /* Fall */
430 /* Fall through */
431 431
432 case WDIOC_GETTIMEOUT: 432 case WDIOC_GETTIMEOUT:
433 return put_user(timeout, uarg.i); 433 return put_user(timeout, uarg.i);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index db0da7ea4fd8..93c5b610e264 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
178 timeout = new_timeout; 178 timeout = new_timeout;
179 wafwdt_stop(); 179 wafwdt_stop();
180 wafwdt_start(); 180 wafwdt_start();
181 /* Fall */
181 /* Fall through */
182 case WDIOC_GETTIMEOUT: 182 case WDIOC_GETTIMEOUT:
183 return put_user(timeout, p); 183 return put_user(timeout, p);
184 184
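The three /* Fall */ to /* Fall through */ edits above matter to the compiler, not just the reader: GCC's -Wimplicit-fallthrough (enabled by -Wextra) suppresses its warning only for recognized marker comments such as "Fall through", so the truncated "Fall" still warned. A runnable illustration:

#include <stdio.h>

/* build with: gcc -Wextra -Wimplicit-fallthrough fall.c */
int main(void)
{
	int cmd = 1, timeout = 30;

	switch (cmd) {
	case 1:
		timeout *= 2;
		/* Fall through */
	case 2:
		printf("timeout=%d\n", timeout);
	}
	return 0;
}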
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 89d9744ece61..ed593d1042a6 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -95,7 +95,7 @@ int xen_pcibk_config_quirks_init(struct pci_dev *dev)
95 struct xen_pcibk_config_quirk *quirk; 95 struct xen_pcibk_config_quirk *quirk;
96 int ret = 0; 96 int ret = 0;
97 97
98 quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
98 quirk = kzalloc(sizeof(*quirk), GFP_KERNEL);
99 if (!quirk) { 99 if (!quirk) {
100 ret = -ENOMEM; 100 ret = -ENOMEM;
101 goto out; 101 goto out;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 9e480fdebe1f..59661db144e5 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -71,7 +71,7 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
71 71
72 dev_dbg(&dev->dev, "pcistub_device_alloc\n"); 72 dev_dbg(&dev->dev, "pcistub_device_alloc\n");
73 73
74 psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
74 psdev = kzalloc(sizeof(*psdev), GFP_KERNEL);
75 if (!psdev) 75 if (!psdev)
76 return NULL; 76 return NULL;
77 77
@@ -364,7 +364,7 @@ static int pcistub_init_device(struct pci_dev *dev)
364 * here and then to call kfree(pci_get_drvdata(psdev->dev)). 364 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
365 */ 365 */
366 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") 366 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
367 + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
367 + strlen(pci_name(dev)) + 1, GFP_KERNEL);
368 if (!dev_data) { 368 if (!dev_data) {
369 err = -ENOMEM; 369 err = -ENOMEM;
370 goto out; 370 goto out;
@@ -577,7 +577,7 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
577 } 577 }
578 578
579 if (!match) { 579 if (!match) {
580 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
580 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
581 if (!pci_dev_id) { 581 if (!pci_dev_id) {
582 err = -ENOMEM; 582 err = -ENOMEM;
583 goto out; 583 goto out;
@@ -1149,7 +1149,7 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func,
1149 } 1149 }
1150 dev = psdev->dev; 1150 dev = psdev->dev;
1151 1151
1152 field = kzalloc(sizeof(*field), GFP_ATOMIC);
1152 field = kzalloc(sizeof(*field), GFP_KERNEL);
1153 if (!field) { 1153 if (!field) {
1154 err = -ENOMEM; 1154 err = -ENOMEM;
1155 goto out; 1155 goto out;
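Every allocation touched in the two xen-pciback files above sits on a sleepable path (probe and configuration writes), so GFP_KERNEL is the right strength: it may block and reclaim memory, while GFP_ATOMIC is for contexts that cannot sleep and would needlessly dip into emergency reserves here. A hedged sketch of the rule of thumb, using one of the real types from above but an invented helper:

/* process context that may sleep vs. IRQ/spinlock context */
static struct xen_pcibk_config_quirk *alloc_quirk_sketch(bool can_sleep)
{
	return kzalloc(sizeof(struct xen_pcibk_config_quirk),
		       can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}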
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0d6d9264d6a9..c3e201025ef0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
403{ 403{
404 struct { 404 struct {
405 struct xsd_sockmsg hdr; 405 struct xsd_sockmsg hdr;
406 const char body[16];
406 char body[16];
407 } msg; 407 } msg;
408 int rc; 408 int rc;
409 409
@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
412 msg.hdr.len = strlen(reply) + 1; 412 msg.hdr.len = strlen(reply) + 1;
413 if (msg.hdr.len > sizeof(msg.body)) 413 if (msg.hdr.len > sizeof(msg.body))
414 return -E2BIG; 414 return -E2BIG;
415 memcpy(&msg.body, reply, msg.hdr.len);
415 416
416 mutex_lock(&u->reply_mutex); 417 mutex_lock(&u->reply_mutex);
417 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); 418 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
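The xenbus_dev_frontend fix above is why body loses its const: the reply string was never copied into msg at all, so queue_reply() shipped sizeof(msg.hdr) + msg.hdr.len bytes whose body portion was stack garbage, and a const array could not be the destination of the needed copy. A runnable userspace reduction of the fixed shape:

#include <stdio.h>
#include <string.h>

struct reply_msg {
	unsigned int len;
	char body[16];            /* must be writable to memcpy into */
};

int main(void)
{
	struct reply_msg msg;
	const char *reply = "OK";

	msg.len = (unsigned int)strlen(reply) + 1;
	if (msg.len > sizeof(msg.body))
		return 1;         /* the driver returns -E2BIG here */
	memcpy(msg.body, reply, msg.len);
	printf("queued body: %s\n", msg.body);
	return 0;
}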