aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/nfit/mce.c2
-rw-r--r--drivers/base/power/wakeup.c11
-rw-r--r--drivers/block/drbd/drbd_req.c27
-rw-r--r--drivers/block/xen-blkback/xenbus.c8
-rw-r--r--drivers/char/lp.c6
-rw-r--r--drivers/char/mem.c5
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/edac/amd64_edac.c40
-rw-r--r--drivers/firmware/efi/efi-pstore.c29
-rw-r--r--drivers/firmware/google/vpd.c21
-rw-r--r--drivers/firmware/ti_sci.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c47
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c36
-rw-r--r--drivers/gpu/drm/drm_plane.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c18
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c4
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/hwmon/coretemp.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c18
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c6
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c25
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/i2c-mux.c26
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c21
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c30
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c2
-rw-r--r--drivers/iommu/dma-iommu.c13
-rw-r--r--drivers/iommu/intel-iommu.c5
-rw-r--r--drivers/iommu/mtk_iommu_v1.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c17
-rw-r--r--drivers/leds/leds-pca955x.c2
-rw-r--r--drivers/md/dm-bufio.c16
-rw-r--r--drivers/md/dm-cache-background-tracker.c5
-rw-r--r--drivers/md/dm-cache-policy-smq.c31
-rw-r--r--drivers/md/dm-cache-target.c27
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/md.c20
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c15
-rw-r--r--drivers/md/raid0.c116
-rw-r--r--drivers/md/raid1.c21
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5-cache.c47
-rw-r--r--drivers/md/raid5-log.h3
-rw-r--r--drivers/md/raid5.c79
-rw-r--r--drivers/memory/omap-gpmc.c2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/mmc/core/pwrseq_simple.c7
-rw-r--r--drivers/mmc/host/cavium-octeon.c15
-rw-r--r--drivers/mmc/host/cavium-thunderx.c6
-rw-r--r--drivers/mmc/host/cavium.c25
-rw-r--r--drivers/mmc/host/sdhci-iproc.c3
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c14
-rw-r--r--drivers/mmc/host/sdhci-xenon.c6
-rw-r--r--drivers/mmc/host/sdhci-xenon.h1
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c16
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c3
-rw-r--r--drivers/net/ethernet/8390/ax88796.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c12
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c34
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c10
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c3
-rw-r--r--drivers/net/ethernet/sfc/nic.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c1
-rw-r--r--drivers/net/geneve.c8
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/marvell.c66
-rw-r--r--drivers/net/phy/mdio-mux.c11
-rw-r--r--drivers/net/phy/mdio_bus.c6
-rw-r--r--drivers/net/usb/cdc_ether.c31
-rw-r--r--drivers/net/usb/ch9200.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/smsc95xx.c13
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c5
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/xen-netfront.c3
-rw-r--r--drivers/nvme/host/core.c65
-rw-r--r--drivers/nvme/host/fc.c157
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c22
-rw-r--r--drivers/nvme/host/rdma.c20
-rw-r--r--drivers/nvme/target/core.c6
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvme/target/fcloop.c1
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c1
-rw-r--r--drivers/of/fdt.c3
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/platform.c3
-rw-r--r--drivers/pci/dwc/pci-imx6.c33
-rw-r--r--drivers/pci/endpoint/Kconfig1
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c16
-rw-r--r--drivers/powercap/powercap_sys.c1
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c21
-rw-r--r--drivers/s390/net/qeth_core_sys.c24
-rw-r--r--drivers/s390/net/qeth_l2.h2
-rw-r--r--drivers/s390/net/qeth_l2_main.c26
-rw-r--r--drivers/s390/net/qeth_l2_sys.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/virtio/virtio_ccw.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c5
-rw-r--r--drivers/scsi/cxlflash/Kconfig1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c15
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c146
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c100
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c415
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c376
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qedf/qedf.h2
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/sd.c63
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c7
-rw-r--r--drivers/soc/bcm/brcmstb/common.c2
-rw-r--r--drivers/soc/imx/Kconfig3
-rw-r--r--drivers/soc/ti/knav_dma.c2
-rw-r--r--drivers/staging/android/ion/devicetree.txt51
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c1
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c24
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c15
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c1
-rw-r--r--drivers/staging/typec/fusb302/fusb302.c86
-rw-r--r--drivers/staging/typec/pd.h10
-rw-r--r--drivers/staging/typec/pd_vdo.h4
-rw-r--r--drivers/staging/typec/tcpci.c2
-rw-r--r--drivers/staging/typec/tcpm.c77
-rw-r--r--drivers/staging/typec/tcpm.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c31
-rw-r--r--drivers/tee/Kconfig1
-rw-r--r--drivers/thermal/broadcom/Kconfig9
-rw-r--r--drivers/thermal/qoriq_thermal.c3
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c14
-rw-r--r--drivers/tty/ehv_bytechan.c17
-rw-r--r--drivers/tty/serdev/core.c12
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c21
-rw-r--r--drivers/tty/serial/8250/8250_port.c21
-rw-r--r--drivers/tty/serial/altera_jtaguart.c1
-rw-r--r--drivers/tty/serial/altera_uart.c1
-rw-r--r--drivers/tty/serial/efm32-uart.c11
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/imx.c14
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/tty_port.c75
-rw-r--r--drivers/uio/uio.c8
-rw-r--r--drivers/usb/core/devio.c14
-rw-r--r--drivers/usb/core/hcd.c5
-rw-r--r--drivers/usb/core/hub.c27
-rw-r--r--drivers/usb/core/of.c3
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c21
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/u_serial.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c6
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c11
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-ring.c20
-rw-r--r--drivers/usb/host/xhci.c13
-rw-r--r--drivers/usb/misc/chaoskey.c2
-rw-r--r--drivers/usb/misc/iowarrior.c2
-rw-r--r--drivers/usb/misc/legousbtower.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/musb/musb_host.c9
-rw-r--r--drivers/usb/musb/tusb6010_omap.c13
-rw-r--r--drivers/usb/serial/ftdi_sio.c10
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h2
-rw-r--r--drivers/usb/serial/io_ti.c5
-rw-r--r--drivers/usb/serial/ir-usb.c21
-rw-r--r--drivers/usb/serial/mct_u232.c2
-rw-r--r--drivers/usb/serial/option.c8
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/storage/ene_ub6250.c90
-rw-r--r--drivers/usb/usbip/vhci_hcd.c11
-rw-r--r--drivers/uwb/i1480/dfu/usb.c5
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c3
-rw-r--r--drivers/watchdog/cadence_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c22
-rw-r--r--drivers/watchdog/pcwd_usb.c3
-rw-r--r--drivers/watchdog/sama5d4_wdt.c77
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/watchdog/zx2967_wdt.c4
282 files changed, 2948 insertions, 1609 deletions
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index b7c2a06963d6..25aba9b107dd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -57,6 +57,7 @@
57 57
58#define ACPI_BUTTON_LID_INIT_IGNORE 0x00 58#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
59#define ACPI_BUTTON_LID_INIT_OPEN 0x01 59#define ACPI_BUTTON_LID_INIT_OPEN 0x01
60#define ACPI_BUTTON_LID_INIT_METHOD 0x02
60 61
61#define _COMPONENT ACPI_BUTTON_COMPONENT 62#define _COMPONENT ACPI_BUTTON_COMPONENT
62ACPI_MODULE_NAME("button"); 63ACPI_MODULE_NAME("button");
@@ -376,6 +377,9 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
376 case ACPI_BUTTON_LID_INIT_OPEN: 377 case ACPI_BUTTON_LID_INIT_OPEN:
377 (void)acpi_lid_notify_state(device, 1); 378 (void)acpi_lid_notify_state(device, 1);
378 break; 379 break;
380 case ACPI_BUTTON_LID_INIT_METHOD:
381 (void)acpi_lid_update_state(device);
382 break;
379 case ACPI_BUTTON_LID_INIT_IGNORE: 383 case ACPI_BUTTON_LID_INIT_IGNORE:
380 default: 384 default:
381 break; 385 break;
@@ -560,6 +564,9 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
560 if (!strncmp(val, "open", sizeof("open") - 1)) { 564 if (!strncmp(val, "open", sizeof("open") - 1)) {
561 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; 565 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
562 pr_info("Notify initial lid state as open\n"); 566 pr_info("Notify initial lid state as open\n");
567 } else if (!strncmp(val, "method", sizeof("method") - 1)) {
568 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
569 pr_info("Notify initial lid state with _LID return value\n");
563 } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) { 570 } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
564 lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE; 571 lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
565 pr_info("Do not notify initial lid state\n"); 572 pr_info("Do not notify initial lid state\n");
@@ -573,6 +580,8 @@ static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
573 switch (lid_init_state) { 580 switch (lid_init_state) {
574 case ACPI_BUTTON_LID_INIT_OPEN: 581 case ACPI_BUTTON_LID_INIT_OPEN:
575 return sprintf(buffer, "open"); 582 return sprintf(buffer, "open");
583 case ACPI_BUTTON_LID_INIT_METHOD:
584 return sprintf(buffer, "method");
576 case ACPI_BUTTON_LID_INIT_IGNORE: 585 case ACPI_BUTTON_LID_INIT_IGNORE:
577 return sprintf(buffer, "ignore"); 586 return sprintf(buffer, "ignore");
578 default: 587 default:
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 3ba1c3472cf9..fd86bec98dea 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
26 struct nfit_spa *nfit_spa; 26 struct nfit_spa *nfit_spa;
27 27
28 /* We only care about memory errors */ 28 /* We only care about memory errors */
29 if (!(mce->status & MCACOD)) 29 if (!mce_is_memory_error(mce))
30 return NOTIFY_DONE; 30 return NOTIFY_DONE;
31 31
32 /* 32 /*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index f62082fdd670..9c36b27996fc 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -512,13 +512,12 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
512/** 512/**
513 * wakup_source_activate - Mark given wakeup source as active. 513 * wakup_source_activate - Mark given wakeup source as active.
514 * @ws: Wakeup source to handle. 514 * @ws: Wakeup source to handle.
515 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
516 * 515 *
517 * Update the @ws' statistics and, if @ws has just been activated, notify the PM 516 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
518 * core of the event by incrementing the counter of of wakeup events being 517 * core of the event by incrementing the counter of of wakeup events being
519 * processed. 518 * processed.
520 */ 519 */
521static void wakeup_source_activate(struct wakeup_source *ws, bool hard) 520static void wakeup_source_activate(struct wakeup_source *ws)
522{ 521{
523 unsigned int cec; 522 unsigned int cec;
524 523
@@ -526,9 +525,6 @@ static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
526 "unregistered wakeup source\n")) 525 "unregistered wakeup source\n"))
527 return; 526 return;
528 527
529 if (hard)
530 pm_system_wakeup();
531
532 ws->active = true; 528 ws->active = true;
533 ws->active_count++; 529 ws->active_count++;
534 ws->last_time = ktime_get(); 530 ws->last_time = ktime_get();
@@ -554,7 +550,10 @@ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
554 ws->wakeup_count++; 550 ws->wakeup_count++;
555 551
556 if (!ws->active) 552 if (!ws->active)
557 wakeup_source_activate(ws, hard); 553 wakeup_source_activate(ws);
554
555 if (hard)
556 pm_system_wakeup();
558} 557}
559 558
560/** 559/**
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index b5730e17b455..656624314f0d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
315} 315}
316 316
317/* still holds resource->req_lock */ 317/* still holds resource->req_lock */
318static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) 318static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
319{ 319{
320 struct drbd_device *device = req->device; 320 struct drbd_device *device = req->device;
321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); 321 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
322 322
323 if (!put)
324 return;
325
323 if (!atomic_sub_and_test(put, &req->completion_ref)) 326 if (!atomic_sub_and_test(put, &req->completion_ref))
324 return 0; 327 return;
325 328
326 drbd_req_complete(req, m); 329 drbd_req_complete(req, m);
327 330
331 /* local completion may still come in later,
332 * we need to keep the req object around. */
333 if (req->rq_state & RQ_LOCAL_ABORTED)
334 return;
335
328 if (req->rq_state & RQ_POSTPONED) { 336 if (req->rq_state & RQ_POSTPONED) {
329 /* don't destroy the req object just yet, 337 /* don't destroy the req object just yet,
330 * but queue it for retry */ 338 * but queue it for retry */
331 drbd_restart_request(req); 339 drbd_restart_request(req);
332 return 0; 340 return;
333 } 341 }
334 342
335 return 1; 343 kref_put(&req->kref, drbd_req_destroy);
336} 344}
337 345
338static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) 346static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
519 if (req->i.waiting) 527 if (req->i.waiting)
520 wake_up(&device->misc_wait); 528 wake_up(&device->misc_wait);
521 529
522 if (c_put) { 530 drbd_req_put_completion_ref(req, m, c_put);
523 if (drbd_req_put_completion_ref(req, m, c_put)) 531 kref_put(&req->kref, drbd_req_destroy);
524 kref_put(&req->kref, drbd_req_destroy);
525 } else {
526 kref_put(&req->kref, drbd_req_destroy);
527 }
528} 532}
529 533
530static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) 534static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@@ -1366,8 +1370,7 @@ nodata:
1366 } 1370 }
1367 1371
1368out: 1372out:
1369 if (drbd_req_put_completion_ref(req, &m, 1)) 1373 drbd_req_put_completion_ref(req, &m, 1);
1370 kref_put(&req->kref, drbd_req_destroy);
1371 spin_unlock_irq(&resource->req_lock); 1374 spin_unlock_irq(&resource->req_lock);
1372 1375
1373 /* Even though above is a kref_put(), this is safe. 1376 /* Even though above is a kref_put(), this is safe.
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8fe61b5dc5a6..1f3dfaa54d87 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -504,11 +504,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
504 504
505 dev_set_drvdata(&dev->dev, NULL); 505 dev_set_drvdata(&dev->dev, NULL);
506 506
507 if (be->blkif) 507 if (be->blkif) {
508 xen_blkif_disconnect(be->blkif); 508 xen_blkif_disconnect(be->blkif);
509 509
510 /* Put the reference we set in xen_blkif_alloc(). */ 510 /* Put the reference we set in xen_blkif_alloc(). */
511 xen_blkif_put(be->blkif); 511 xen_blkif_put(be->blkif);
512 }
513
512 kfree(be->mode); 514 kfree(be->mode);
513 kfree(be); 515 kfree(be);
514 return 0; 516 return 0;
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 565e4cf04a02..8249762192d5 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
859 } else if (!strcmp(str, "auto")) { 859 } else if (!strcmp(str, "auto")) {
860 parport_nr[0] = LP_PARPORT_AUTO; 860 parport_nr[0] = LP_PARPORT_AUTO;
861 } else if (!strcmp(str, "none")) { 861 } else if (!strcmp(str, "none")) {
862 parport_nr[parport_ptr++] = LP_PARPORT_NONE; 862 if (parport_ptr < LP_NO)
863 parport_nr[parport_ptr++] = LP_PARPORT_NONE;
864 else
865 printk(KERN_INFO "lp: too many ports, %s ignored.\n",
866 str);
863 } else if (!strcmp(str, "reset")) { 867 } else if (!strcmp(str, "reset")) {
864 reset = 1; 868 reset = 1;
865 } 869 }
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7e4a9d1296bb..6e0cbe092220 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
340static int mmap_mem(struct file *file, struct vm_area_struct *vma) 340static int mmap_mem(struct file *file, struct vm_area_struct *vma)
341{ 341{
342 size_t size = vma->vm_end - vma->vm_start; 342 size_t size = vma->vm_end - vma->vm_start;
343 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
344
345 /* It's illegal to wrap around the end of the physical address space. */
346 if (offset + (phys_addr_t)size < offset)
347 return -EINVAL;
343 348
344 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
345 return -EINVAL; 350 return -EINVAL;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 74ed7e9a7f27..2011fec2d6ad 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,6 +71,15 @@ config ARM_HIGHBANK_CPUFREQ
71 71
72 If in doubt, say N. 72 If in doubt, say N.
73 73
74config ARM_DB8500_CPUFREQ
75 tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500
76 default ARCH_U8500
77 depends on HAS_IOMEM
78 depends on !CPU_THERMAL || THERMAL
79 help
80 This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC
81 series.
82
74config ARM_IMX6Q_CPUFREQ 83config ARM_IMX6Q_CPUFREQ
75 tristate "Freescale i.MX6 cpufreq support" 84 tristate "Freescale i.MX6 cpufreq support"
76 depends on ARCH_MXC 85 depends on ARCH_MXC
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b7e78f063c4f..ab3a42cd29ef 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
53 53
54obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o 54obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
55obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o 55obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
56obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 56obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o
57obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o 57obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o 58obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o 59obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ebf43f531ada..6ed32aac8bbe 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -44,6 +44,7 @@ void dax_read_unlock(int id)
44} 44}
45EXPORT_SYMBOL_GPL(dax_read_unlock); 45EXPORT_SYMBOL_GPL(dax_read_unlock);
46 46
47#ifdef CONFIG_BLOCK
47int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, 48int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
48 pgoff_t *pgoff) 49 pgoff_t *pgoff)
49{ 50{
@@ -112,6 +113,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
112 return 0; 113 return 0;
113} 114}
114EXPORT_SYMBOL_GPL(__bdev_dax_supported); 115EXPORT_SYMBOL_GPL(__bdev_dax_supported);
116#endif
115 117
116/** 118/**
117 * struct dax_device - anchor object for dax services 119 * struct dax_device - anchor object for dax services
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 82dab1692264..3aea55698165 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
782 782
783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) 783static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
784{ 784{
785 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 785 int dimm, size0, size1, cs0, cs1;
786 int dimm, size0, size1;
787 786
788 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); 787 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
789 788
790 for (dimm = 0; dimm < 4; dimm++) { 789 for (dimm = 0; dimm < 4; dimm++) {
791 size0 = 0; 790 size0 = 0;
791 cs0 = dimm * 2;
792 792
793 if (dcsb[dimm*2] & DCSB_CS_ENABLE) 793 if (csrow_enabled(cs0, ctrl, pvt))
794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 794 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
795 795
796 size1 = 0; 796 size1 = 0;
797 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) 797 cs1 = dimm * 2 + 1;
798 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); 798
799 if (csrow_enabled(cs1, ctrl, pvt))
800 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
799 801
800 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", 802 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
801 dimm * 2, size0, 803 cs0, size0,
802 dimm * 2 + 1, size1); 804 cs1, size1);
803 } 805 }
804} 806}
805 807
@@ -2756,26 +2758,22 @@ skip:
2756 * encompasses 2758 * encompasses
2757 * 2759 *
2758 */ 2760 */
2759static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2761static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2760{ 2762{
2761 u32 cs_mode, nr_pages;
2762 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2763 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2764 int csrow_nr = csrow_nr_orig;
2765 u32 cs_mode, nr_pages;
2763 2766
2767 if (!pvt->umc)
2768 csrow_nr >>= 1;
2764 2769
2765 /* 2770 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2766 * The math on this doesn't look right on the surface because x/2*4 can
2767 * be simplified to x*2 but this expression makes use of the fact that
2768 * it is integral math where 1/2=0. This intermediate value becomes the
2769 * number of bits to shift the DBAM register to extract the proper CSROW
2770 * field.
2771 */
2772 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2773 2771
2774 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) 2772 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2775 << (20 - PAGE_SHIFT); 2773 nr_pages <<= 20 - PAGE_SHIFT;
2776 2774
2777 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", 2775 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2778 csrow_nr, dct, cs_mode); 2776 csrow_nr_orig, dct, cs_mode);
2779 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); 2777 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2780 2778
2781 return nr_pages; 2779 return nr_pages;
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index ed3137c1ceb0..ef1fafdad400 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -53,6 +53,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
53 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", 53 if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
54 &record->type, &part, &cnt, &time, &data_type) == 5) { 54 &record->type, &part, &cnt, &time, &data_type) == 5) {
55 record->id = generic_id(time, part, cnt); 55 record->id = generic_id(time, part, cnt);
56 record->part = part;
56 record->count = cnt; 57 record->count = cnt;
57 record->time.tv_sec = time; 58 record->time.tv_sec = time;
58 record->time.tv_nsec = 0; 59 record->time.tv_nsec = 0;
@@ -64,6 +65,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
64 } else if (sscanf(name, "dump-type%u-%u-%d-%lu", 65 } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
65 &record->type, &part, &cnt, &time) == 4) { 66 &record->type, &part, &cnt, &time) == 4) {
66 record->id = generic_id(time, part, cnt); 67 record->id = generic_id(time, part, cnt);
68 record->part = part;
67 record->count = cnt; 69 record->count = cnt;
68 record->time.tv_sec = time; 70 record->time.tv_sec = time;
69 record->time.tv_nsec = 0; 71 record->time.tv_nsec = 0;
@@ -77,6 +79,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry,
77 * multiple logs, remains. 79 * multiple logs, remains.
78 */ 80 */
79 record->id = generic_id(time, part, 0); 81 record->id = generic_id(time, part, 0);
82 record->part = part;
80 record->count = 0; 83 record->count = 0;
81 record->time.tv_sec = time; 84 record->time.tv_sec = time;
82 record->time.tv_nsec = 0; 85 record->time.tv_nsec = 0;
@@ -155,19 +158,14 @@ static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
155 * efi_pstore_sysfs_entry_iter 158 * efi_pstore_sysfs_entry_iter
156 * 159 *
157 * @record: pstore record to pass to callback 160 * @record: pstore record to pass to callback
158 * @pos: entry to begin iterating from
159 * 161 *
160 * You MUST call efivar_enter_iter_begin() before this function, and 162 * You MUST call efivar_enter_iter_begin() before this function, and
161 * efivar_entry_iter_end() afterwards. 163 * efivar_entry_iter_end() afterwards.
162 * 164 *
163 * It is possible to begin iteration from an arbitrary entry within
164 * the list by passing @pos. @pos is updated on return to point to
165 * the next entry of the last one passed to efi_pstore_read_func().
166 * To begin iterating from the beginning of the list @pos must be %NULL.
167 */ 165 */
168static int efi_pstore_sysfs_entry_iter(struct pstore_record *record, 166static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
169 struct efivar_entry **pos)
170{ 167{
168 struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
171 struct efivar_entry *entry, *n; 169 struct efivar_entry *entry, *n;
172 struct list_head *head = &efivar_sysfs_list; 170 struct list_head *head = &efivar_sysfs_list;
173 int size = 0; 171 int size = 0;
@@ -218,7 +216,6 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record,
218 */ 216 */
219static ssize_t efi_pstore_read(struct pstore_record *record) 217static ssize_t efi_pstore_read(struct pstore_record *record)
220{ 218{
221 struct efivar_entry *entry = (struct efivar_entry *)record->psi->data;
222 ssize_t size; 219 ssize_t size;
223 220
224 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); 221 record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -229,7 +226,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record)
229 size = -EINTR; 226 size = -EINTR;
230 goto out; 227 goto out;
231 } 228 }
232 size = efi_pstore_sysfs_entry_iter(record, &entry); 229 size = efi_pstore_sysfs_entry_iter(record);
233 efivar_entry_iter_end(); 230 efivar_entry_iter_end();
234 231
235out: 232out:
@@ -247,9 +244,15 @@ static int efi_pstore_write(struct pstore_record *record)
247 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 244 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
248 int i, ret = 0; 245 int i, ret = 0;
249 246
247 record->time.tv_sec = get_seconds();
248 record->time.tv_nsec = 0;
249
250 record->id = generic_id(record->time.tv_sec, record->part,
251 record->count);
252
250 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", 253 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c",
251 record->type, record->part, record->count, 254 record->type, record->part, record->count,
252 get_seconds(), record->compressed ? 'C' : 'D'); 255 record->time.tv_sec, record->compressed ? 'C' : 'D');
253 256
254 for (i = 0; i < DUMP_NAME_LEN; i++) 257 for (i = 0; i < DUMP_NAME_LEN; i++)
255 efi_name[i] = name[i]; 258 efi_name[i] = name[i];
@@ -261,7 +264,6 @@ static int efi_pstore_write(struct pstore_record *record)
261 if (record->reason == KMSG_DUMP_OOPS) 264 if (record->reason == KMSG_DUMP_OOPS)
262 efivar_run_worker(); 265 efivar_run_worker();
263 266
264 record->id = record->part;
265 return ret; 267 return ret;
266}; 268};
267 269
@@ -293,7 +295,7 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
293 * holding multiple logs, remains. 295 * holding multiple logs, remains.
294 */ 296 */
295 snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu", 297 snprintf(name_old, sizeof(name_old), "dump-type%u-%u-%lu",
296 ed->record->type, (unsigned int)ed->record->id, 298 ed->record->type, ed->record->part,
297 ed->record->time.tv_sec); 299 ed->record->time.tv_sec);
298 300
299 for (i = 0; i < DUMP_NAME_LEN; i++) 301 for (i = 0; i < DUMP_NAME_LEN; i++)
@@ -326,10 +328,7 @@ static int efi_pstore_erase(struct pstore_record *record)
326 char name[DUMP_NAME_LEN]; 328 char name[DUMP_NAME_LEN];
327 efi_char16_t efi_name[DUMP_NAME_LEN]; 329 efi_char16_t efi_name[DUMP_NAME_LEN];
328 int found, i; 330 int found, i;
329 unsigned int part;
330 331
331 do_div(record->id, 1000);
332 part = do_div(record->id, 100);
333 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", 332 snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu",
334 record->type, record->part, record->count, 333 record->type, record->part, record->count,
335 record->time.tv_sec); 334 record->time.tv_sec);
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 3ce813110d5e..1e7860f02f4f 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -116,9 +116,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
116 return VPD_OK; 116 return VPD_OK;
117 117
118 info = kzalloc(sizeof(*info), GFP_KERNEL); 118 info = kzalloc(sizeof(*info), GFP_KERNEL);
119 info->key = kzalloc(key_len + 1, GFP_KERNEL); 119 if (!info)
120 if (!info->key)
121 return -ENOMEM; 120 return -ENOMEM;
121 info->key = kzalloc(key_len + 1, GFP_KERNEL);
122 if (!info->key) {
123 ret = -ENOMEM;
124 goto free_info;
125 }
122 126
123 memcpy(info->key, key, key_len); 127 memcpy(info->key, key, key_len);
124 128
@@ -135,12 +139,17 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
135 list_add_tail(&info->list, &sec->attribs); 139 list_add_tail(&info->list, &sec->attribs);
136 140
137 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 141 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
138 if (ret) { 142 if (ret)
139 kfree(info->key); 143 goto free_info_key;
140 return ret;
141 }
142 144
143 return 0; 145 return 0;
146
147free_info_key:
148 kfree(info->key);
149free_info:
150 kfree(info);
151
152 return ret;
144} 153}
145 154
146static void vpd_section_attrib_destroy(struct vpd_section *sec) 155static void vpd_section_attrib_destroy(struct vpd_section *sec)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 874ff32db366..00cfed3c3e1a 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -202,7 +202,8 @@ static int ti_sci_debugfs_create(struct platform_device *pdev,
202 info->debug_buffer[info->debug_region_size] = 0; 202 info->debug_buffer[info->debug_region_size] = 0;
203 203
204 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), 204 info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
205 sizeof(debug_name)), 205 sizeof(debug_name) -
206 sizeof("ti_sci_debug@")),
206 0444, NULL, info, &ti_sci_debug_fops); 207 0444, NULL, info, &ti_sci_debug_fops);
207 if (IS_ERR(info->d)) 208 if (IS_ERR(info->d))
208 return PTR_ERR(info->d); 209 return PTR_ERR(info->d);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 236d9950221b..c0d8c6ff6380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
425 425
426void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) 426void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
427{ 427{
428 struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; 428 struct amdgpu_fbdev *afbdev;
429 struct drm_fb_helper *fb_helper; 429 struct drm_fb_helper *fb_helper;
430 int ret; 430 int ret;
431 431
432 if (!adev)
433 return;
434
435 afbdev = adev->mode_info.rfbdev;
436
432 if (!afbdev) 437 if (!afbdev)
433 return; 438 return;
434 439
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 749a6cde7985..83c172a6e938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -635,7 +635,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
635 mutex_unlock(&id_mgr->lock); 635 mutex_unlock(&id_mgr->lock);
636 } 636 }
637 637
638 if (gds_switch_needed) { 638 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
639 id->gds_base = job->gds_base; 639 id->gds_base = job->gds_base;
640 id->gds_size = job->gds_size; 640 id->gds_size = job->gds_size;
641 id->gws_base = job->gws_base; 641 id->gws_base = job->gws_base;
@@ -673,6 +673,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
673 struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 673 struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
674 struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; 674 struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
675 675
676 atomic64_set(&id->owner, 0);
676 id->gds_base = 0; 677 id->gds_base = 0;
677 id->gds_size = 0; 678 id->gds_size = 0;
678 id->gws_base = 0; 679 id->gws_base = 0;
@@ -682,6 +683,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
682} 683}
683 684
684/** 685/**
 686 * amdgpu_vm_reset_all_ids - reset VMID to zero
687 *
688 * @adev: amdgpu device structure
689 *
690 * Reset VMID to force flush on next use
691 */
692void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
693{
694 unsigned i, j;
695
696 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
697 struct amdgpu_vm_id_manager *id_mgr =
698 &adev->vm_manager.id_mgr[i];
699
700 for (j = 1; j < id_mgr->num_ids; ++j)
701 amdgpu_vm_reset_id(adev, i, j);
702 }
703}
704
705/**
685 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo 706 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
686 * 707 *
687 * @vm: requested vm 708 * @vm: requested vm
@@ -2271,7 +2292,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2271 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 2292 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2272 adev->vm_manager.seqno[i] = 0; 2293 adev->vm_manager.seqno[i] = 0;
2273 2294
2274
2275 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); 2295 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2276 atomic64_set(&adev->vm_manager.client_counter, 0); 2296 atomic64_set(&adev->vm_manager.client_counter, 0);
2277 spin_lock_init(&adev->vm_manager.prt_lock); 2297 spin_lock_init(&adev->vm_manager.prt_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d97e28b4bdc4..e1d951ece433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
204int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); 204int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
205void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, 205void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
206 unsigned vmid); 206 unsigned vmid);
207void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
207int amdgpu_vm_update_directories(struct amdgpu_device *adev, 208int amdgpu_vm_update_directories(struct amdgpu_device *adev,
208 struct amdgpu_vm *vm); 209 struct amdgpu_vm *vm);
209int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 210int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 00e56a28b593..cb508a211b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
906 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 906 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
907 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; 907 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
908 908
909 /* disable mclk switching if the refresh is >120Hz, even if the
910 * blanking period would allow it
911 */
912 if (amdgpu_dpm_get_vrefresh(adev) > 120)
913 return true;
914
909 if (vblank_time < switch_limit) 915 if (vblank_time < switch_limit)
910 return true; 916 return true;
911 else 917 else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 2a3983036a30..9776ad3d2d71 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
950{ 950{
951 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 951 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
952 952
953 if (adev->vm_manager.enabled) {
954 gmc_v6_0_vm_fini(adev);
955 adev->vm_manager.enabled = false;
956 }
957 gmc_v6_0_hw_fini(adev); 953 gmc_v6_0_hw_fini(adev);
958 954
959 return 0; 955 return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
968 if (r) 964 if (r)
969 return r; 965 return r;
970 966
971 if (!adev->vm_manager.enabled) { 967 amdgpu_vm_reset_all_ids(adev);
972 r = gmc_v6_0_vm_init(adev);
973 if (r) {
974 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
975 return r;
976 }
977 adev->vm_manager.enabled = true;
978 }
979 968
980 return r; 969 return 0;
981} 970}
982 971
983static bool gmc_v6_0_is_idle(void *handle) 972static bool gmc_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 6d347c1d2516..fca8e77182c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
1117{ 1117{
1118 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1118 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1119 1119
1120 if (adev->vm_manager.enabled) {
1121 gmc_v7_0_vm_fini(adev);
1122 adev->vm_manager.enabled = false;
1123 }
1124 gmc_v7_0_hw_fini(adev); 1120 gmc_v7_0_hw_fini(adev);
1125 1121
1126 return 0; 1122 return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
1135 if (r) 1131 if (r)
1136 return r; 1132 return r;
1137 1133
1138 if (!adev->vm_manager.enabled) { 1134 amdgpu_vm_reset_all_ids(adev);
1139 r = gmc_v7_0_vm_init(adev);
1140 if (r) {
1141 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1142 return r;
1143 }
1144 adev->vm_manager.enabled = true;
1145 }
1146 1135
1147 return r; 1136 return 0;
1148} 1137}
1149 1138
1150static bool gmc_v7_0_is_idle(void *handle) 1139static bool gmc_v7_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2784ff49cf56..e9c127037b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
1209{ 1209{
1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1210 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1211 1211
1212 if (adev->vm_manager.enabled) {
1213 gmc_v8_0_vm_fini(adev);
1214 adev->vm_manager.enabled = false;
1215 }
1216 gmc_v8_0_hw_fini(adev); 1212 gmc_v8_0_hw_fini(adev);
1217 1213
1218 return 0; 1214 return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
1227 if (r) 1223 if (r)
1228 return r; 1224 return r;
1229 1225
1230 if (!adev->vm_manager.enabled) { 1226 amdgpu_vm_reset_all_ids(adev);
1231 r = gmc_v8_0_vm_init(adev);
1232 if (r) {
1233 dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
1234 return r;
1235 }
1236 adev->vm_manager.enabled = true;
1237 }
1238 1227
1239 return r; 1228 return 0;
1240} 1229}
1241 1230
1242static bool gmc_v8_0_is_idle(void *handle) 1231static bool gmc_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dc1e1c1d6b24..f936332a069d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
791{ 791{
792 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
793 793
794 if (adev->vm_manager.enabled) {
795 gmc_v9_0_vm_fini(adev);
796 adev->vm_manager.enabled = false;
797 }
798 gmc_v9_0_hw_fini(adev); 794 gmc_v9_0_hw_fini(adev);
799 795
800 return 0; 796 return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
809 if (r) 805 if (r)
810 return r; 806 return r;
811 807
812 if (!adev->vm_manager.enabled) { 808 amdgpu_vm_reset_all_ids(adev);
813 r = gmc_v9_0_vm_init(adev);
814 if (r) {
815 dev_err(adev->dev,
816 "vm manager initialization failed (%d).\n", r);
817 return r;
818 }
819 adev->vm_manager.enabled = true;
820 }
821 809
822 return r; 810 return 0;
823} 811}
824 812
825static bool gmc_v9_0_is_idle(void *handle) 813static bool gmc_v9_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 975567f6813d..1f01020ce3a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2655 return sizeof(struct smu7_power_state); 2655 return sizeof(struct smu7_power_state);
2656} 2656}
2657 2657
2658static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2659 uint32_t vblank_time_us)
2660{
2661 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2662 uint32_t switch_limit_us;
2663
2664 switch (hwmgr->chip_id) {
2665 case CHIP_POLARIS10:
2666 case CHIP_POLARIS11:
2667 case CHIP_POLARIS12:
2668 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2669 break;
2670 default:
2671 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2672 break;
2673 }
2674
2675 if (vblank_time_us < switch_limit_us)
2676 return true;
2677 else
2678 return false;
2679}
2658 2680
2659static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 2681static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2660 struct pp_power_state *request_ps, 2682 struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2669 bool disable_mclk_switching; 2691 bool disable_mclk_switching;
2670 bool disable_mclk_switching_for_frame_lock; 2692 bool disable_mclk_switching_for_frame_lock;
2671 struct cgs_display_info info = {0}; 2693 struct cgs_display_info info = {0};
2694 struct cgs_mode_info mode_info = {0};
2672 const struct phm_clock_and_voltage_limits *max_limits; 2695 const struct phm_clock_and_voltage_limits *max_limits;
2673 uint32_t i; 2696 uint32_t i;
2674 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2697 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2677 int32_t count; 2700 int32_t count;
2678 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; 2701 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2679 2702
2703 info.mode_info = &mode_info;
2680 data->battery_state = (PP_StateUILabel_Battery == 2704 data->battery_state = (PP_StateUILabel_Battery ==
2681 request_ps->classification.ui_label); 2705 request_ps->classification.ui_label);
2682 2706
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2703 2727
2704 cgs_get_active_displays_info(hwmgr->device, &info); 2728 cgs_get_active_displays_info(hwmgr->device, &info);
2705 2729
2706 /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2707
2708 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; 2730 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2709 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; 2731 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
2710 2732
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2769 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 2791 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2770 2792
2771 2793
2772 disable_mclk_switching = (1 < info.display_count) || 2794 disable_mclk_switching = ((1 < info.display_count) ||
2773 disable_mclk_switching_for_frame_lock; 2795 disable_mclk_switching_for_frame_lock ||
2796 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
2797 (mode_info.refresh_rate > 120));
2774 2798
2775 sclk = smu7_ps->performance_levels[0].engine_clock; 2799 sclk = smu7_ps->performance_levels[0].engine_clock;
2776 mclk = smu7_ps->performance_levels[0].memory_clock; 2800 mclk = smu7_ps->performance_levels[0].memory_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index cd7bf6d3859a..ab17350e853d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4187,7 +4187,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4187 enum pp_clock_type type, uint32_t mask) 4187 enum pp_clock_type type, uint32_t mask)
4188{ 4188{
4189 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); 4189 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4190 uint32_t i; 4190 int i;
4191 4191
4192 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 4192 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4193 return -EINVAL; 4193 return -EINVAL;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 798a3cc480a2..1a3359c0f6cd 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_atomic.h>
13#include <drm/drm_atomic_helper.h> 14#include <drm/drm_atomic_helper.h>
14#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
15#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
226static int hdlcd_plane_atomic_check(struct drm_plane *plane, 227static int hdlcd_plane_atomic_check(struct drm_plane *plane,
227 struct drm_plane_state *state) 228 struct drm_plane_state *state)
228{ 229{
229 u32 src_w, src_h; 230 struct drm_rect clip = { 0 };
231 struct drm_crtc_state *crtc_state;
232 u32 src_h = state->src_h >> 16;
230 233
231 src_w = state->src_w >> 16; 234 /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
232 src_h = state->src_h >> 16; 235 if (src_h >= HDLCD_MAX_YRES) {
236 DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
237 return -EINVAL;
238 }
239
240 if (!state->fb || !state->crtc)
241 return 0;
233 242
234 /* we can't do any scaling of the plane source */ 243 crtc_state = drm_atomic_get_existing_crtc_state(state->state,
235 if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) 244 state->crtc);
245 if (!crtc_state) {
246 DRM_DEBUG_KMS("Invalid crtc state\n");
236 return -EINVAL; 247 return -EINVAL;
248 }
237 249
238 return 0; 250 clip.x2 = crtc_state->adjusted_mode.hdisplay;
251 clip.y2 = crtc_state->adjusted_mode.vdisplay;
252
253 return drm_plane_helper_check_state(state, &clip,
254 DRM_PLANE_HELPER_NO_SCALING,
255 DRM_PLANE_HELPER_NO_SCALING,
256 false, true);
239} 257}
240 258
241static void hdlcd_plane_atomic_update(struct drm_plane *plane, 259static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
244 struct drm_framebuffer *fb = plane->state->fb; 262 struct drm_framebuffer *fb = plane->state->fb;
245 struct hdlcd_drm_private *hdlcd; 263 struct hdlcd_drm_private *hdlcd;
246 struct drm_gem_cma_object *gem; 264 struct drm_gem_cma_object *gem;
247 u32 src_w, src_h, dest_w, dest_h; 265 u32 src_x, src_y, dest_h;
248 dma_addr_t scanout_start; 266 dma_addr_t scanout_start;
249 267
250 if (!fb) 268 if (!fb)
251 return; 269 return;
252 270
253 src_w = plane->state->src_w >> 16; 271 src_x = plane->state->src.x1 >> 16;
254 src_h = plane->state->src_h >> 16; 272 src_y = plane->state->src.y1 >> 16;
255 dest_w = plane->state->crtc_w; 273 dest_h = drm_rect_height(&plane->state->dst);
256 dest_h = plane->state->crtc_h;
257 gem = drm_fb_cma_get_gem_obj(fb, 0); 274 gem = drm_fb_cma_get_gem_obj(fb, 0);
275
258 scanout_start = gem->paddr + fb->offsets[0] + 276 scanout_start = gem->paddr + fb->offsets[0] +
259 plane->state->crtc_y * fb->pitches[0] + 277 src_y * fb->pitches[0] +
260 plane->state->crtc_x * 278 src_x * fb->format->cpp[0];
261 fb->format->cpp[0];
262 279
263 hdlcd = plane->dev->dev_private; 280 hdlcd = plane->dev->dev_private;
264 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); 281 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
305 formats, ARRAY_SIZE(formats), 322 formats, ARRAY_SIZE(formats),
306 DRM_PLANE_TYPE_PRIMARY, NULL); 323 DRM_PLANE_TYPE_PRIMARY, NULL);
307 if (ret) { 324 if (ret) {
308 devm_kfree(drm->dev, plane);
309 return ERR_PTR(ret); 325 return ERR_PTR(ret);
310 } 326 }
311 327
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
329 &hdlcd_crtc_funcs, NULL); 345 &hdlcd_crtc_funcs, NULL);
330 if (ret) { 346 if (ret) {
331 hdlcd_plane_destroy(primary); 347 hdlcd_plane_destroy(primary);
332 devm_kfree(drm->dev, primary);
333 return ret; 348 return ret;
334 } 349 }
335 350
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 65a3bd7a0c00..423dda2785d4 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
153}; 153};
154 154
155static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, 155static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
156 const struct device_node *np)
157{ 156{
158 struct atmel_hlcdc_dc *dc = dev->dev_private; 157 struct atmel_hlcdc_dc *dc = dev->dev_private;
159 struct atmel_hlcdc_rgb_output *output; 158 struct atmel_hlcdc_rgb_output *output;
@@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
161 struct drm_bridge *bridge; 160 struct drm_bridge *bridge;
162 int ret; 161 int ret;
163 162
163 ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
164 &panel, &bridge);
165 if (ret)
166 return ret;
167
164 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL); 168 output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
165 if (!output) 169 if (!output)
166 return -EINVAL; 170 return -EINVAL;
@@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
177 181
178 output->encoder.possible_crtcs = 0x1; 182 output->encoder.possible_crtcs = 0x1;
179 183
180 ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
181 if (ret)
182 return ret;
183
184 if (panel) { 184 if (panel) {
185 output->connector.dpms = DRM_MODE_DPMS_OFF; 185 output->connector.dpms = DRM_MODE_DPMS_OFF;
186 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT; 186 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -220,22 +220,14 @@ err_encoder_cleanup:
220 220
221int atmel_hlcdc_create_outputs(struct drm_device *dev) 221int atmel_hlcdc_create_outputs(struct drm_device *dev)
222{ 222{
223 struct device_node *remote; 223 int endpoint, ret = 0;
224 int ret = -ENODEV; 224
225 int endpoint = 0; 225 for (endpoint = 0; !ret; endpoint++)
226 226 ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
227 while (true) { 227
 228 /* Loop thru possible multiple connections to the output */ 228 /* At least one device was successfully attached. */
229 remote = of_graph_get_remote_node(dev->dev->of_node, 0, 229 if (ret == -ENODEV && endpoint)
230 endpoint++); 230 return 0;
231 if (!remote)
232 break;
233
234 ret = atmel_hlcdc_attach_endpoint(dev, remote);
235 of_node_put(remote);
236 if (ret)
237 return ret;
238 }
239 231
240 return ret; 232 return ret;
241} 233}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d60d9cd..5dc8c4350602 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,8 +948,6 @@ retry:
948 } 948 }
949 949
950out: 950out:
951 if (ret && crtc->funcs->page_flip_target)
952 drm_crtc_vblank_put(crtc);
953 if (fb) 951 if (fb)
954 drm_framebuffer_put(fb); 952 drm_framebuffer_put(fb);
955 if (crtc->primary->old_fb) 953 if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
964 drm_modeset_drop_locks(&ctx); 962 drm_modeset_drop_locks(&ctx);
965 drm_modeset_acquire_fini(&ctx); 963 drm_modeset_acquire_fini(&ctx);
966 964
965 if (ret && crtc->funcs->page_flip_target)
966 drm_crtc_vblank_put(crtc);
967
967 return ret; 968 return ret;
968} 969}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index a13930e1d8c9..ee7069e93eda 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
44 44
45 /* initially, until copy_from_user() and bo lookup succeeds: */ 45 /* initially, until copy_from_user() and bo lookup succeeds: */
46 submit->nr_bos = 0; 46 submit->nr_bos = 0;
47 submit->fence = NULL;
47 48
48 ww_acquire_init(&submit->ticket, &reservation_ww_class); 49 ww_acquire_init(&submit->ticket, &reservation_ww_class);
49 } 50 }
@@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
294 } 295 }
295 296
296 ww_acquire_fini(&submit->ticket); 297 ww_acquire_fini(&submit->ticket);
297 dma_fence_put(submit->fence); 298 if (submit->fence)
299 dma_fence_put(submit->fence);
298 kfree(submit); 300 kfree(submit);
299} 301}
300 302
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7e622e..be3eefec5152 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
759 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 759 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
760 mode_dev->panel_fixed_mode = 760 mode_dev->panel_fixed_mode =
761 drm_mode_duplicate(dev, scan); 761 drm_mode_duplicate(dev, scan);
762 DRM_DEBUG_KMS("Using mode from DDC\n");
762 goto out; /* FIXME: check for quirks */ 763 goto out; /* FIXME: check for quirks */
763 } 764 }
764 } 765 }
765 766
766 /* Failed to get EDID, what about VBT? do we need this? */ 767 /* Failed to get EDID, what about VBT? do we need this? */
767 if (mode_dev->vbt_mode) 768 if (dev_priv->lfp_lvds_vbt_mode) {
768 mode_dev->panel_fixed_mode = 769 mode_dev->panel_fixed_mode =
769 drm_mode_duplicate(dev, mode_dev->vbt_mode); 770 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
770 771
771 if (!mode_dev->panel_fixed_mode) 772 if (mode_dev->panel_fixed_mode) {
772 if (dev_priv->lfp_lvds_vbt_mode) 773 mode_dev->panel_fixed_mode->type |=
773 mode_dev->panel_fixed_mode = 774 DRM_MODE_TYPE_PREFERRED;
774 drm_mode_duplicate(dev, 775 DRM_DEBUG_KMS("Using mode from VBT\n");
775 dev_priv->lfp_lvds_vbt_mode); 776 goto out;
777 }
778 }
776 779
777 /* 780 /*
778 * If we didn't get EDID, try checking if the panel is already turned 781 * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
789 if (mode_dev->panel_fixed_mode) { 792 if (mode_dev->panel_fixed_mode) {
790 mode_dev->panel_fixed_mode->type |= 793 mode_dev->panel_fixed_mode->type |=
791 DRM_MODE_TYPE_PREFERRED; 794 DRM_MODE_TYPE_PREFERRED;
795 DRM_DEBUG_KMS("Using pre-programmed mode\n");
792 goto out; /* FIXME: check for quirks */ 796 goto out; /* FIXME: check for quirks */
793 } 797 }
794 } 798 }
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ad1a508e2af..c995e540ff96 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
1244 mode = vgpu_vreg(vgpu, offset); 1244 mode = vgpu_vreg(vgpu, offset);
1245 1245
1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { 1246 if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
1247 WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n", 1247 WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
1248 vgpu->id); 1248 vgpu->id);
1249 return 0; 1249 return 0;
1250 } 1250 }
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972ac21d..a5e11d89df2f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
340 } else 340 } else
341 v = mmio->value; 341 v = mmio->value;
342 342
343 if (mmio->in_context)
344 continue;
345
343 I915_WRITE(mmio->reg, v); 346 I915_WRITE(mmio->reg, v);
344 POSTING_READ(mmio->reg); 347 POSTING_READ(mmio->reg);
345 348
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 79ba4b3440aa..f25ff133865f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
129 struct vgpu_sched_data *vgpu_data; 129 struct vgpu_sched_data *vgpu_data;
130 ktime_t cur_time; 130 ktime_t cur_time;
131 131
 132 /* no target to schedule */ 132 /* no need to schedule if next_vgpu is the same as current_vgpu,
 133 if (!scheduler->next_vgpu) 133 * let scheduler choose next_vgpu again by setting it to NULL.
134 */
135 if (scheduler->next_vgpu == scheduler->current_vgpu) {
136 scheduler->next_vgpu = NULL;
134 return; 137 return;
138 }
135 139
136 /* 140 /*
137 * after the flag is set, workload dispatch thread will 141 * after the flag is set, workload dispatch thread will
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6718c84fb862..8d1df5678eaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -337,6 +337,8 @@ nouveau_display_hpd_work(struct work_struct *work)
337 pm_runtime_get_sync(drm->dev->dev); 337 pm_runtime_get_sync(drm->dev->dev);
338 338
339 drm_helper_hpd_irq_event(drm->dev); 339 drm_helper_hpd_irq_event(drm->dev);
340 /* enable polling for external displays */
341 drm_kms_helper_poll_enable(drm->dev);
340 342
341 pm_runtime_mark_last_busy(drm->dev->dev); 343 pm_runtime_mark_last_busy(drm->dev->dev);
342 pm_runtime_put_sync(drm->dev->dev); 344 pm_runtime_put_sync(drm->dev->dev);
@@ -390,10 +392,6 @@ nouveau_display_init(struct drm_device *dev)
390 if (ret) 392 if (ret)
391 return ret; 393 return ret;
392 394
393 /* enable polling for external displays */
394 if (!dev->mode_config.poll_enabled)
395 drm_kms_helper_poll_enable(dev);
396
397 /* enable hotplug interrupts */ 395 /* enable hotplug interrupts */
398 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 396 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
399 struct nouveau_connector *conn = nouveau_connector(connector); 397 struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c3dc75fee700..6844372366d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -502,6 +502,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
502 pm_runtime_allow(dev->dev); 502 pm_runtime_allow(dev->dev);
503 pm_runtime_mark_last_busy(dev->dev); 503 pm_runtime_mark_last_busy(dev->dev);
504 pm_runtime_put(dev->dev); 504 pm_runtime_put(dev->dev);
505 } else {
506 /* enable polling for external displays */
507 drm_kms_helper_poll_enable(dev);
505 } 508 }
506 return 0; 509 return 0;
507 510
@@ -774,9 +777,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
774 777
775 ret = nouveau_do_resume(drm_dev, true); 778 ret = nouveau_do_resume(drm_dev, true);
776 779
777 if (!drm_dev->mode_config.poll_enabled)
778 drm_kms_helper_poll_enable(drm_dev);
779
780 /* do magic */ 780 /* do magic */
781 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); 781 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
782 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 782 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 3a24788c3185..a7e55c422501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
148 case NVKM_MEM_TARGET_NCOH: target = 3; break; 148 case NVKM_MEM_TARGET_NCOH: target = 3; break;
149 default: 149 default:
150 WARN_ON(1); 150 WARN_ON(1);
151 return; 151 goto unlock;
152 } 152 }
153 153
154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) | 154 nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
160 & 0x00100000), 160 & 0x00100000),
161 msecs_to_jiffies(2000)) == 0) 161 msecs_to_jiffies(2000)) == 0)
162 nvkm_error(subdev, "runlist %d update timeout\n", runl); 162 nvkm_error(subdev, "runlist %d update timeout\n", runl);
163unlock:
163 mutex_unlock(&subdev->mutex); 164 mutex_unlock(&subdev->mutex);
164} 165}
165 166
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index d1cf02d22db1..1b0c793c0192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
116 ret = nvkm_firmware_get(subdev->device, f, &sig); 116 ret = nvkm_firmware_get(subdev->device, f, &sig);
117 if (ret) 117 if (ret)
118 goto free_data; 118 goto free_data;
119
119 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL); 120 img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
120 if (!img->sig) { 121 if (!img->sig) {
121 ret = -ENOMEM; 122 ret = -ENOMEM;
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
126 img->ucode_data = ls_ucode_img_build(bl, code, data, 127 img->ucode_data = ls_ucode_img_build(bl, code, data,
127 &img->ucode_desc); 128 &img->ucode_desc);
128 if (IS_ERR(img->ucode_data)) { 129 if (IS_ERR(img->ucode_data)) {
130 kfree(img->sig);
129 ret = PTR_ERR(img->ucode_data); 131 ret = PTR_ERR(img->ucode_data);
130 goto free_data; 132 goto free_sig;
131 } 133 }
132 img->ucode_size = img->ucode_desc.image_size; 134 img->ucode_size = img->ucode_desc.image_size;
133 135
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f5ef81595f5a..03fe182203ce 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -574,8 +574,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
574 if (ret) 574 if (ret)
575 return; 575 return;
576 576
577 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
578
579 if (fb != old_state->fb) { 577 if (fb != old_state->fb) {
580 obj = to_qxl_framebuffer(fb)->obj; 578 obj = to_qxl_framebuffer(fb)->obj;
581 user_bo = gem_to_qxl_bo(obj); 579 user_bo = gem_to_qxl_bo(obj);
@@ -613,6 +611,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
613 qxl_bo_kunmap(cursor_bo); 611 qxl_bo_kunmap(cursor_bo);
614 qxl_bo_kunmap(user_bo); 612 qxl_bo_kunmap(user_bo);
615 613
614 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
616 cmd->u.set.visible = 1; 615 cmd->u.set.visible = 1;
617 cmd->u.set.shape = qxl_bo_physical_address(qdev, 616 cmd->u.set.shape = qxl_bo_physical_address(qdev,
618 cursor_bo, 0); 617 cursor_bo, 0);
@@ -623,6 +622,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
623 if (ret) 622 if (ret)
624 goto out_free_release; 623 goto out_free_release;
625 624
625 cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
626 cmd->type = QXL_CURSOR_MOVE; 626 cmd->type = QXL_CURSOR_MOVE;
627 } 627 }
628 628
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f0cf99783c62..c97fbb2ab48b 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
776 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 776 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
777 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 777 u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
778 778
779 /* disable mclk switching if the refresh is >120Hz, even if the
780 * blanking period would allow it
781 */
782 if (r600_dpm_get_vrefresh(rdev) > 120)
783 return true;
784
779 if (vblank_time < switch_limit) 785 if (vblank_time < switch_limit)
780 return true; 786 return true;
781 else 787 else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e368ce22bcc4..258912132b62 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7401 WREG32(DC_HPD5_INT_CONTROL, tmp); 7401 WREG32(DC_HPD5_INT_CONTROL, tmp);
7402 } 7402 }
7403 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { 7403 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7404 tmp = RREG32(DC_HPD5_INT_CONTROL); 7404 tmp = RREG32(DC_HPD6_INT_CONTROL);
7405 tmp |= DC_HPDx_INT_ACK; 7405 tmp |= DC_HPDx_INT_ACK;
7406 WREG32(DC_HPD6_INT_CONTROL, tmp); 7406 WREG32(DC_HPD6_INT_CONTROL, tmp);
7407 } 7407 }
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7431 WREG32(DC_HPD5_INT_CONTROL, tmp); 7431 WREG32(DC_HPD5_INT_CONTROL, tmp);
7432 } 7432 }
7433 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 7433 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
7434 tmp = RREG32(DC_HPD5_INT_CONTROL); 7434 tmp = RREG32(DC_HPD6_INT_CONTROL);
7435 tmp |= DC_HPDx_RX_INT_ACK; 7435 tmp |= DC_HPDx_RX_INT_ACK;
7436 WREG32(DC_HPD6_INT_CONTROL, tmp); 7436 WREG32(DC_HPD6_INT_CONTROL, tmp);
7437 } 7437 }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f130ec41ee4b..0bf103536404 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
4927 WREG32(DC_HPD5_INT_CONTROL, tmp); 4927 WREG32(DC_HPD5_INT_CONTROL, tmp);
4928 } 4928 }
4929 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 4929 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
4930 tmp = RREG32(DC_HPD5_INT_CONTROL); 4930 tmp = RREG32(DC_HPD6_INT_CONTROL);
4931 tmp |= DC_HPDx_INT_ACK; 4931 tmp |= DC_HPDx_INT_ACK;
4932 WREG32(DC_HPD6_INT_CONTROL, tmp); 4932 WREG32(DC_HPD6_INT_CONTROL, tmp);
4933 } 4933 }
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
4958 WREG32(DC_HPD5_INT_CONTROL, tmp); 4958 WREG32(DC_HPD5_INT_CONTROL, tmp);
4959 } 4959 }
4960 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 4960 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
4961 tmp = RREG32(DC_HPD5_INT_CONTROL); 4961 tmp = RREG32(DC_HPD6_INT_CONTROL);
4962 tmp |= DC_HPDx_RX_INT_ACK; 4962 tmp |= DC_HPDx_RX_INT_ACK;
4963 WREG32(DC_HPD6_INT_CONTROL, tmp); 4963 WREG32(DC_HPD6_INT_CONTROL, tmp);
4964 } 4964 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0a085176e79b..e06e2d8feab3 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
3988 WREG32(DC_HPD5_INT_CONTROL, tmp); 3988 WREG32(DC_HPD5_INT_CONTROL, tmp);
3989 } 3989 }
3990 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 3990 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3991 tmp = RREG32(DC_HPD5_INT_CONTROL); 3991 tmp = RREG32(DC_HPD6_INT_CONTROL);
3992 tmp |= DC_HPDx_INT_ACK; 3992 tmp |= DC_HPDx_INT_ACK;
3993 WREG32(DC_HPD6_INT_CONTROL, tmp); 3993 WREG32(DC_HPD6_INT_CONTROL, tmp);
3994 } 3994 }
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 6a68d440bc44..d0ad03674250 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
116 if ((radeon_runtime_pm != 0) && 116 if ((radeon_runtime_pm != 0) &&
117 radeon_has_atpx() && 117 radeon_has_atpx() &&
118 ((flags & RADEON_IS_IGP) == 0) && 118 ((flags & RADEON_IS_IGP) == 0) &&
119 !pci_is_thunderbolt_attached(rdev->pdev)) 119 !pci_is_thunderbolt_attached(dev->pdev))
120 flags |= RADEON_IS_PX; 120 flags |= RADEON_IS_PX;
121 121
122 /* radeon_device_init should report only fatal error 122 /* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ceee87f029d9..76d1888528e6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
6317 WREG32(DC_HPD5_INT_CONTROL, tmp); 6317 WREG32(DC_HPD5_INT_CONTROL, tmp);
6318 } 6318 }
6319 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 6319 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6320 tmp = RREG32(DC_HPD5_INT_CONTROL); 6320 tmp = RREG32(DC_HPD6_INT_CONTROL);
6321 tmp |= DC_HPDx_INT_ACK; 6321 tmp |= DC_HPDx_INT_ACK;
6322 WREG32(DC_HPD6_INT_CONTROL, tmp); 6322 WREG32(DC_HPD6_INT_CONTROL, tmp);
6323 } 6323 }
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
6348 WREG32(DC_HPD5_INT_CONTROL, tmp); 6348 WREG32(DC_HPD5_INT_CONTROL, tmp);
6349 } 6349 }
6350 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 6350 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
6351 tmp = RREG32(DC_HPD5_INT_CONTROL); 6351 tmp = RREG32(DC_HPD6_INT_CONTROL);
6352 tmp |= DC_HPDx_RX_INT_ACK; 6352 tmp |= DC_HPDx_RX_INT_ACK;
6353 WREG32(DC_HPD6_INT_CONTROL, tmp); 6353 WREG32(DC_HPD6_INT_CONTROL, tmp);
6354 } 6354 }
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 3ac4c03ba77b..c13a4fd86b3c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -605,6 +605,13 @@ static int coretemp_cpu_online(unsigned int cpu)
605 struct platform_data *pdata; 605 struct platform_data *pdata;
606 606
607 /* 607 /*
608 * Don't execute this on resume as the offline callback did
609 * not get executed on suspend.
610 */
611 if (cpuhp_tasks_frozen)
612 return 0;
613
614 /*
608 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 615 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
609 * sensors. We check this bit only, all the early CPUs 616 * sensors. We check this bit only, all the early CPUs
610 * without thermal sensors will be filtered out. 617 * without thermal sensors will be filtered out.
@@ -654,6 +661,13 @@ static int coretemp_cpu_offline(unsigned int cpu)
654 struct temp_data *tdata; 661 struct temp_data *tdata;
655 int indx, target; 662 int indx, target;
656 663
664 /*
665 * Don't execute this on suspend as the device remove locks
666 * up the machine.
667 */
668 if (cpuhp_tasks_frozen)
669 return 0;
670
657 /* If the physical CPU device does not exist, just return */ 671 /* If the physical CPU device does not exist, just return */
658 if (!pdev) 672 if (!pdev)
659 return 0; 673 return 0;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index f2acd4b6bf01..d1263b82d646 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -94,6 +94,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
94static int dw_i2c_acpi_configure(struct platform_device *pdev) 94static int dw_i2c_acpi_configure(struct platform_device *pdev)
95{ 95{
96 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 96 struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
97 u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
97 acpi_handle handle = ACPI_HANDLE(&pdev->dev); 98 acpi_handle handle = ACPI_HANDLE(&pdev->dev);
98 const struct acpi_device_id *id; 99 const struct acpi_device_id *id;
99 struct acpi_device *adev; 100 struct acpi_device *adev;
@@ -107,23 +108,24 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
107 * Try to get SDA hold time and *CNT values from an ACPI method for 108 * Try to get SDA hold time and *CNT values from an ACPI method for
108 * selected speed modes. 109 * selected speed modes.
109 */ 110 */
111 dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
112 dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
113 dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
114 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
115
110 switch (dev->clk_freq) { 116 switch (dev->clk_freq) {
111 case 100000: 117 case 100000:
112 dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, 118 dev->sda_hold_time = ss_ht;
113 &dev->sda_hold_time);
114 break; 119 break;
115 case 1000000: 120 case 1000000:
116 dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, 121 dev->sda_hold_time = fp_ht;
117 &dev->sda_hold_time);
118 break; 122 break;
119 case 3400000: 123 case 3400000:
120 dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, 124 dev->sda_hold_time = hs_ht;
121 &dev->sda_hold_time);
122 break; 125 break;
123 case 400000: 126 case 400000:
124 default: 127 default:
125 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 128 dev->sda_hold_time = fs_ht;
126 &dev->sda_hold_time);
127 break; 129 break;
128 } 130 }
129 131
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index cf737ec8563b..5c4db65c5019 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -819,7 +819,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
819 rc = -EINVAL; 819 rc = -EINVAL;
820 goto out; 820 goto out;
821 } 821 }
822 drv_data->irq = irq_of_parse_and_map(np, 0);
823 822
824 drv_data->rstc = devm_reset_control_get_optional(dev, NULL); 823 drv_data->rstc = devm_reset_control_get_optional(dev, NULL);
825 if (IS_ERR(drv_data->rstc)) { 824 if (IS_ERR(drv_data->rstc)) {
@@ -902,10 +901,11 @@ mv64xxx_i2c_probe(struct platform_device *pd)
902 if (!IS_ERR(drv_data->clk)) 901 if (!IS_ERR(drv_data->clk))
903 clk_prepare_enable(drv_data->clk); 902 clk_prepare_enable(drv_data->clk);
904 903
904 drv_data->irq = platform_get_irq(pd, 0);
905
905 if (pdata) { 906 if (pdata) {
906 drv_data->freq_m = pdata->freq_m; 907 drv_data->freq_m = pdata->freq_m;
907 drv_data->freq_n = pdata->freq_n; 908 drv_data->freq_n = pdata->freq_n;
908 drv_data->irq = platform_get_irq(pd, 0);
909 drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); 909 drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
910 drv_data->offload_enabled = false; 910 drv_data->offload_enabled = false;
911 memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets)); 911 memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
@@ -915,7 +915,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
915 goto exit_clk; 915 goto exit_clk;
916 } 916 }
917 if (drv_data->irq < 0) { 917 if (drv_data->irq < 0) {
918 rc = -ENXIO; 918 rc = drv_data->irq;
919 goto exit_reset; 919 goto exit_reset;
920 } 920 }
921 921
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77eeff31e..a2e3dd715380 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@ static int usb_read(struct i2c_adapter *adapter, int cmd,
178 int value, int index, void *data, int len) 178 int value, int index, void *data, int len)
179{ 179{
180 struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; 180 struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
181 void *dmadata = kmalloc(len, GFP_KERNEL);
182 int ret;
183
184 if (!dmadata)
185 return -ENOMEM;
181 186
182 /* do control transfer */ 187 /* do control transfer */
183 return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0), 188 ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
184 cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | 189 cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
185 USB_DIR_IN, value, index, data, len, 2000); 190 USB_DIR_IN, value, index, dmadata, len, 2000);
191
192 memcpy(data, dmadata, len);
193 kfree(dmadata);
194 return ret;
186} 195}
187 196
188static int usb_write(struct i2c_adapter *adapter, int cmd, 197static int usb_write(struct i2c_adapter *adapter, int cmd,
189 int value, int index, void *data, int len) 198 int value, int index, void *data, int len)
190{ 199{
191 struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data; 200 struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
201 void *dmadata = kmemdup(data, len, GFP_KERNEL);
202 int ret;
203
204 if (!dmadata)
205 return -ENOMEM;
192 206
193 /* do control transfer */ 207 /* do control transfer */
194 return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0), 208 ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
195 cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 209 cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
196 value, index, data, len, 2000); 210 value, index, dmadata, len, 2000);
211
212 kfree(dmadata);
213 return ret;
197} 214}
198 215
199static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev) 216static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dbe7e44c9321..6ba6c83ca8f1 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -416,6 +416,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
416 adapter->class = I2C_CLASS_HWMON; 416 adapter->class = I2C_CLASS_HWMON;
417 adapter->dev.parent = &pdev->dev; 417 adapter->dev.parent = &pdev->dev;
418 adapter->dev.of_node = pdev->dev.of_node; 418 adapter->dev.of_node = pdev->dev.of_node;
419 ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev));
419 i2c_set_adapdata(adapter, ctx); 420 i2c_set_adapdata(adapter, ctx);
420 rc = i2c_add_adapter(adapter); 421 rc = i2c_add_adapter(adapter);
421 if (rc) { 422 if (rc) {
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 26f7237558ba..9669ca4937b8 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -395,18 +395,20 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
395 if (force_nr) { 395 if (force_nr) {
396 priv->adap.nr = force_nr; 396 priv->adap.nr = force_nr;
397 ret = i2c_add_numbered_adapter(&priv->adap); 397 ret = i2c_add_numbered_adapter(&priv->adap);
398 dev_err(&parent->dev, 398 if (ret < 0) {
399 "failed to add mux-adapter %u as bus %u (error=%d)\n", 399 dev_err(&parent->dev,
400 chan_id, force_nr, ret); 400 "failed to add mux-adapter %u as bus %u (error=%d)\n",
401 chan_id, force_nr, ret);
402 goto err_free_priv;
403 }
401 } else { 404 } else {
402 ret = i2c_add_adapter(&priv->adap); 405 ret = i2c_add_adapter(&priv->adap);
403 dev_err(&parent->dev, 406 if (ret < 0) {
404 "failed to add mux-adapter %u (error=%d)\n", 407 dev_err(&parent->dev,
405 chan_id, ret); 408 "failed to add mux-adapter %u (error=%d)\n",
406 } 409 chan_id, ret);
407 if (ret < 0) { 410 goto err_free_priv;
408 kfree(priv); 411 }
409 return ret;
410 } 412 }
411 413
412 WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj, 414 WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
@@ -422,6 +424,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
422 424
423 muxc->adapter[muxc->num_adapters++] = &priv->adap; 425 muxc->adapter[muxc->num_adapters++] = &priv->adap;
424 return 0; 426 return 0;
427
428err_free_priv:
429 kfree(priv);
430 return ret;
425} 431}
426EXPORT_SYMBOL_GPL(i2c_mux_add_adapter); 432EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);
427 433
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 406d5059072c..d97031804de8 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
197 mux->data.reg_size = resource_size(res); 197 mux->data.reg_size = resource_size(res);
198 mux->data.reg = devm_ioremap_resource(&pdev->dev, res); 198 mux->data.reg = devm_ioremap_resource(&pdev->dev, res);
199 if (IS_ERR(mux->data.reg)) 199 if (IS_ERR(mux->data.reg)) {
200 return PTR_ERR(mux->data.reg); 200 ret = PTR_ERR(mux->data.reg);
201 goto err_put_parent;
202 }
201 } 203 }
202 204
203 if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && 205 if (mux->data.reg_size != 4 && mux->data.reg_size != 2 &&
204 mux->data.reg_size != 1) { 206 mux->data.reg_size != 1) {
205 dev_err(&pdev->dev, "Invalid register size\n"); 207 dev_err(&pdev->dev, "Invalid register size\n");
206 return -EINVAL; 208 ret = -EINVAL;
209 goto err_put_parent;
207 } 210 }
208 211
209 muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, 212 muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
210 i2c_mux_reg_select, NULL); 213 i2c_mux_reg_select, NULL);
211 if (!muxc) 214 if (!muxc) {
212 return -ENOMEM; 215 ret = -ENOMEM;
216 goto err_put_parent;
217 }
213 muxc->priv = mux; 218 muxc->priv = mux;
214 219
215 platform_set_drvdata(pdev, muxc); 220 platform_set_drvdata(pdev, muxc);
@@ -223,7 +228,7 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
223 228
224 ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class); 229 ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
225 if (ret) 230 if (ret)
226 goto add_adapter_failed; 231 goto err_del_mux_adapters;
227 } 232 }
228 233
229 dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", 234 dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
@@ -231,8 +236,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
231 236
232 return 0; 237 return 0;
233 238
234add_adapter_failed: 239err_del_mux_adapters:
235 i2c_mux_del_adapters(muxc); 240 i2c_mux_del_adapters(muxc);
241err_put_parent:
242 i2c_put_adapter(parent);
236 243
237 return ret; 244 return ret;
238} 245}
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index a679e56c44cd..f431da07f861 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
554 struct completion *completion) 554 struct completion *completion)
555{ 555{
556 struct device *dev = &client->dev; 556 struct device *dev = &client->dev;
557 long ret;
558 int error; 557 int error;
559 int len; 558 int len;
560 u8 buffer[ETP_I2C_INF_LENGTH]; 559 u8 buffer[ETP_I2C_REPORT_LEN];
560
561 len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
562 if (len != ETP_I2C_REPORT_LEN) {
563 error = len < 0 ? len : -EIO;
564 dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
565 error, len);
566 }
561 567
562 reinit_completion(completion); 568 reinit_completion(completion);
563 enable_irq(client->irq); 569 enable_irq(client->irq);
564 570
565 error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); 571 error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET);
566 if (!error)
567 ret = wait_for_completion_interruptible_timeout(completion,
568 msecs_to_jiffies(300));
569 disable_irq(client->irq);
570
571 if (error) { 572 if (error) {
572 dev_err(dev, "device reset failed: %d\n", error); 573 dev_err(dev, "device reset failed: %d\n", error);
573 return error; 574 } else if (!wait_for_completion_timeout(completion,
574 } else if (ret == 0) { 575 msecs_to_jiffies(300))) {
575 dev_err(dev, "timeout waiting for device reset\n"); 576 dev_err(dev, "timeout waiting for device reset\n");
576 return -ETIMEDOUT; 577 error = -ETIMEDOUT;
577 } else if (ret < 0) {
578 error = ret;
579 dev_err(dev, "error waiting for device reset: %d\n", error);
580 return error;
581 } 578 }
582 579
580 disable_irq(client->irq);
581
582 if (error)
583 return error;
584
583 len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); 585 len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH);
584 if (len != ETP_I2C_INF_LENGTH) { 586 if (len != ETP_I2C_INF_LENGTH) {
585 error = len < 0 ? len : -EIO; 587 error = len < 0 ? len : -EIO;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2302aef2b2d4..dd042a9b0aaa 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type)
350 case MXT_TOUCH_KEYARRAY_T15: 350 case MXT_TOUCH_KEYARRAY_T15:
351 case MXT_TOUCH_PROXIMITY_T23: 351 case MXT_TOUCH_PROXIMITY_T23:
352 case MXT_TOUCH_PROXKEY_T52: 352 case MXT_TOUCH_PROXKEY_T52:
353 case MXT_TOUCH_MULTITOUCHSCREEN_T100:
353 case MXT_PROCI_GRIPFACE_T20: 354 case MXT_PROCI_GRIPFACE_T20:
354 case MXT_PROCG_NOISE_T22: 355 case MXT_PROCG_NOISE_T22:
355 case MXT_PROCI_ONETOUCH_T24: 356 case MXT_PROCI_ONETOUCH_T24:
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 8cf8d8d5d4ef..f872817e81e4 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
471static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, 471static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
472 M09_REGISTER_OFFSET, 0, 31); 472 M09_REGISTER_OFFSET, 0, 31);
473static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, 473static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
474 M09_REGISTER_THRESHOLD, 20, 80); 474 M09_REGISTER_THRESHOLD, 0, 80);
475static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, 475static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
476 NO_REGISTER, 3, 14); 476 NO_REGISTER, 3, 14);
477 477
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 8348f366ddd1..62618e77bedc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -396,13 +396,13 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
396 dma_addr_t iova, size_t size) 396 dma_addr_t iova, size_t size)
397{ 397{
398 struct iova_domain *iovad = &cookie->iovad; 398 struct iova_domain *iovad = &cookie->iovad;
399 unsigned long shift = iova_shift(iovad);
400 399
401 /* The MSI case is only ever cleaning up its most recent allocation */ 400 /* The MSI case is only ever cleaning up its most recent allocation */
402 if (cookie->type == IOMMU_DMA_MSI_COOKIE) 401 if (cookie->type == IOMMU_DMA_MSI_COOKIE)
403 cookie->msi_iova -= size; 402 cookie->msi_iova -= size;
404 else 403 else
405 free_iova_fast(iovad, iova >> shift, size >> shift); 404 free_iova_fast(iovad, iova_pfn(iovad, iova),
405 size >> iova_shift(iovad));
406} 406}
407 407
408static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, 408static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
@@ -617,11 +617,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
617{ 617{
618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 618 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
619 struct iommu_dma_cookie *cookie = domain->iova_cookie; 619 struct iommu_dma_cookie *cookie = domain->iova_cookie;
620 struct iova_domain *iovad = &cookie->iovad; 620 size_t iova_off = 0;
621 size_t iova_off = iova_offset(iovad, phys);
622 dma_addr_t iova; 621 dma_addr_t iova;
623 622
624 size = iova_align(iovad, size + iova_off); 623 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
624 iova_off = iova_offset(&cookie->iovad, phys);
625 size = iova_align(&cookie->iovad, size + iova_off);
626 }
627
625 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 628 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
626 if (!iova) 629 if (!iova)
627 return DMA_ERROR_CODE; 630 return DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 90ab0115d78e..fc2765ccdb57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2055,11 +2055,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
2055 if (context_copied(context)) { 2055 if (context_copied(context)) {
2056 u16 did_old = context_domain_id(context); 2056 u16 did_old = context_domain_id(context);
2057 2057
2058 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) 2058 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2059 iommu->flush.flush_context(iommu, did_old, 2059 iommu->flush.flush_context(iommu, did_old,
2060 (((u16)bus) << 8) | devfn, 2060 (((u16)bus) << 8) | devfn,
2061 DMA_CCMD_MASK_NOBIT, 2061 DMA_CCMD_MASK_NOBIT,
2062 DMA_CCMD_DEVICE_INVL); 2062 DMA_CCMD_DEVICE_INVL);
2063 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2064 DMA_TLB_DSI_FLUSH);
2065 }
2063 } 2066 }
2064 2067
2065 pgd = domain->pgd; 2068 pgd = domain->pgd;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a27ef570c328..bc1efbfb9ddf 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -18,6 +18,7 @@
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/component.h> 19#include <linux/component.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/dma-mapping.h>
21#include <linux/dma-iommu.h> 22#include <linux/dma-iommu.h>
22#include <linux/err.h> 23#include <linux/err.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d2306c821ebb..31d6b5a582d2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -106,10 +106,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
106static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, 106static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
107 u32 *mask, u32 *addr) 107 u32 *mask, u32 *addr)
108{ 108{
109 unsigned int ofst; 109 unsigned int ofst = (hwirq / 32) * 4;
110
111 hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP;
112 ofst = hwirq / 32 * 4;
113 110
114 *mask = 1 << (hwirq % 32); 111 *mask = 1 << (hwirq % 32);
115 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; 112 *addr = ofst + REG_MBIGEN_CLEAR_OFFSET;
@@ -337,9 +334,15 @@ static int mbigen_device_probe(struct platform_device *pdev)
337 mgn_chip->pdev = pdev; 334 mgn_chip->pdev = pdev;
338 335
339 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
340 mgn_chip->base = devm_ioremap_resource(&pdev->dev, res); 337 if (!res)
341 if (IS_ERR(mgn_chip->base)) 338 return -EINVAL;
342 return PTR_ERR(mgn_chip->base); 339
340 mgn_chip->base = devm_ioremap(&pdev->dev, res->start,
341 resource_size(res));
342 if (!mgn_chip->base) {
343 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
344 return -ENOMEM;
345 }
343 346
344 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) 347 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
345 err = mbigen_of_create_domain(pdev, mgn_chip); 348 err = mbigen_of_create_domain(pdev, mgn_chip);
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 78a7ce816a47..9a873118ea5f 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -285,7 +285,7 @@ static int pca955x_probe(struct i2c_client *client,
285 "slave address 0x%02x\n", 285 "slave address 0x%02x\n",
286 client->name, chip->bits, client->addr); 286 client->name, chip->bits, client->addr);
287 287
288 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) 288 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
289 return -EIO; 289 return -EIO;
290 290
291 if (pdata) { 291 if (pdata) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5db11a405129..cd8139593ccd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
218 * Buffers are freed after this timeout 218 * Buffers are freed after this timeout
219 */ 219 */
220static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; 220static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
221static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; 221static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
222 222
223static unsigned long dm_bufio_peak_allocated; 223static unsigned long dm_bufio_peak_allocated;
224static unsigned long dm_bufio_allocated_kmem_cache; 224static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1558 return true; 1558 return true;
1559} 1559}
1560 1560
1561static unsigned get_retain_buffers(struct dm_bufio_client *c) 1561static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1562{ 1562{
1563 unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); 1563 unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1564 return retain_bytes / c->block_size; 1564 return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
1565} 1565}
1566 1566
1567static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, 1567static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1571 struct dm_buffer *b, *tmp; 1571 struct dm_buffer *b, *tmp;
1572 unsigned long freed = 0; 1572 unsigned long freed = 0;
1573 unsigned long count = nr_to_scan; 1573 unsigned long count = nr_to_scan;
1574 unsigned retain_target = get_retain_buffers(c); 1574 unsigned long retain_target = get_retain_buffers(c);
1575 1575
1576 for (l = 0; l < LIST_SIZE; l++) { 1576 for (l = 0; l < LIST_SIZE; l++) {
1577 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { 1577 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1794static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) 1794static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1795{ 1795{
1796 struct dm_buffer *b, *tmp; 1796 struct dm_buffer *b, *tmp;
1797 unsigned retain_target = get_retain_buffers(c); 1797 unsigned long retain_target = get_retain_buffers(c);
1798 unsigned count; 1798 unsigned long count;
1799 LIST_HEAD(write_list); 1799 LIST_HEAD(write_list);
1800 1800
1801 dm_bufio_lock(c); 1801 dm_bufio_lock(c);
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1955module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); 1955module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1956MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); 1956MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1957 1957
1958module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR); 1958module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
1959MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); 1959MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1960 1960
1961module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); 1961module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 9b1afdfb13f0..707233891291 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
33{ 33{
34 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); 34 struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
35 35
36 if (!b) {
37 DMERR("couldn't create background_tracker");
38 return NULL;
39 }
40
36 b->max_work = max_work; 41 b->max_work = max_work;
37 atomic_set(&b->pending_promotes, 0); 42 atomic_set(&b->pending_promotes, 0);
38 atomic_set(&b->pending_writebacks, 0); 43 atomic_set(&b->pending_writebacks, 0);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 72479bd61e11..e5eb9c9b4bc8 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
1120 * Cache entries may not be populated. So we cannot rely on the 1120 * Cache entries may not be populated. So we cannot rely on the
1121 * size of the clean queue. 1121 * size of the clean queue.
1122 */ 1122 */
1123 unsigned nr_clean;
1124
1125 if (idle) { 1123 if (idle) {
1126 /* 1124 /*
1127 * We'd like to clean everything. 1125 * We'd like to clean everything.
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
1129 return q_size(&mq->dirty) == 0u; 1127 return q_size(&mq->dirty) == 0u;
1130 } 1128 }
1131 1129
1132 nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty); 1130 /*
1133 return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >= 1131 * If we're busy we don't worry about cleaning at all.
1134 percent_to_target(mq, CLEAN_TARGET); 1132 */
1133 return true;
1135} 1134}
1136 1135
1137static bool free_target_met(struct smq_policy *mq, bool idle) 1136static bool free_target_met(struct smq_policy *mq)
1138{ 1137{
1139 unsigned nr_free; 1138 unsigned nr_free;
1140 1139
1141 if (!idle)
1142 return true;
1143
1144 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; 1140 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
1145 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >= 1141 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
1146 percent_to_target(mq, FREE_TARGET); 1142 percent_to_target(mq, FREE_TARGET);
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
1190 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed))) 1186 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
1191 return; 1187 return;
1192 1188
1193 e = q_peek(&mq->clean, mq->clean.nr_levels, true); 1189 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
1194 if (!e) { 1190 if (!e) {
1195 if (!clean_target_met(mq, false)) 1191 if (!clean_target_met(mq, true))
1196 queue_writeback(mq); 1192 queue_writeback(mq);
1197 return; 1193 return;
1198 } 1194 }
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
1220 * We always claim to be 'idle' to ensure some demotions happen 1216 * We always claim to be 'idle' to ensure some demotions happen
1221 * with continuous loads. 1217 * with continuous loads.
1222 */ 1218 */
1223 if (!free_target_met(mq, true)) 1219 if (!free_target_met(mq))
1224 queue_demotion(mq); 1220 queue_demotion(mq);
1225 return; 1221 return;
1226 } 1222 }
@@ -1421,14 +1417,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
1421 spin_lock_irqsave(&mq->lock, flags); 1417 spin_lock_irqsave(&mq->lock, flags);
1422 r = btracker_issue(mq->bg_work, result); 1418 r = btracker_issue(mq->bg_work, result);
1423 if (r == -ENODATA) { 1419 if (r == -ENODATA) {
1424 /* find some writeback work to do */ 1420 if (!clean_target_met(mq, idle)) {
1425 if (mq->migrations_allowed && !free_target_met(mq, idle))
1426 queue_demotion(mq);
1427
1428 else if (!clean_target_met(mq, idle))
1429 queue_writeback(mq); 1421 queue_writeback(mq);
1430 1422 r = btracker_issue(mq->bg_work, result);
1431 r = btracker_issue(mq->bg_work, result); 1423 }
1432 } 1424 }
1433 spin_unlock_irqrestore(&mq->lock, flags); 1425 spin_unlock_irqrestore(&mq->lock, flags);
1434 1426
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
1452 clear_pending(mq, e); 1444 clear_pending(mq, e);
1453 if (success) { 1445 if (success) {
1454 e->oblock = work->oblock; 1446 e->oblock = work->oblock;
1447 e->level = NR_CACHE_LEVELS - 1;
1455 push(mq, e); 1448 push(mq, e);
1456 // h, q, a 1449 // h, q, a
1457 } else { 1450 } else {
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1db375f50a13..d682a0511381 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
94 94
95static void __iot_io_end(struct io_tracker *iot, sector_t len) 95static void __iot_io_end(struct io_tracker *iot, sector_t len)
96{ 96{
97 if (!len)
98 return;
99
97 iot->in_flight -= len; 100 iot->in_flight -= len;
98 if (!iot->in_flight) 101 if (!iot->in_flight)
99 iot->idle_time = jiffies; 102 iot->idle_time = jiffies;
@@ -474,7 +477,7 @@ struct cache {
474 spinlock_t invalidation_lock; 477 spinlock_t invalidation_lock;
475 struct list_head invalidation_requests; 478 struct list_head invalidation_requests;
476 479
477 struct io_tracker origin_tracker; 480 struct io_tracker tracker;
478 481
479 struct work_struct commit_ws; 482 struct work_struct commit_ws;
480 struct batcher committer; 483 struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
901 904
902static bool accountable_bio(struct cache *cache, struct bio *bio) 905static bool accountable_bio(struct cache *cache, struct bio *bio)
903{ 906{
904 return ((bio->bi_bdev == cache->origin_dev->bdev) && 907 return bio_op(bio) != REQ_OP_DISCARD;
905 bio_op(bio) != REQ_OP_DISCARD);
906} 908}
907 909
908static void accounted_begin(struct cache *cache, struct bio *bio) 910static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
912 914
913 if (accountable_bio(cache, bio)) { 915 if (accountable_bio(cache, bio)) {
914 pb->len = bio_sectors(bio); 916 pb->len = bio_sectors(bio);
915 iot_io_begin(&cache->origin_tracker, pb->len); 917 iot_io_begin(&cache->tracker, pb->len);
916 } 918 }
917} 919}
918 920
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
921 size_t pb_data_size = get_per_bio_data_size(cache); 923 size_t pb_data_size = get_per_bio_data_size(cache);
922 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 924 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
923 925
924 iot_io_end(&cache->origin_tracker, pb->len); 926 iot_io_end(&cache->tracker, pb->len);
925} 927}
926 928
927static void accounted_request(struct cache *cache, struct bio *bio) 929static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1716 1718
1717enum busy { 1719enum busy {
1718 IDLE, 1720 IDLE,
1719 MODERATE,
1720 BUSY 1721 BUSY
1721}; 1722};
1722 1723
1723static enum busy spare_migration_bandwidth(struct cache *cache) 1724static enum busy spare_migration_bandwidth(struct cache *cache)
1724{ 1725{
1725 bool idle = iot_idle_for(&cache->origin_tracker, HZ); 1726 bool idle = iot_idle_for(&cache->tracker, HZ);
1726 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * 1727 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1727 cache->sectors_per_block; 1728 cache->sectors_per_block;
1728 1729
1729 if (current_volume <= cache->migration_threshold) 1730 if (idle && current_volume <= cache->migration_threshold)
1730 return idle ? IDLE : MODERATE; 1731 return IDLE;
1731 else 1732 else
1732 return idle ? MODERATE : BUSY; 1733 return BUSY;
1733} 1734}
1734 1735
1735static void inc_hit_counter(struct cache *cache, struct bio *bio) 1736static void inc_hit_counter(struct cache *cache, struct bio *bio)
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
2045 2046
2046 for (;;) { 2047 for (;;) {
2047 b = spare_migration_bandwidth(cache); 2048 b = spare_migration_bandwidth(cache);
2048 if (b == BUSY)
2049 break;
2050 2049
2051 r = policy_get_background_work(cache->policy, b == IDLE, &op); 2050 r = policy_get_background_work(cache->policy, b == IDLE, &op);
2052 if (r == -ENODATA) 2051 if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2717 2716
2718 batcher_init(&cache->committer, commit_op, cache, 2717 batcher_init(&cache->committer, commit_op, cache,
2719 issue_op, cache, cache->wq); 2718 issue_op, cache, cache->wq);
2720 iot_init(&cache->origin_tracker); 2719 iot_init(&cache->tracker);
2721 2720
2722 init_rwsem(&cache->background_work_lock); 2721 init_rwsem(&cache->background_work_lock);
2723 prevent_background_work(cache); 2722 prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
2941 2940
2942 cancel_delayed_work(&cache->waker); 2941 cancel_delayed_work(&cache->waker);
2943 flush_workqueue(cache->wq); 2942 flush_workqueue(cache->wq);
2944 WARN_ON(cache->origin_tracker.in_flight); 2943 WARN_ON(cache->tracker.in_flight);
2945 2944
2946 /* 2945 /*
2947 * If it's a flush suspend there won't be any deferred bios, so this 2946 * If it's a flush suspend there won't be any deferred bios, so this
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 926a6bcb32c8..3df056b73b66 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -447,7 +447,7 @@ failed:
447 * it has been invoked. 447 * it has been invoked.
448 */ 448 */
449#define dm_report_EIO(m) \ 449#define dm_report_EIO(m) \
450({ \ 450do { \
451 struct mapped_device *md = dm_table_get_md((m)->ti->table); \ 451 struct mapped_device *md = dm_table_get_md((m)->ti->table); \
452 \ 452 \
453 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \ 453 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ failed:
455 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \ 455 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
456 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \ 456 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
457 dm_noflush_suspending((m)->ti)); \ 457 dm_noflush_suspending((m)->ti)); \
458 -EIO; \ 458} while (0)
459})
460 459
461/* 460/*
462 * Map cloned requests (request-based multipath) 461 * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
481 if (!pgpath) { 480 if (!pgpath) {
482 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 481 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
483 return DM_MAPIO_DELAY_REQUEUE; 482 return DM_MAPIO_DELAY_REQUEUE;
484 return dm_report_EIO(m); /* Failed */ 483 dm_report_EIO(m); /* Failed */
484 return DM_MAPIO_KILL;
485 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || 485 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
486 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { 486 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
487 if (pg_init_all_paths(m)) 487 if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
558 if (!pgpath) { 558 if (!pgpath) {
559 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 559 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
560 return DM_MAPIO_REQUEUE; 560 return DM_MAPIO_REQUEUE;
561 return dm_report_EIO(m); 561 dm_report_EIO(m);
562 return -EIO;
562 } 563 }
563 564
564 mpio->pgpath = pgpath; 565 mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1493 if (atomic_read(&m->nr_valid_paths) == 0 && 1494 if (atomic_read(&m->nr_valid_paths) == 0 &&
1494 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1495 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1495 if (error == -EIO) 1496 if (error == -EIO)
1496 error = dm_report_EIO(m); 1497 dm_report_EIO(m);
1497 /* complete with the original error */ 1498 /* complete with the original error */
1498 r = DM_ENDIO_DONE; 1499 r = DM_ENDIO_DONE;
1499 } 1500 }
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
1524 fail_path(mpio->pgpath); 1525 fail_path(mpio->pgpath);
1525 1526
1526 if (atomic_read(&m->nr_valid_paths) == 0 && 1527 if (atomic_read(&m->nr_valid_paths) == 0 &&
1527 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1528 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1528 return dm_report_EIO(m); 1529 dm_report_EIO(m);
1530 return -EIO;
1531 }
1529 1532
1530 /* Queue for the daemon to resubmit */ 1533 /* Queue for the daemon to resubmit */
1531 dm_bio_restore(get_bio_details_from_bio(clone), clone); 1534 dm_bio_restore(get_bio_details_from_bio(clone), clone);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2af27026aa2e..b639fa7246ee 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
507 case DM_MAPIO_KILL: 507 case DM_MAPIO_KILL:
508 /* The target wants to complete the I/O */ 508 /* The target wants to complete the I/O */
509 dm_kill_unmapped_request(rq, -EIO); 509 dm_kill_unmapped_request(rq, -EIO);
510 break;
510 default: 511 default:
511 DMWARN("unimplemented target map return value: %d", r); 512 DMWARN("unimplemented target map return value: %d", r);
512 BUG(); 513 BUG();
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 0f0251d0d337..d31d18d9727c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
484 if (r < 0) 484 if (r < 0)
485 return r; 485 return r;
486 486
487 r = save_sm_roots(pmd); 487 r = dm_tm_pre_commit(pmd->tm);
488 if (r < 0) 488 if (r < 0)
489 return r; 489 return r;
490 490
491 r = dm_tm_pre_commit(pmd->tm); 491 r = save_sm_roots(pmd);
492 if (r < 0) 492 if (r < 0)
493 return r; 493 return r;
494 494
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82f798be964f..10367ffe92e3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8022,18 +8022,15 @@ EXPORT_SYMBOL(md_write_end);
8022 * may proceed without blocking. It is important to call this before 8022 * may proceed without blocking. It is important to call this before
8023 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8023 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8024 * Must be called with mddev_lock held. 8024 * Must be called with mddev_lock held.
8025 *
8026 * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
8027 * is dropped, so return -EAGAIN after notifying userspace.
8028 */ 8025 */
8029int md_allow_write(struct mddev *mddev) 8026void md_allow_write(struct mddev *mddev)
8030{ 8027{
8031 if (!mddev->pers) 8028 if (!mddev->pers)
8032 return 0; 8029 return;
8033 if (mddev->ro) 8030 if (mddev->ro)
8034 return 0; 8031 return;
8035 if (!mddev->pers->sync_request) 8032 if (!mddev->pers->sync_request)
8036 return 0; 8033 return;
8037 8034
8038 spin_lock(&mddev->lock); 8035 spin_lock(&mddev->lock);
8039 if (mddev->in_sync) { 8036 if (mddev->in_sync) {
@@ -8046,13 +8043,12 @@ int md_allow_write(struct mddev *mddev)
8046 spin_unlock(&mddev->lock); 8043 spin_unlock(&mddev->lock);
8047 md_update_sb(mddev, 0); 8044 md_update_sb(mddev, 0);
8048 sysfs_notify_dirent_safe(mddev->sysfs_state); 8045 sysfs_notify_dirent_safe(mddev->sysfs_state);
8046 /* wait for the dirty state to be recorded in the metadata */
8047 wait_event(mddev->sb_wait,
8048 !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
8049 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8049 } else 8050 } else
8050 spin_unlock(&mddev->lock); 8051 spin_unlock(&mddev->lock);
8051
8052 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
8053 return -EAGAIN;
8054 else
8055 return 0;
8056} 8052}
8057EXPORT_SYMBOL_GPL(md_allow_write); 8053EXPORT_SYMBOL_GPL(md_allow_write);
8058 8054
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4e75d121bfcc..11f15146ce51 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -665,7 +665,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
665 bool metadata_op); 665 bool metadata_op);
666extern void md_do_sync(struct md_thread *thread); 666extern void md_do_sync(struct md_thread *thread);
667extern void md_new_event(struct mddev *mddev); 667extern void md_new_event(struct mddev *mddev);
668extern int md_allow_write(struct mddev *mddev); 668extern void md_allow_write(struct mddev *mddev);
669extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); 669extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
670extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); 670extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
671extern int md_check_no_bitmap(struct mddev *mddev); 671extern int md_check_no_bitmap(struct mddev *mddev);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a14325..32adf6b4a9c7 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
142 142
143static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) 143static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
144{ 144{
145 int r;
146 uint32_t old_count;
145 enum allocation_event ev; 147 enum allocation_event ev;
146 struct sm_disk *smd = container_of(sm, struct sm_disk, sm); 148 struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
147 149
148 return sm_ll_dec(&smd->ll, b, &ev); 150 r = sm_ll_dec(&smd->ll, b, &ev);
151 if (!r && (ev == SM_FREE)) {
152 /*
153 * It's only free if it's also free in the last
154 * transaction.
155 */
156 r = sm_ll_lookup(&smd->old_ll, b, &old_count);
157 if (!r && !old_count)
158 smd->nr_allocated_this_transaction--;
159 }
160
161 return r;
149} 162}
150 163
151static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) 164static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 84e58596594d..d6c0bc76e837 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -385,7 +385,7 @@ static int raid0_run(struct mddev *mddev)
385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 385 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 386 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
387 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); 387 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
388 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); 388 blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);
389 389
390 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); 390 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
391 blk_queue_io_opt(mddev->queue, 391 blk_queue_io_opt(mddev->queue,
@@ -459,6 +459,95 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
459 } 459 }
460} 460}
461 461
462static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
463{
464 struct r0conf *conf = mddev->private;
465 struct strip_zone *zone;
466 sector_t start = bio->bi_iter.bi_sector;
467 sector_t end;
468 unsigned int stripe_size;
469 sector_t first_stripe_index, last_stripe_index;
470 sector_t start_disk_offset;
471 unsigned int start_disk_index;
472 sector_t end_disk_offset;
473 unsigned int end_disk_index;
474 unsigned int disk;
475
476 zone = find_zone(conf, &start);
477
478 if (bio_end_sector(bio) > zone->zone_end) {
479 struct bio *split = bio_split(bio,
480 zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
481 mddev->bio_set);
482 bio_chain(split, bio);
483 generic_make_request(bio);
484 bio = split;
485 end = zone->zone_end;
486 } else
487 end = bio_end_sector(bio);
488
489 if (zone != conf->strip_zone)
490 end = end - zone[-1].zone_end;
491
492 /* Now start and end is the offset in zone */
493 stripe_size = zone->nb_dev * mddev->chunk_sectors;
494
495 first_stripe_index = start;
496 sector_div(first_stripe_index, stripe_size);
497 last_stripe_index = end;
498 sector_div(last_stripe_index, stripe_size);
499
500 start_disk_index = (int)(start - first_stripe_index * stripe_size) /
501 mddev->chunk_sectors;
502 start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
503 mddev->chunk_sectors) +
504 first_stripe_index * mddev->chunk_sectors;
505 end_disk_index = (int)(end - last_stripe_index * stripe_size) /
506 mddev->chunk_sectors;
507 end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
508 mddev->chunk_sectors) +
509 last_stripe_index * mddev->chunk_sectors;
510
511 for (disk = 0; disk < zone->nb_dev; disk++) {
512 sector_t dev_start, dev_end;
513 struct bio *discard_bio = NULL;
514 struct md_rdev *rdev;
515
516 if (disk < start_disk_index)
517 dev_start = (first_stripe_index + 1) *
518 mddev->chunk_sectors;
519 else if (disk > start_disk_index)
520 dev_start = first_stripe_index * mddev->chunk_sectors;
521 else
522 dev_start = start_disk_offset;
523
524 if (disk < end_disk_index)
525 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
526 else if (disk > end_disk_index)
527 dev_end = last_stripe_index * mddev->chunk_sectors;
528 else
529 dev_end = end_disk_offset;
530
531 if (dev_end <= dev_start)
532 continue;
533
534 rdev = conf->devlist[(zone - conf->strip_zone) *
535 conf->strip_zone[0].nb_dev + disk];
536 if (__blkdev_issue_discard(rdev->bdev,
537 dev_start + zone->dev_start + rdev->data_offset,
538 dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
539 !discard_bio)
540 continue;
541 bio_chain(discard_bio, bio);
542 if (mddev->gendisk)
543 trace_block_bio_remap(bdev_get_queue(rdev->bdev),
544 discard_bio, disk_devt(mddev->gendisk),
545 bio->bi_iter.bi_sector);
546 generic_make_request(discard_bio);
547 }
548 bio_endio(bio);
549}
550
462static void raid0_make_request(struct mddev *mddev, struct bio *bio) 551static void raid0_make_request(struct mddev *mddev, struct bio *bio)
463{ 552{
464 struct strip_zone *zone; 553 struct strip_zone *zone;
@@ -473,6 +562,11 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
473 return; 562 return;
474 } 563 }
475 564
565 if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
566 raid0_handle_discard(mddev, bio);
567 return;
568 }
569
476 bio_sector = bio->bi_iter.bi_sector; 570 bio_sector = bio->bi_iter.bi_sector;
477 sector = bio_sector; 571 sector = bio_sector;
478 chunk_sects = mddev->chunk_sectors; 572 chunk_sects = mddev->chunk_sectors;
@@ -498,19 +592,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
498 bio->bi_iter.bi_sector = sector + zone->dev_start + 592 bio->bi_iter.bi_sector = sector + zone->dev_start +
499 tmp_dev->data_offset; 593 tmp_dev->data_offset;
500 594
501 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 595 if (mddev->gendisk)
502 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { 596 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
503 /* Just ignore it */ 597 bio, disk_devt(mddev->gendisk),
504 bio_endio(bio); 598 bio_sector);
505 } else { 599 mddev_check_writesame(mddev, bio);
506 if (mddev->gendisk) 600 mddev_check_write_zeroes(mddev, bio);
507 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 601 generic_make_request(bio);
508 bio, disk_devt(mddev->gendisk),
509 bio_sector);
510 mddev_check_writesame(mddev, bio);
511 mddev_check_write_zeroes(mddev, bio);
512 generic_make_request(bio);
513 }
514} 602}
515 603
516static void raid0_status(struct seq_file *seq, struct mddev *mddev) 604static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7ed59351fe97..af5056d56878 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -666,8 +666,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
666 break; 666 break;
667 } 667 }
668 continue; 668 continue;
669 } else 669 } else {
670 if ((sectors > best_good_sectors) && (best_disk >= 0))
671 best_disk = -1;
670 best_good_sectors = sectors; 672 best_good_sectors = sectors;
673 }
671 674
672 if (best_disk >= 0) 675 if (best_disk >= 0)
673 /* At least two disks to choose from so failfast is OK */ 676 /* At least two disks to choose from so failfast is OK */
@@ -1529,17 +1532,16 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1529 plug = container_of(cb, struct raid1_plug_cb, cb); 1532 plug = container_of(cb, struct raid1_plug_cb, cb);
1530 else 1533 else
1531 plug = NULL; 1534 plug = NULL;
1532 spin_lock_irqsave(&conf->device_lock, flags);
1533 if (plug) { 1535 if (plug) {
1534 bio_list_add(&plug->pending, mbio); 1536 bio_list_add(&plug->pending, mbio);
1535 plug->pending_cnt++; 1537 plug->pending_cnt++;
1536 } else { 1538 } else {
1539 spin_lock_irqsave(&conf->device_lock, flags);
1537 bio_list_add(&conf->pending_bio_list, mbio); 1540 bio_list_add(&conf->pending_bio_list, mbio);
1538 conf->pending_count++; 1541 conf->pending_count++;
1539 } 1542 spin_unlock_irqrestore(&conf->device_lock, flags);
1540 spin_unlock_irqrestore(&conf->device_lock, flags);
1541 if (!plug)
1542 md_wakeup_thread(mddev->thread); 1543 md_wakeup_thread(mddev->thread);
1544 }
1543 } 1545 }
1544 1546
1545 r1_bio_write_done(r1_bio); 1547 r1_bio_write_done(r1_bio);
@@ -3197,7 +3199,7 @@ static int raid1_reshape(struct mddev *mddev)
3197 struct r1conf *conf = mddev->private; 3199 struct r1conf *conf = mddev->private;
3198 int cnt, raid_disks; 3200 int cnt, raid_disks;
3199 unsigned long flags; 3201 unsigned long flags;
3200 int d, d2, err; 3202 int d, d2;
3201 3203
3202 /* Cannot change chunk_size, layout, or level */ 3204 /* Cannot change chunk_size, layout, or level */
3203 if (mddev->chunk_sectors != mddev->new_chunk_sectors || 3205 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3209,11 +3211,8 @@ static int raid1_reshape(struct mddev *mddev)
3209 return -EINVAL; 3211 return -EINVAL;
3210 } 3212 }
3211 3213
3212 if (!mddev_is_clustered(mddev)) { 3214 if (!mddev_is_clustered(mddev))
3213 err = md_allow_write(mddev); 3215 md_allow_write(mddev);
3214 if (err)
3215 return err;
3216 }
3217 3216
3218 raid_disks = mddev->raid_disks + mddev->delta_disks; 3217 raid_disks = mddev->raid_disks + mddev->delta_disks;
3219 3218
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6b86a0032cf8..4343d7ff9916 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1282,17 +1282,16 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1282 plug = container_of(cb, struct raid10_plug_cb, cb); 1282 plug = container_of(cb, struct raid10_plug_cb, cb);
1283 else 1283 else
1284 plug = NULL; 1284 plug = NULL;
1285 spin_lock_irqsave(&conf->device_lock, flags);
1286 if (plug) { 1285 if (plug) {
1287 bio_list_add(&plug->pending, mbio); 1286 bio_list_add(&plug->pending, mbio);
1288 plug->pending_cnt++; 1287 plug->pending_cnt++;
1289 } else { 1288 } else {
1289 spin_lock_irqsave(&conf->device_lock, flags);
1290 bio_list_add(&conf->pending_bio_list, mbio); 1290 bio_list_add(&conf->pending_bio_list, mbio);
1291 conf->pending_count++; 1291 conf->pending_count++;
1292 } 1292 spin_unlock_irqrestore(&conf->device_lock, flags);
1293 spin_unlock_irqrestore(&conf->device_lock, flags);
1294 if (!plug)
1295 md_wakeup_thread(mddev->thread); 1293 md_wakeup_thread(mddev->thread);
1294 }
1296} 1295}
1297 1296
1298static void raid10_write_request(struct mddev *mddev, struct bio *bio, 1297static void raid10_write_request(struct mddev *mddev, struct bio *bio,
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 26ba09282e7c..4c00bc248287 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -24,6 +24,7 @@
24#include "md.h" 24#include "md.h"
25#include "raid5.h" 25#include "raid5.h"
26#include "bitmap.h" 26#include "bitmap.h"
27#include "raid5-log.h"
27 28
28/* 29/*
29 * metadata/data stored in disk with 4k size unit (a block) regardless 30 * metadata/data stored in disk with 4k size unit (a block) regardless
@@ -622,20 +623,30 @@ static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
622 __r5l_set_io_unit_state(io, IO_UNIT_IO_START); 623 __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
623 spin_unlock_irqrestore(&log->io_list_lock, flags); 624 spin_unlock_irqrestore(&log->io_list_lock, flags);
624 625
626 /*
627 * In case of journal device failures, submit_bio will get error
628 * and calls endio, then active stripes will continue write
629 * process. Therefore, it is not necessary to check Faulty bit
630 * of journal device here.
631 *
632 * We can't check split_bio after current_bio is submitted. If
633 * io->split_bio is null, after current_bio is submitted, current_bio
634 * might already be completed and the io_unit is freed. We submit
635 * split_bio first to avoid the issue.
636 */
637 if (io->split_bio) {
638 if (io->has_flush)
639 io->split_bio->bi_opf |= REQ_PREFLUSH;
640 if (io->has_fua)
641 io->split_bio->bi_opf |= REQ_FUA;
642 submit_bio(io->split_bio);
643 }
644
625 if (io->has_flush) 645 if (io->has_flush)
626 io->current_bio->bi_opf |= REQ_PREFLUSH; 646 io->current_bio->bi_opf |= REQ_PREFLUSH;
627 if (io->has_fua) 647 if (io->has_fua)
628 io->current_bio->bi_opf |= REQ_FUA; 648 io->current_bio->bi_opf |= REQ_FUA;
629 submit_bio(io->current_bio); 649 submit_bio(io->current_bio);
630
631 if (!io->split_bio)
632 return;
633
634 if (io->has_flush)
635 io->split_bio->bi_opf |= REQ_PREFLUSH;
636 if (io->has_fua)
637 io->split_bio->bi_opf |= REQ_FUA;
638 submit_bio(io->split_bio);
639} 650}
640 651
641/* deferred io_unit will be dispatched here */ 652/* deferred io_unit will be dispatched here */
@@ -670,6 +681,11 @@ static void r5c_disable_writeback_async(struct work_struct *work)
670 return; 681 return;
671 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n", 682 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
672 mdname(mddev)); 683 mdname(mddev));
684
685 /* wait superblock change before suspend */
686 wait_event(mddev->sb_wait,
687 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
688
673 mddev_suspend(mddev); 689 mddev_suspend(mddev);
674 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 690 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
675 mddev_resume(mddev); 691 mddev_resume(mddev);
@@ -2621,8 +2637,11 @@ int r5c_try_caching_write(struct r5conf *conf,
2621 * When run in degraded mode, array is set to write-through mode. 2637 * When run in degraded mode, array is set to write-through mode.
2622 * This check helps drain pending write safely in the transition to 2638 * This check helps drain pending write safely in the transition to
2623 * write-through mode. 2639 * write-through mode.
2640 *
2641 * When a stripe is syncing, the write is also handled in write
2642 * through mode.
2624 */ 2643 */
2625 if (s->failed) { 2644 if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) {
2626 r5c_make_stripe_write_out(sh); 2645 r5c_make_stripe_write_out(sh);
2627 return -EAGAIN; 2646 return -EAGAIN;
2628 } 2647 }
@@ -2825,6 +2844,9 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
2825 } 2844 }
2826 2845
2827 r5l_append_flush_payload(log, sh->sector); 2846 r5l_append_flush_payload(log, sh->sector);
2847 /* stripe is flused to raid disks, we can do resync now */
2848 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
2849 set_bit(STRIPE_HANDLE, &sh->state);
2828} 2850}
2829 2851
2830int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) 2852int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
@@ -2973,7 +2995,7 @@ ioerr:
2973 return ret; 2995 return ret;
2974} 2996}
2975 2997
2976void r5c_update_on_rdev_error(struct mddev *mddev) 2998void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
2977{ 2999{
2978 struct r5conf *conf = mddev->private; 3000 struct r5conf *conf = mddev->private;
2979 struct r5l_log *log = conf->log; 3001 struct r5l_log *log = conf->log;
@@ -2981,7 +3003,8 @@ void r5c_update_on_rdev_error(struct mddev *mddev)
2981 if (!log) 3003 if (!log)
2982 return; 3004 return;
2983 3005
2984 if (raid5_calc_degraded(conf) > 0 && 3006 if ((raid5_calc_degraded(conf) > 0 ||
3007 test_bit(Journal, &rdev->flags)) &&
2985 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) 3008 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
2986 schedule_work(&log->disable_writeback_work); 3009 schedule_work(&log->disable_writeback_work);
2987} 3010}
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 27097101ccca..328d67aedda4 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -28,7 +28,8 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
28extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 28extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
29extern void r5c_check_cached_full_stripe(struct r5conf *conf); 29extern void r5c_check_cached_full_stripe(struct r5conf *conf);
30extern struct md_sysfs_entry r5c_journal_mode; 30extern struct md_sysfs_entry r5c_journal_mode;
31extern void r5c_update_on_rdev_error(struct mddev *mddev); 31extern void r5c_update_on_rdev_error(struct mddev *mddev,
32 struct md_rdev *rdev);
32extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); 33extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
33 34
34extern struct dma_async_tx_descriptor * 35extern struct dma_async_tx_descriptor *
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2e38cfac5b1d..9c4f7659f8b1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -103,8 +103,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
103static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 103static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
104{ 104{
105 int i; 105 int i;
106 local_irq_disable(); 106 spin_lock_irq(conf->hash_locks);
107 spin_lock(conf->hash_locks);
108 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 107 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
109 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 108 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
110 spin_lock(&conf->device_lock); 109 spin_lock(&conf->device_lock);
@@ -114,9 +113,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
114{ 113{
115 int i; 114 int i;
116 spin_unlock(&conf->device_lock); 115 spin_unlock(&conf->device_lock);
117 for (i = NR_STRIPE_HASH_LOCKS; i; i--) 116 for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
118 spin_unlock(conf->hash_locks + i - 1); 117 spin_unlock(conf->hash_locks + i);
119 local_irq_enable(); 118 spin_unlock_irq(conf->hash_locks);
120} 119}
121 120
122/* Find first data disk in a raid6 stripe */ 121/* Find first data disk in a raid6 stripe */
@@ -234,11 +233,15 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
234 if (test_bit(R5_InJournal, &sh->dev[i].flags)) 233 if (test_bit(R5_InJournal, &sh->dev[i].flags))
235 injournal++; 234 injournal++;
236 /* 235 /*
237 * When quiesce in r5c write back, set STRIPE_HANDLE for stripes with 236 * In the following cases, the stripe cannot be released to cached
238 * data in journal, so they are not released to cached lists 237 * lists. Therefore, we make the stripe write out and set
238 * STRIPE_HANDLE:
239 * 1. when quiesce in r5c write back;
240 * 2. when resync is requested fot the stripe.
239 */ 241 */
240 if (conf->quiesce && r5c_is_writeback(conf->log) && 242 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
241 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) { 243 (conf->quiesce && r5c_is_writeback(conf->log) &&
244 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
242 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) 245 if (test_bit(STRIPE_R5C_CACHING, &sh->state))
243 r5c_make_stripe_write_out(sh); 246 r5c_make_stripe_write_out(sh);
244 set_bit(STRIPE_HANDLE, &sh->state); 247 set_bit(STRIPE_HANDLE, &sh->state);
@@ -714,12 +717,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
714 717
715static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 718static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
716{ 719{
717 local_irq_disable();
718 if (sh1 > sh2) { 720 if (sh1 > sh2) {
719 spin_lock(&sh2->stripe_lock); 721 spin_lock_irq(&sh2->stripe_lock);
720 spin_lock_nested(&sh1->stripe_lock, 1); 722 spin_lock_nested(&sh1->stripe_lock, 1);
721 } else { 723 } else {
722 spin_lock(&sh1->stripe_lock); 724 spin_lock_irq(&sh1->stripe_lock);
723 spin_lock_nested(&sh2->stripe_lock, 1); 725 spin_lock_nested(&sh2->stripe_lock, 1);
724 } 726 }
725} 727}
@@ -727,8 +729,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
727static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 729static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
728{ 730{
729 spin_unlock(&sh1->stripe_lock); 731 spin_unlock(&sh1->stripe_lock);
730 spin_unlock(&sh2->stripe_lock); 732 spin_unlock_irq(&sh2->stripe_lock);
731 local_irq_enable();
732} 733}
733 734
734/* Only freshly new full stripe normal write stripe can be added to a batch list */ 735/* Only freshly new full stripe normal write stripe can be added to a batch list */
@@ -2312,14 +2313,12 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2312 struct stripe_head *osh, *nsh; 2313 struct stripe_head *osh, *nsh;
2313 LIST_HEAD(newstripes); 2314 LIST_HEAD(newstripes);
2314 struct disk_info *ndisks; 2315 struct disk_info *ndisks;
2315 int err; 2316 int err = 0;
2316 struct kmem_cache *sc; 2317 struct kmem_cache *sc;
2317 int i; 2318 int i;
2318 int hash, cnt; 2319 int hash, cnt;
2319 2320
2320 err = md_allow_write(conf->mddev); 2321 md_allow_write(conf->mddev);
2321 if (err)
2322 return err;
2323 2322
2324 /* Step 1 */ 2323 /* Step 1 */
2325 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2324 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@@ -2694,7 +2693,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2694 bdevname(rdev->bdev, b), 2693 bdevname(rdev->bdev, b),
2695 mdname(mddev), 2694 mdname(mddev),
2696 conf->raid_disks - mddev->degraded); 2695 conf->raid_disks - mddev->degraded);
2697 r5c_update_on_rdev_error(mddev); 2696 r5c_update_on_rdev_error(mddev, rdev);
2698} 2697}
2699 2698
2700/* 2699/*
@@ -3055,6 +3054,11 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
3055 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to 3054 * When LOG_CRITICAL, stripes with injournal == 0 will be sent to
3056 * no_space_stripes list. 3055 * no_space_stripes list.
3057 * 3056 *
3057 * 3. during journal failure
3058 * In journal failure, we try to flush all cached data to raid disks
3059 * based on data in stripe cache. The array is read-only to upper
3060 * layers, so we would skip all pending writes.
3061 *
3058 */ 3062 */
3059static inline bool delay_towrite(struct r5conf *conf, 3063static inline bool delay_towrite(struct r5conf *conf,
3060 struct r5dev *dev, 3064 struct r5dev *dev,
@@ -3068,6 +3072,9 @@ static inline bool delay_towrite(struct r5conf *conf,
3068 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && 3072 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
3069 s->injournal > 0) 3073 s->injournal > 0)
3070 return true; 3074 return true;
3075 /* case 3 above */
3076 if (s->log_failed && s->injournal)
3077 return true;
3071 return false; 3078 return false;
3072} 3079}
3073 3080
@@ -4653,8 +4660,13 @@ static void handle_stripe(struct stripe_head *sh)
4653 4660
4654 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4661 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4655 spin_lock(&sh->stripe_lock); 4662 spin_lock(&sh->stripe_lock);
4656 /* Cannot process 'sync' concurrently with 'discard' */ 4663 /*
4657 if (!test_bit(STRIPE_DISCARD, &sh->state) && 4664 * Cannot process 'sync' concurrently with 'discard'.
4665 * Flush data in r5cache before 'sync'.
4666 */
4667 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
4668 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) &&
4669 !test_bit(STRIPE_DISCARD, &sh->state) &&
4658 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4670 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4659 set_bit(STRIPE_SYNCING, &sh->state); 4671 set_bit(STRIPE_SYNCING, &sh->state);
4660 clear_bit(STRIPE_INSYNC, &sh->state); 4672 clear_bit(STRIPE_INSYNC, &sh->state);
@@ -4701,10 +4713,15 @@ static void handle_stripe(struct stripe_head *sh)
4701 " to_write=%d failed=%d failed_num=%d,%d\n", 4713 " to_write=%d failed=%d failed_num=%d,%d\n",
4702 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4714 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4703 s.failed_num[0], s.failed_num[1]); 4715 s.failed_num[0], s.failed_num[1]);
4704 /* check if the array has lost more than max_degraded devices and, 4716 /*
4717 * check if the array has lost more than max_degraded devices and,
4705 * if so, some requests might need to be failed. 4718 * if so, some requests might need to be failed.
4719 *
4720 * When journal device failed (log_failed), we will only process
4721 * the stripe if there is data need write to raid disks
4706 */ 4722 */
4707 if (s.failed > conf->max_degraded || s.log_failed) { 4723 if (s.failed > conf->max_degraded ||
4724 (s.log_failed && s.injournal == 0)) {
4708 sh->check_state = 0; 4725 sh->check_state = 0;
4709 sh->reconstruct_state = 0; 4726 sh->reconstruct_state = 0;
4710 break_stripe_batch_list(sh, 0); 4727 break_stripe_batch_list(sh, 0);
@@ -5277,8 +5294,10 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
5277 struct stripe_head *sh, *tmp; 5294 struct stripe_head *sh, *tmp;
5278 struct list_head *handle_list = NULL; 5295 struct list_head *handle_list = NULL;
5279 struct r5worker_group *wg; 5296 struct r5worker_group *wg;
5280 bool second_try = !r5c_is_writeback(conf->log); 5297 bool second_try = !r5c_is_writeback(conf->log) &&
5281 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state); 5298 !r5l_log_disk_error(conf);
5299 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
5300 r5l_log_disk_error(conf);
5282 5301
5283again: 5302again:
5284 wg = NULL; 5303 wg = NULL;
@@ -6313,7 +6332,6 @@ int
6313raid5_set_cache_size(struct mddev *mddev, int size) 6332raid5_set_cache_size(struct mddev *mddev, int size)
6314{ 6333{
6315 struct r5conf *conf = mddev->private; 6334 struct r5conf *conf = mddev->private;
6316 int err;
6317 6335
6318 if (size <= 16 || size > 32768) 6336 if (size <= 16 || size > 32768)
6319 return -EINVAL; 6337 return -EINVAL;
@@ -6325,10 +6343,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
6325 ; 6343 ;
6326 mutex_unlock(&conf->cache_size_mutex); 6344 mutex_unlock(&conf->cache_size_mutex);
6327 6345
6328 6346 md_allow_write(mddev);
6329 err = md_allow_write(mddev);
6330 if (err)
6331 return err;
6332 6347
6333 mutex_lock(&conf->cache_size_mutex); 6348 mutex_lock(&conf->cache_size_mutex);
6334 while (size > conf->max_nr_stripes) 6349 while (size > conf->max_nr_stripes)
@@ -7530,7 +7545,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7530 * neilb: there is no locking about new writes here, 7545 * neilb: there is no locking about new writes here,
7531 * so this cannot be safe. 7546 * so this cannot be safe.
7532 */ 7547 */
7533 if (atomic_read(&conf->active_stripes)) { 7548 if (atomic_read(&conf->active_stripes) ||
7549 atomic_read(&conf->r5c_cached_full_stripes) ||
7550 atomic_read(&conf->r5c_cached_partial_stripes)) {
7534 return -EBUSY; 7551 return -EBUSY;
7535 } 7552 }
7536 log_exit(conf); 7553 log_exit(conf);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index bf0fe0137dfe..6d1b4b707cc2 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -512,7 +512,7 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
512 pr_info("gpmc cs%i access configuration:\n", cs); 512 pr_info("gpmc cs%i access configuration:\n", cs);
513 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity"); 513 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
514 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data"); 514 GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
515 GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13, 515 GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 12, 13, 1,
516 GPMC_CONFIG1_DEVICESIZE_MAX, "device-width"); 516 GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
517 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin"); 517 GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
518 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write"); 518 GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2cba76e6fa3c..07bbd4cc1852 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -492,6 +492,7 @@ config ASPEED_LPC_CTRL
492 492
493config PCI_ENDPOINT_TEST 493config PCI_ENDPOINT_TEST
494 depends on PCI 494 depends on PCI
495 select CRC32
495 tristate "PCI Endpoint Test driver" 496 tristate "PCI Endpoint Test driver"
496 ---help--- 497 ---help---
497 Enable this configuration option to enable the host side test driver 498 Enable this configuration option to enable the host side test driver
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 1304160de168..13ef162cf066 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -27,6 +27,7 @@ struct mmc_pwrseq_simple {
27 struct mmc_pwrseq pwrseq; 27 struct mmc_pwrseq pwrseq;
28 bool clk_enabled; 28 bool clk_enabled;
29 u32 post_power_on_delay_ms; 29 u32 post_power_on_delay_ms;
30 u32 power_off_delay_us;
30 struct clk *ext_clk; 31 struct clk *ext_clk;
31 struct gpio_descs *reset_gpios; 32 struct gpio_descs *reset_gpios;
32}; 33};
@@ -78,6 +79,10 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
78 79
79 mmc_pwrseq_simple_set_gpios_value(pwrseq, 1); 80 mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
80 81
82 if (pwrseq->power_off_delay_us)
83 usleep_range(pwrseq->power_off_delay_us,
84 2 * pwrseq->power_off_delay_us);
85
81 if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) { 86 if (!IS_ERR(pwrseq->ext_clk) && pwrseq->clk_enabled) {
82 clk_disable_unprepare(pwrseq->ext_clk); 87 clk_disable_unprepare(pwrseq->ext_clk);
83 pwrseq->clk_enabled = false; 88 pwrseq->clk_enabled = false;
@@ -119,6 +124,8 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
119 124
120 device_property_read_u32(dev, "post-power-on-delay-ms", 125 device_property_read_u32(dev, "post-power-on-delay-ms",
121 &pwrseq->post_power_on_delay_ms); 126 &pwrseq->post_power_on_delay_ms);
127 device_property_read_u32(dev, "power-off-delay-us",
128 &pwrseq->power_off_delay_us);
122 129
123 pwrseq->pwrseq.dev = dev; 130 pwrseq->pwrseq.dev = dev;
124 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops; 131 pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 772d0900026d..951d2cdd7888 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -108,7 +108,7 @@ static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val) 108static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
109{ 109{
110 writeq(val, host->base + MIO_EMM_INT(host)); 110 writeq(val, host->base + MIO_EMM_INT(host));
111 if (!host->dma_active || (host->dma_active && !host->has_ciu3)) 111 if (!host->has_ciu3)
112 writeq(val, host->base + MIO_EMM_INT_EN(host)); 112 writeq(val, host->base + MIO_EMM_INT_EN(host));
113} 113}
114 114
@@ -267,7 +267,7 @@ static int octeon_mmc_probe(struct platform_device *pdev)
267 } 267 }
268 268
269 host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev, 269 host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
270 "power-gpios", 270 "power",
271 GPIOD_OUT_HIGH); 271 GPIOD_OUT_HIGH);
272 if (IS_ERR(host->global_pwr_gpiod)) { 272 if (IS_ERR(host->global_pwr_gpiod)) {
273 dev_err(&pdev->dev, "Invalid power GPIO\n"); 273 dev_err(&pdev->dev, "Invalid power GPIO\n");
@@ -288,11 +288,20 @@ static int octeon_mmc_probe(struct platform_device *pdev)
288 if (ret) { 288 if (ret) {
289 dev_err(&pdev->dev, "Error populating slots\n"); 289 dev_err(&pdev->dev, "Error populating slots\n");
290 octeon_mmc_set_shared_power(host, 0); 290 octeon_mmc_set_shared_power(host, 0);
291 return ret; 291 goto error;
292 } 292 }
293 i++; 293 i++;
294 } 294 }
295 return 0; 295 return 0;
296
297error:
298 for (i = 0; i < CAVIUM_MAX_MMC; i++) {
299 if (host->slot[i])
300 cvm_mmc_of_slot_remove(host->slot[i]);
301 if (host->slot_pdev[i])
302 of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
303 }
304 return ret;
296} 305}
297 306
298static int octeon_mmc_remove(struct platform_device *pdev) 307static int octeon_mmc_remove(struct platform_device *pdev)
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index fe3d77267cd6..b9cc95998799 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -146,6 +146,12 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
146 return 0; 146 return 0;
147 147
148error: 148error:
149 for (i = 0; i < CAVIUM_MAX_MMC; i++) {
150 if (host->slot[i])
151 cvm_mmc_of_slot_remove(host->slot[i]);
152 if (host->slot_pdev[i])
153 of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
154 }
149 clk_disable_unprepare(host->clk); 155 clk_disable_unprepare(host->clk);
150 return ret; 156 return ret;
151} 157}
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 58b51ba6aabd..b8aaf0fdb77c 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -839,14 +839,14 @@ static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
839 cvm_mmc_reset_bus(slot); 839 cvm_mmc_reset_bus(slot);
840 if (host->global_pwr_gpiod) 840 if (host->global_pwr_gpiod)
841 host->set_shared_power(host, 0); 841 host->set_shared_power(host, 0);
842 else 842 else if (!IS_ERR(mmc->supply.vmmc))
843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 843 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
844 break; 844 break;
845 845
846 case MMC_POWER_UP: 846 case MMC_POWER_UP:
847 if (host->global_pwr_gpiod) 847 if (host->global_pwr_gpiod)
848 host->set_shared_power(host, 1); 848 host->set_shared_power(host, 1);
849 else 849 else if (!IS_ERR(mmc->supply.vmmc))
850 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 850 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
851 break; 851 break;
852 } 852 }
@@ -968,20 +968,15 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
968 return -EINVAL; 968 return -EINVAL;
969 } 969 }
970 970
971 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); 971 ret = mmc_regulator_get_supply(mmc);
972 if (IS_ERR(mmc->supply.vmmc)) { 972 if (ret == -EPROBE_DEFER)
973 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) 973 return ret;
974 return -EPROBE_DEFER; 974 /*
975 /* 975 * Legacy Octeon firmware has no regulator entry, fall-back to
976 * Legacy Octeon firmware has no regulator entry, fall-back to 976 * a hard-coded voltage to get a sane OCR.
977 * a hard-coded voltage to get a sane OCR. 977 */
978 */ 978 if (IS_ERR(mmc->supply.vmmc))
979 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 979 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
980 } else {
981 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
982 if (ret > 0)
983 mmc->ocr_avail = ret;
984 }
985 980
986 /* Common MMC bindings */ 981 /* Common MMC bindings */
987 ret = mmc_of_parse(mmc); 982 ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3275d4995812..61666d269771 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -187,7 +187,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
187}; 187};
188 188
189static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = { 189static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
190 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 190 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
191 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
191 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, 192 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
192 .ops = &sdhci_iproc_ops, 193 .ops = &sdhci_iproc_ops,
193}; 194};
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 6356781f1cca..f7e26b031e76 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -787,14 +787,6 @@ int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios)
787 return ret; 787 return ret;
788} 788}
789 789
790void xenon_clean_phy(struct sdhci_host *host)
791{
792 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
793 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
794
795 kfree(priv->phy_params);
796}
797
798static int xenon_add_phy(struct device_node *np, struct sdhci_host *host, 790static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
799 const char *phy_name) 791 const char *phy_name)
800{ 792{
@@ -819,11 +811,7 @@ static int xenon_add_phy(struct device_node *np, struct sdhci_host *host,
819 if (ret) 811 if (ret)
820 return ret; 812 return ret;
821 813
822 ret = xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params); 814 return xenon_emmc_phy_parse_param_dt(host, np, priv->phy_params);
823 if (ret)
824 xenon_clean_phy(host);
825
826 return ret;
827} 815}
828 816
829int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host) 817int xenon_phy_parse_dt(struct device_node *np, struct sdhci_host *host)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 67246655315b..bc1781bb070b 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -486,7 +486,7 @@ static int xenon_probe(struct platform_device *pdev)
486 486
487 err = xenon_sdhc_prepare(host); 487 err = xenon_sdhc_prepare(host);
488 if (err) 488 if (err)
489 goto clean_phy_param; 489 goto err_clk;
490 490
491 err = sdhci_add_host(host); 491 err = sdhci_add_host(host);
492 if (err) 492 if (err)
@@ -496,8 +496,6 @@ static int xenon_probe(struct platform_device *pdev)
496 496
497remove_sdhc: 497remove_sdhc:
498 xenon_sdhc_unprepare(host); 498 xenon_sdhc_unprepare(host);
499clean_phy_param:
500 xenon_clean_phy(host);
501err_clk: 499err_clk:
502 clk_disable_unprepare(pltfm_host->clk); 500 clk_disable_unprepare(pltfm_host->clk);
503free_pltfm: 501free_pltfm:
@@ -510,8 +508,6 @@ static int xenon_remove(struct platform_device *pdev)
510 struct sdhci_host *host = platform_get_drvdata(pdev); 508 struct sdhci_host *host = platform_get_drvdata(pdev);
511 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 509 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
512 510
513 xenon_clean_phy(host);
514
515 sdhci_remove_host(host, 0); 511 sdhci_remove_host(host, 0);
516 512
517 xenon_sdhc_unprepare(host); 513 xenon_sdhc_unprepare(host);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 6e6523ea01ce..73debb42dc2f 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -93,7 +93,6 @@ struct xenon_priv {
93}; 93};
94 94
95int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios); 95int xenon_phy_adj(struct sdhci_host *host, struct mmc_ios *ios);
96void xenon_clean_phy(struct sdhci_host *host);
97int xenon_phy_parse_dt(struct device_node *np, 96int xenon_phy_parse_dt(struct device_node *np,
98 struct sdhci_host *host); 97 struct sdhci_host *host);
99void xenon_soc_pad_ctrl(struct sdhci_host *host, 98void xenon_soc_pad_ctrl(struct sdhci_host *host,
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c5fd4259da33..b44a6aeb346d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2577,7 +2577,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
2577 return -1; 2577 return -1;
2578 2578
2579 ad_info->aggregator_id = aggregator->aggregator_identifier; 2579 ad_info->aggregator_id = aggregator->aggregator_identifier;
2580 ad_info->ports = aggregator->num_of_ports; 2580 ad_info->ports = __agg_active_ports(aggregator);
2581 ad_info->actor_key = aggregator->actor_oper_aggregator_key; 2581 ad_info->actor_key = aggregator->actor_oper_aggregator_key;
2582 ad_info->partner_key = aggregator->partner_oper_aggregator_key; 2582 ad_info->partner_key = aggregator->partner_oper_aggregator_key;
2583 ether_addr_copy(ad_info->partner_system, 2583 ether_addr_copy(ad_info->partner_system,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2be78807fd6e..2359478b977f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2612,11 +2612,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2612 bond_for_each_slave_rcu(bond, slave, iter) { 2612 bond_for_each_slave_rcu(bond, slave, iter) {
2613 unsigned long trans_start = dev_trans_start(slave->dev); 2613 unsigned long trans_start = dev_trans_start(slave->dev);
2614 2614
2615 slave->new_link = BOND_LINK_NOCHANGE;
2616
2615 if (slave->link != BOND_LINK_UP) { 2617 if (slave->link != BOND_LINK_UP) {
2616 if (bond_time_in_interval(bond, trans_start, 1) && 2618 if (bond_time_in_interval(bond, trans_start, 1) &&
2617 bond_time_in_interval(bond, slave->last_rx, 1)) { 2619 bond_time_in_interval(bond, slave->last_rx, 1)) {
2618 2620
2619 slave->link = BOND_LINK_UP; 2621 slave->new_link = BOND_LINK_UP;
2620 slave_state_changed = 1; 2622 slave_state_changed = 1;
2621 2623
2622 /* primary_slave has no meaning in round-robin 2624 /* primary_slave has no meaning in round-robin
@@ -2643,7 +2645,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2643 if (!bond_time_in_interval(bond, trans_start, 2) || 2645 if (!bond_time_in_interval(bond, trans_start, 2) ||
2644 !bond_time_in_interval(bond, slave->last_rx, 2)) { 2646 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2645 2647
2646 slave->link = BOND_LINK_DOWN; 2648 slave->new_link = BOND_LINK_DOWN;
2647 slave_state_changed = 1; 2649 slave_state_changed = 1;
2648 2650
2649 if (slave->link_failure_count < UINT_MAX) 2651 if (slave->link_failure_count < UINT_MAX)
@@ -2674,6 +2676,11 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2674 if (!rtnl_trylock()) 2676 if (!rtnl_trylock())
2675 goto re_arm; 2677 goto re_arm;
2676 2678
2679 bond_for_each_slave(bond, slave, iter) {
2680 if (slave->new_link != BOND_LINK_NOCHANGE)
2681 slave->link = slave->new_link;
2682 }
2683
2677 if (slave_state_changed) { 2684 if (slave_state_changed) {
2678 bond_slave_state_change(bond); 2685 bond_slave_state_change(bond);
2679 if (BOND_MODE(bond) == BOND_MODE_XOR) 2686 if (BOND_MODE(bond) == BOND_MODE_XOR)
@@ -4271,10 +4278,10 @@ static int bond_check_params(struct bond_params *params)
4271 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; 4278 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4272 struct bond_opt_value newval; 4279 struct bond_opt_value newval;
4273 const struct bond_opt_value *valptr; 4280 const struct bond_opt_value *valptr;
4274 int arp_all_targets_value; 4281 int arp_all_targets_value = 0;
4275 u16 ad_actor_sys_prio = 0; 4282 u16 ad_actor_sys_prio = 0;
4276 u16 ad_user_port_key = 0; 4283 u16 ad_user_port_key = 0;
4277 __be32 arp_target[BOND_MAX_ARP_TARGETS]; 4284 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4278 int arp_ip_count; 4285 int arp_ip_count;
4279 int bond_mode = BOND_MODE_ROUNDROBIN; 4286 int bond_mode = BOND_MODE_ROUNDROBIN;
4280 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 4287 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
@@ -4501,7 +4508,6 @@ static int bond_check_params(struct bond_params *params)
4501 arp_validate_value = 0; 4508 arp_validate_value = 0;
4502 } 4509 }
4503 4510
4504 arp_all_targets_value = 0;
4505 if (arp_all_targets) { 4511 if (arp_all_targets) {
4506 bond_opt_initstr(&newval, arp_all_targets); 4512 bond_opt_initstr(&newval, arp_all_targets);
4507 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS), 4513 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 19581d783d8e..d034d8cd7d22 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -849,6 +849,9 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
849 mv88e6xxx_g1_stats_read(chip, reg, &low); 849 mv88e6xxx_g1_stats_read(chip, reg, &low);
850 if (s->sizeof_stat == 8) 850 if (s->sizeof_stat == 8)
851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 851 mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
852 break;
853 default:
854 return UINT64_MAX;
852 } 855 }
853 value = (((u64)high) << 16) | low; 856 value = (((u64)high) << 16) | low;
854 return value; 857 return value;
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index b0a3b85fc6f8..db02bc2fb4b2 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -748,13 +748,13 @@ static int ax_init_dev(struct net_device *dev)
748 748
749 ret = ax_mii_init(dev); 749 ret = ax_mii_init(dev);
750 if (ret) 750 if (ret)
751 goto out_irq; 751 goto err_out;
752 752
753 ax_NS8390_init(dev, 0); 753 ax_NS8390_init(dev, 0);
754 754
755 ret = register_netdev(dev); 755 ret = register_netdev(dev);
756 if (ret) 756 if (ret)
757 goto out_irq; 757 goto err_out;
758 758
759 netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", 759 netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
760 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, 760 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
@@ -762,9 +762,6 @@ static int ax_init_dev(struct net_device *dev)
762 762
763 return 0; 763 return 0;
764 764
765 out_irq:
766 /* cleanup irq */
767 free_irq(dev->irq, dev);
768 err_out: 765 err_out:
769 return ret; 766 return ret;
770} 767}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4ee15ff06a44..faeb4935ef3e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -200,29 +200,18 @@ err_exit:
200static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self, 200static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
201 struct aq_nic_cfg_s *aq_nic_cfg) 201 struct aq_nic_cfg_s *aq_nic_cfg)
202{ 202{
203 int err = 0;
204
205 /* TX checksums offloads*/ 203 /* TX checksums offloads*/
206 tpo_ipv4header_crc_offload_en_set(self, 1); 204 tpo_ipv4header_crc_offload_en_set(self, 1);
207 tpo_tcp_udp_crc_offload_en_set(self, 1); 205 tpo_tcp_udp_crc_offload_en_set(self, 1);
208 if (err < 0)
209 goto err_exit;
210 206
211 /* RX checksums offloads*/ 207 /* RX checksums offloads*/
212 rpo_ipv4header_crc_offload_en_set(self, 1); 208 rpo_ipv4header_crc_offload_en_set(self, 1);
213 rpo_tcp_udp_crc_offload_en_set(self, 1); 209 rpo_tcp_udp_crc_offload_en_set(self, 1);
214 if (err < 0)
215 goto err_exit;
216 210
217 /* LSO offloads*/ 211 /* LSO offloads*/
218 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 212 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
219 if (err < 0)
220 goto err_exit;
221
222 err = aq_hw_err_from_flags(self);
223 213
224err_exit: 214 return aq_hw_err_from_flags(self);
225 return err;
226} 215}
227 216
228static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self) 217static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 42150708191d..1bceb7358e5c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -200,25 +200,18 @@ err_exit:
200static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, 200static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
201 struct aq_nic_cfg_s *aq_nic_cfg) 201 struct aq_nic_cfg_s *aq_nic_cfg)
202{ 202{
203 int err = 0;
204 unsigned int i; 203 unsigned int i;
205 204
206 /* TX checksums offloads*/ 205 /* TX checksums offloads*/
207 tpo_ipv4header_crc_offload_en_set(self, 1); 206 tpo_ipv4header_crc_offload_en_set(self, 1);
208 tpo_tcp_udp_crc_offload_en_set(self, 1); 207 tpo_tcp_udp_crc_offload_en_set(self, 1);
209 if (err < 0)
210 goto err_exit;
211 208
212 /* RX checksums offloads*/ 209 /* RX checksums offloads*/
213 rpo_ipv4header_crc_offload_en_set(self, 1); 210 rpo_ipv4header_crc_offload_en_set(self, 1);
214 rpo_tcp_udp_crc_offload_en_set(self, 1); 211 rpo_tcp_udp_crc_offload_en_set(self, 1);
215 if (err < 0)
216 goto err_exit;
217 212
218 /* LSO offloads*/ 213 /* LSO offloads*/
219 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU); 214 tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
220 if (err < 0)
221 goto err_exit;
222 215
223/* LRO offloads */ 216/* LRO offloads */
224 { 217 {
@@ -245,10 +238,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
245 238
246 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); 239 rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
247 } 240 }
248 err = aq_hw_err_from_flags(self); 241 return aq_hw_err_from_flags(self);
249
250err_exit:
251 return err;
252} 242}
253 243
254static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) 244static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 63f2deec2a52..77a1c03255de 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1353,6 +1353,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1353 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && 1353 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1354 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 1354 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1355 printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); 1355 printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
1356 err = -EIO;
1356 goto err_dma; 1357 goto err_dma;
1357 } 1358 }
1358 1359
@@ -1366,10 +1367,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1366 * pcibios_set_master to do the needed arch specific settings */ 1367 * pcibios_set_master to do the needed arch specific settings */
1367 pci_set_master(pdev); 1368 pci_set_master(pdev);
1368 1369
1369 err = -ENOMEM;
1370 netdev = alloc_etherdev(sizeof(struct atl2_adapter)); 1370 netdev = alloc_etherdev(sizeof(struct atl2_adapter));
1371 if (!netdev) 1371 if (!netdev) {
1372 err = -ENOMEM;
1372 goto err_alloc_etherdev; 1373 goto err_alloc_etherdev;
1374 }
1373 1375
1374 SET_NETDEV_DEV(netdev, &pdev->dev); 1376 SET_NETDEV_DEV(netdev, &pdev->dev);
1375 1377
@@ -1408,8 +1410,6 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1408 if (err) 1410 if (err)
1409 goto err_sw_init; 1411 goto err_sw_init;
1410 1412
1411 err = -EIO;
1412
1413 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; 1413 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
1414 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1414 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1415 1415
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b56c54d68d5e..03f55daecb20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7630,8 +7630,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7630 dev->min_mtu = ETH_ZLEN; 7630 dev->min_mtu = ETH_ZLEN;
7631 dev->max_mtu = BNXT_MAX_MTU; 7631 dev->max_mtu = BNXT_MAX_MTU;
7632 7632
7633 bnxt_dcb_init(bp);
7634
7635#ifdef CONFIG_BNXT_SRIOV 7633#ifdef CONFIG_BNXT_SRIOV
7636 init_waitqueue_head(&bp->sriov_cfg_wait); 7634 init_waitqueue_head(&bp->sriov_cfg_wait);
7637#endif 7635#endif
@@ -7669,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7669 bnxt_hwrm_func_qcfg(bp); 7667 bnxt_hwrm_func_qcfg(bp);
7670 bnxt_hwrm_port_led_qcaps(bp); 7668 bnxt_hwrm_port_led_qcaps(bp);
7671 bnxt_ethtool_init(bp); 7669 bnxt_ethtool_init(bp);
7670 bnxt_dcb_init(bp);
7672 7671
7673 bnxt_set_rx_skb_mode(bp, false); 7672 bnxt_set_rx_skb_mode(bp, false);
7674 bnxt_set_tpa_flags(bp); 7673 bnxt_set_tpa_flags(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 46de2f8ff024..5c6dd0ce209f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -553,8 +553,10 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
553 if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) 553 if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
554 return 1; 554 return 1;
555 555
556 if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp)) 556 if (mode & DCB_CAP_DCBX_HOST) {
557 return 1; 557 if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
558 return 1;
559 }
558 560
559 if (mode == bp->dcbx_cap) 561 if (mode == bp->dcbx_cap)
560 return 0; 562 return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index fa376444e57c..3549d3876278 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x10 39#define T4FW_VERSION_MINOR 0x10
40#define T4FW_VERSION_MICRO 0x21 40#define T4FW_VERSION_MICRO 0x2B
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x10 48#define T5FW_VERSION_MINOR 0x10
49#define T5FW_VERSION_MICRO 0x21 49#define T5FW_VERSION_MICRO 0x2B
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x10 57#define T6FW_VERSION_MINOR 0x10
58#define T6FW_VERSION_MICRO 0x21 58#define T6FW_VERSION_MICRO 0x2B
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f3a09ab55900..4eee18ce9be4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5078,9 +5078,11 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
5078 struct be_adapter *adapter = netdev_priv(dev); 5078 struct be_adapter *adapter = netdev_priv(dev);
5079 u8 l4_hdr = 0; 5079 u8 l4_hdr = 0;
5080 5080
5081 /* The code below restricts offload features for some tunneled packets. 5081 /* The code below restricts offload features for some tunneled and
5082 * Q-in-Q packets.
5082 * Offload features for normal (non tunnel) packets are unchanged. 5083 * Offload features for normal (non tunnel) packets are unchanged.
5083 */ 5084 */
5085 features = vlan_features_check(skb, features);
5084 if (!skb->encapsulation || 5086 if (!skb->encapsulation ||
5085 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) 5087 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5086 return features; 5088 return features;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6ac336b546e6..1536356e2ea8 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1174,11 +1174,17 @@ static int ftmac100_remove(struct platform_device *pdev)
1174 return 0; 1174 return 0;
1175} 1175}
1176 1176
1177static const struct of_device_id ftmac100_of_ids[] = {
1178 { .compatible = "andestech,atmac100" },
1179 { }
1180};
1181
1177static struct platform_driver ftmac100_driver = { 1182static struct platform_driver ftmac100_driver = {
1178 .probe = ftmac100_probe, 1183 .probe = ftmac100_probe,
1179 .remove = ftmac100_remove, 1184 .remove = ftmac100_remove,
1180 .driver = { 1185 .driver = {
1181 .name = DRV_NAME, 1186 .name = DRV_NAME,
1187 .of_match_table = ftmac100_of_ids
1182 }, 1188 },
1183}; 1189};
1184 1190
@@ -1202,3 +1208,4 @@ module_exit(ftmac100_exit);
1202MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1208MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1203MODULE_DESCRIPTION("FTMAC100 driver"); 1209MODULE_DESCRIPTION("FTMAC100 driver");
1204MODULE_LICENSE("GPL"); 1210MODULE_LICENSE("GPL");
1211MODULE_DEVICE_TABLE(of, ftmac100_of_ids);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 56a563f90b0b..f7c8649fd28f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3192,7 +3192,7 @@ static int fec_reset_phy(struct platform_device *pdev)
3192{ 3192{
3193 int err, phy_reset; 3193 int err, phy_reset;
3194 bool active_high = false; 3194 bool active_high = false;
3195 int msec = 1; 3195 int msec = 1, phy_post_delay = 0;
3196 struct device_node *np = pdev->dev.of_node; 3196 struct device_node *np = pdev->dev.of_node;
3197 3197
3198 if (!np) 3198 if (!np)
@@ -3209,6 +3209,11 @@ static int fec_reset_phy(struct platform_device *pdev)
3209 else if (!gpio_is_valid(phy_reset)) 3209 else if (!gpio_is_valid(phy_reset))
3210 return 0; 3210 return 0;
3211 3211
3212 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
3213 /* valid reset duration should be less than 1s */
3214 if (!err && phy_post_delay > 1000)
3215 return -EINVAL;
3216
3212 active_high = of_property_read_bool(np, "phy-reset-active-high"); 3217 active_high = of_property_read_bool(np, "phy-reset-active-high");
3213 3218
3214 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3219 err = devm_gpio_request_one(&pdev->dev, phy_reset,
@@ -3226,6 +3231,15 @@ static int fec_reset_phy(struct platform_device *pdev)
3226 3231
3227 gpio_set_value_cansleep(phy_reset, !active_high); 3232 gpio_set_value_cansleep(phy_reset, !active_high);
3228 3233
3234 if (!phy_post_delay)
3235 return 0;
3236
3237 if (phy_post_delay > 20)
3238 msleep(phy_post_delay);
3239 else
3240 usleep_range(phy_post_delay * 1000,
3241 phy_post_delay * 1000 + 1000);
3242
3229 return 0; 3243 return 0;
3230} 3244}
3231#else /* CONFIG_OF */ 3245#else /* CONFIG_OF */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 703205475524..83aab1e4c8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2862,12 +2862,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2862 int port = 0; 2862 int port = 0;
2863 2863
2864 if (msi_x) { 2864 if (msi_x) {
2865 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2865 int nreq = min3(dev->caps.num_ports *
2866 2866 (int)num_online_cpus() + 1,
2867 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2867 dev->caps.num_eqs - dev->caps.reserved_eqs,
2868 nreq); 2868 MAX_MSIX);
2869 if (nreq > MAX_MSIX)
2870 nreq = MAX_MSIX;
2871 2869
2872 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2870 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2873 if (!entries) 2871 if (!entries)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fc52d742b7f7..27251a78075c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -13,7 +13,7 @@ config MLX5_CORE
13 13
14config MLX5_CORE_EN 14config MLX5_CORE_EN
15 bool "Mellanox Technologies ConnectX-4 Ethernet support" 15 bool "Mellanox Technologies ConnectX-4 Ethernet support"
16 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 16 depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
17 depends on IPV6=y || IPV6=n || MLX5_CORE=m 17 depends on IPV6=y || IPV6=n || MLX5_CORE=m
18 imply PTP_1588_CLOCK 18 imply PTP_1588_CLOCK
19 default n 19 default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5bdaf3d545b2..10d282841f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -774,7 +774,7 @@ static void cb_timeout_handler(struct work_struct *work)
774 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 774 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
775 mlx5_command_str(msg_to_opcode(ent->in)), 775 mlx5_command_str(msg_to_opcode(ent->in)),
776 msg_to_opcode(ent->in)); 776 msg_to_opcode(ent->in));
777 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 777 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
778} 778}
779 779
780static void cmd_work_handler(struct work_struct *work) 780static void cmd_work_handler(struct work_struct *work)
@@ -804,6 +804,7 @@ static void cmd_work_handler(struct work_struct *work)
804 } 804 }
805 805
806 cmd->ent_arr[ent->idx] = ent; 806 cmd->ent_arr[ent->idx] = ent;
807 set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
807 lay = get_inst(cmd, ent->idx); 808 lay = get_inst(cmd, ent->idx);
808 ent->lay = lay; 809 ent->lay = lay;
809 memset(lay, 0, sizeof(*lay)); 810 memset(lay, 0, sizeof(*lay));
@@ -825,6 +826,20 @@ static void cmd_work_handler(struct work_struct *work)
825 if (ent->callback) 826 if (ent->callback)
826 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 827 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
827 828
829 /* Skip sending command to fw if internal error */
830 if (pci_channel_offline(dev->pdev) ||
831 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
832 u8 status = 0;
833 u32 drv_synd;
834
835 ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
836 MLX5_SET(mbox_out, ent->out, status, status);
837 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
838
839 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
840 return;
841 }
842
828 /* ring doorbell after the descriptor is valid */ 843 /* ring doorbell after the descriptor is valid */
829 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 844 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
830 wmb(); 845 wmb();
@@ -835,7 +850,7 @@ static void cmd_work_handler(struct work_struct *work)
835 poll_timeout(ent); 850 poll_timeout(ent);
836 /* make sure we read the descriptor after ownership is SW */ 851 /* make sure we read the descriptor after ownership is SW */
837 rmb(); 852 rmb();
838 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 853 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
839 } 854 }
840} 855}
841 856
@@ -879,7 +894,7 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
879 wait_for_completion(&ent->done); 894 wait_for_completion(&ent->done);
880 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { 895 } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
881 ent->ret = -ETIMEDOUT; 896 ent->ret = -ETIMEDOUT;
882 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 897 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
883 } 898 }
884 899
885 err = ent->ret; 900 err = ent->ret;
@@ -1375,7 +1390,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1375 } 1390 }
1376} 1391}
1377 1392
1378void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) 1393void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1379{ 1394{
1380 struct mlx5_cmd *cmd = &dev->cmd; 1395 struct mlx5_cmd *cmd = &dev->cmd;
1381 struct mlx5_cmd_work_ent *ent; 1396 struct mlx5_cmd_work_ent *ent;
@@ -1395,6 +1410,19 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1395 struct semaphore *sem; 1410 struct semaphore *sem;
1396 1411
1397 ent = cmd->ent_arr[i]; 1412 ent = cmd->ent_arr[i];
1413
1414 /* if we already completed the command, ignore it */
1415 if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
1416 &ent->state)) {
1417 /* only real completion can free the cmd slot */
1418 if (!forced) {
1419 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1420 ent->idx);
1421 free_ent(cmd, ent->idx);
1422 }
1423 continue;
1424 }
1425
1398 if (ent->callback) 1426 if (ent->callback)
1399 cancel_delayed_work(&ent->cb_timeout_work); 1427 cancel_delayed_work(&ent->cb_timeout_work);
1400 if (ent->page_queue) 1428 if (ent->page_queue)
@@ -1417,7 +1445,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1417 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", 1445 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1418 ent->ret, deliv_status_to_str(ent->status), ent->status); 1446 ent->ret, deliv_status_to_str(ent->status), ent->status);
1419 } 1447 }
1420 free_ent(cmd, ent->idx); 1448
1449 /* only real completion will free the entry slot */
1450 if (!forced)
1451 free_ent(cmd, ent->idx);
1421 1452
1422 if (ent->callback) { 1453 if (ent->callback) {
1423 ds = ent->ts2 - ent->ts1; 1454 ds = ent->ts2 - ent->ts1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0099a3e397bc..2fd044b23875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1003,7 +1003,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
1003void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); 1003void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
1004void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); 1004void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
1005 1005
1006int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn); 1006int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
1007void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); 1007void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
1008 1008
1009int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, 1009int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ce7b09d72ff6..8209affa75c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -794,7 +794,6 @@ static void get_supported(u32 eth_proto_cap,
794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); 794 ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
795 ptys2ethtool_supported_link(supported, eth_proto_cap); 795 ptys2ethtool_supported_link(supported, eth_proto_cap);
796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
797 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
798} 797}
799 798
800static void get_advertising(u32 eth_proto_cap, u8 tx_pause, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -804,7 +803,7 @@ static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
804 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
805 804
806 ptys2ethtool_adver_link(advertising, eth_proto_cap); 805 ptys2ethtool_adver_link(advertising, eth_proto_cap);
807 if (tx_pause) 806 if (rx_pause)
808 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
809 if (tx_pause ^ rx_pause) 808 if (tx_pause ^ rx_pause)
810 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); 809 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -849,6 +848,8 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
849 struct mlx5e_priv *priv = netdev_priv(netdev); 848 struct mlx5e_priv *priv = netdev_priv(netdev);
850 struct mlx5_core_dev *mdev = priv->mdev; 849 struct mlx5_core_dev *mdev = priv->mdev;
851 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; 850 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
851 u32 rx_pause = 0;
852 u32 tx_pause = 0;
852 u32 eth_proto_cap; 853 u32 eth_proto_cap;
853 u32 eth_proto_admin; 854 u32 eth_proto_admin;
854 u32 eth_proto_lp; 855 u32 eth_proto_lp;
@@ -871,11 +872,13 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
871 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); 872 an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
872 an_status = MLX5_GET(ptys_reg, out, an_status); 873 an_status = MLX5_GET(ptys_reg, out, an_status);
873 874
875 mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
876
874 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); 877 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
875 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 878 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
876 879
877 get_supported(eth_proto_cap, link_ksettings); 880 get_supported(eth_proto_cap, link_ksettings);
878 get_advertising(eth_proto_admin, 0, 0, link_ksettings); 881 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
879 get_speed_duplex(netdev, eth_proto_oper, link_ksettings); 882 get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
880 883
881 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 884 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 576d6787b484..53ed58320a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -800,7 +800,7 @@ void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
800 mlx5e_destroy_flow_table(&ttc->ft); 800 mlx5e_destroy_flow_table(&ttc->ft);
801} 801}
802 802
803int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn) 803int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
804{ 804{
805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc; 805 struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
806 struct mlx5_flow_table_attr ft_attr = {}; 806 struct mlx5_flow_table_attr ft_attr = {};
@@ -810,7 +810,6 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv, u32 underlay_qpn)
810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE; 810 ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
811 ft_attr.level = MLX5E_TTC_FT_LEVEL; 811 ft_attr.level = MLX5E_TTC_FT_LEVEL;
812 ft_attr.prio = MLX5E_NIC_PRIO; 812 ft_attr.prio = MLX5E_NIC_PRIO;
813 ft_attr.underlay_qpn = underlay_qpn;
814 813
815 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); 814 ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
816 if (IS_ERR(ft->t)) { 815 if (IS_ERR(ft->t)) {
@@ -1147,7 +1146,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1147 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 1146 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
1148 } 1147 }
1149 1148
1150 err = mlx5e_create_ttc_table(priv, 0); 1149 err = mlx5e_create_ttc_table(priv);
1151 if (err) { 1150 if (err) {
1152 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 1151 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
1153 err); 1152 err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a61b71b6fff3..41cd22a223dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2976,7 +2976,7 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2976 new_channels.params = priv->channels.params; 2976 new_channels.params = priv->channels.params;
2977 new_channels.params.num_tc = tc ? tc : 1; 2977 new_channels.params.num_tc = tc ? tc : 1;
2978 2978
2979 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2979 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
2980 priv->channels.params = new_channels.params; 2980 priv->channels.params = new_channels.params;
2981 goto out; 2981 goto out;
2982 } 2982 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7b1566f0ae58..66b5fec15313 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1041,6 +1041,8 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
1041#define MLX5_IB_GRH_BYTES 40 1041#define MLX5_IB_GRH_BYTES 40
1042#define MLX5_IPOIB_ENCAP_LEN 4 1042#define MLX5_IPOIB_ENCAP_LEN 4
1043#define MLX5_GID_SIZE 16 1043#define MLX5_GID_SIZE 16
1044#define MLX5_IPOIB_PSEUDO_LEN 20
1045#define MLX5_IPOIB_HARD_LEN (MLX5_IPOIB_PSEUDO_LEN + MLX5_IPOIB_ENCAP_LEN)
1044 1046
1045static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, 1047static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1046 struct mlx5_cqe64 *cqe, 1048 struct mlx5_cqe64 *cqe,
@@ -1048,6 +1050,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1048 struct sk_buff *skb) 1050 struct sk_buff *skb)
1049{ 1051{
1050 struct net_device *netdev = rq->netdev; 1052 struct net_device *netdev = rq->netdev;
1053 char *pseudo_header;
1051 u8 *dgid; 1054 u8 *dgid;
1052 u8 g; 1055 u8 g;
1053 1056
@@ -1076,8 +1079,11 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1076 if (likely(netdev->features & NETIF_F_RXHASH)) 1079 if (likely(netdev->features & NETIF_F_RXHASH))
1077 mlx5e_skb_set_hash(cqe, skb); 1080 mlx5e_skb_set_hash(cqe, skb);
1078 1081
1082 /* 20 bytes of ipoib header and 4 for encap existing */
1083 pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1084 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1079 skb_reset_mac_header(skb); 1085 skb_reset_mac_header(skb);
1080 skb_pull(skb, MLX5_IPOIB_ENCAP_LEN); 1086 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1081 1087
1082 skb->dev = netdev; 1088 skb->dev = netdev;
1083 1089
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 11c27e4fadf6..ec63158ab643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -43,6 +43,7 @@
43#include <net/tc_act/tc_vlan.h> 43#include <net/tc_act/tc_vlan.h>
44#include <net/tc_act/tc_tunnel_key.h> 44#include <net/tc_act/tc_tunnel_key.h>
45#include <net/tc_act/tc_pedit.h> 45#include <net/tc_act/tc_pedit.h>
46#include <net/tc_act/tc_csum.h>
46#include <net/vxlan.h> 47#include <net/vxlan.h>
47#include <net/arp.h> 48#include <net/arp.h>
48#include "en.h" 49#include "en.h"
@@ -384,7 +385,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
384 if (e->flags & MLX5_ENCAP_ENTRY_VALID) 385 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
385 mlx5_encap_dealloc(priv->mdev, e->encap_id); 386 mlx5_encap_dealloc(priv->mdev, e->encap_id);
386 387
387 hlist_del_rcu(&e->encap_hlist); 388 hash_del_rcu(&e->encap_hlist);
388 kfree(e->encap_header); 389 kfree(e->encap_header);
389 kfree(e); 390 kfree(e);
390 } 391 }
@@ -925,11 +926,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
925 struct mlx5e_tc_flow_parse_attr *parse_attr) 926 struct mlx5e_tc_flow_parse_attr *parse_attr)
926{ 927{
927 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; 928 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
928 int i, action_size, nactions, max_actions, first, last; 929 int i, action_size, nactions, max_actions, first, last, first_z;
929 void *s_masks_p, *a_masks_p, *vals_p; 930 void *s_masks_p, *a_masks_p, *vals_p;
930 u32 s_mask, a_mask, val;
931 struct mlx5_fields *f; 931 struct mlx5_fields *f;
932 u8 cmd, field_bsize; 932 u8 cmd, field_bsize;
933 u32 s_mask, a_mask;
933 unsigned long mask; 934 unsigned long mask;
934 void *action; 935 void *action;
935 936
@@ -946,7 +947,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
946 for (i = 0; i < ARRAY_SIZE(fields); i++) { 947 for (i = 0; i < ARRAY_SIZE(fields); i++) {
947 f = &fields[i]; 948 f = &fields[i];
948 /* avoid seeing bits set from previous iterations */ 949 /* avoid seeing bits set from previous iterations */
949 s_mask = a_mask = mask = val = 0; 950 s_mask = 0;
951 a_mask = 0;
950 952
951 s_masks_p = (void *)set_masks + f->offset; 953 s_masks_p = (void *)set_masks + f->offset;
952 a_masks_p = (void *)add_masks + f->offset; 954 a_masks_p = (void *)add_masks + f->offset;
@@ -981,12 +983,12 @@ static int offload_pedit_fields(struct pedit_headers *masks,
981 memset(a_masks_p, 0, f->size); 983 memset(a_masks_p, 0, f->size);
982 } 984 }
983 985
984 memcpy(&val, vals_p, f->size);
985
986 field_bsize = f->size * BITS_PER_BYTE; 986 field_bsize = f->size * BITS_PER_BYTE;
987
988 first_z = find_first_zero_bit(&mask, field_bsize);
987 first = find_first_bit(&mask, field_bsize); 989 first = find_first_bit(&mask, field_bsize);
988 last = find_last_bit(&mask, field_bsize); 990 last = find_last_bit(&mask, field_bsize);
989 if (first > 0 || last != (field_bsize - 1)) { 991 if (first > 0 || last != (field_bsize - 1) || first_z < last) {
990 printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n", 992 printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
991 mask); 993 mask);
992 return -EOPNOTSUPP; 994 return -EOPNOTSUPP;
@@ -1002,11 +1004,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
1002 } 1004 }
1003 1005
1004 if (field_bsize == 32) 1006 if (field_bsize == 32)
1005 MLX5_SET(set_action_in, action, data, ntohl(val)); 1007 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
1006 else if (field_bsize == 16) 1008 else if (field_bsize == 16)
1007 MLX5_SET(set_action_in, action, data, ntohs(val)); 1009 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
1008 else if (field_bsize == 8) 1010 else if (field_bsize == 8)
1009 MLX5_SET(set_action_in, action, data, val); 1011 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
1010 1012
1011 action += action_size; 1013 action += action_size;
1012 nactions++; 1014 nactions++;
@@ -1109,6 +1111,28 @@ out_err:
1109 return err; 1111 return err;
1110} 1112}
1111 1113
1114static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1115{
1116 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1117 TCA_CSUM_UPDATE_FLAG_UDP;
1118
1119 /* The HW recalcs checksums only if re-writing headers */
1120 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1121 netdev_warn(priv->netdev,
1122 "TC csum action is only offloaded with pedit\n");
1123 return false;
1124 }
1125
1126 if (update_flags & ~prot_flags) {
1127 netdev_warn(priv->netdev,
1128 "can't offload TC csum action for some header/s - flags %#x\n",
1129 update_flags);
1130 return false;
1131 }
1132
1133 return true;
1134}
1135
1112static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 1136static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1113 struct mlx5e_tc_flow_parse_attr *parse_attr, 1137 struct mlx5e_tc_flow_parse_attr *parse_attr,
1114 struct mlx5e_tc_flow *flow) 1138 struct mlx5e_tc_flow *flow)
@@ -1149,6 +1173,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1149 continue; 1173 continue;
1150 } 1174 }
1151 1175
1176 if (is_tcf_csum(a)) {
1177 if (csum_offload_supported(priv, attr->action,
1178 tcf_csum_update_flags(a)))
1179 continue;
1180
1181 return -EOPNOTSUPP;
1182 }
1183
1152 if (is_tcf_skbedit_mark(a)) { 1184 if (is_tcf_skbedit_mark(a)) {
1153 u32 mark = tcf_skbedit_mark(a); 1185 u32 mark = tcf_skbedit_mark(a);
1154 1186
@@ -1651,6 +1683,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1651 continue; 1683 continue;
1652 } 1684 }
1653 1685
1686 if (is_tcf_csum(a)) {
1687 if (csum_offload_supported(priv, attr->action,
1688 tcf_csum_update_flags(a)))
1689 continue;
1690
1691 return -EOPNOTSUPP;
1692 }
1693
1654 if (is_tcf_mirred_egress_redirect(a)) { 1694 if (is_tcf_mirred_egress_redirect(a)) {
1655 int ifindex = tcf_mirred_ifindex(a); 1695 int ifindex = tcf_mirred_ifindex(a);
1656 struct net_device *out_dev, *encap_dev = NULL; 1696 struct net_device *out_dev, *encap_dev = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea5d8d37a75c..33eae5ad2fb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -422,7 +422,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
422 break; 422 break;
423 423
424 case MLX5_EVENT_TYPE_CMD: 424 case MLX5_EVENT_TYPE_CMD:
425 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); 425 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
426 break; 426 break;
427 427
428 case MLX5_EVENT_TYPE_PORT_CHANGE: 428 case MLX5_EVENT_TYPE_PORT_CHANGE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 19e3d2fc2099..fcec7bedd3cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,28 +40,25 @@
40#include "eswitch.h" 40#include "eswitch.h"
41 41
42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 42int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft) 43 struct mlx5_flow_table *ft, u32 underlay_qpn)
44{ 44{
45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; 45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; 46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 47
48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && 48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
49 ft->underlay_qpn == 0) 49 underlay_qpn == 0)
50 return 0; 50 return 0;
51 51
52 MLX5_SET(set_flow_table_root_in, in, opcode, 52 MLX5_SET(set_flow_table_root_in, in, opcode,
53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); 53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); 54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); 55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
56 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
56 if (ft->vport) { 57 if (ft->vport) {
57 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); 58 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
58 MLX5_SET(set_flow_table_root_in, in, other_vport, 1); 59 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
59 } 60 }
60 61
61 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
62 ft->underlay_qpn != 0)
63 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
64
65 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 62 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
66} 63}
67 64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 8fad80688536..0f98a7cf4877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,7 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
71 unsigned int index); 71 unsigned int index);
72 72
73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, 73int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
74 struct mlx5_flow_table *ft); 74 struct mlx5_flow_table *ft,
75 u32 underlay_qpn);
75 76
76int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); 77int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
77int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); 78int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b8a176503d38..0e487e8ca634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -650,7 +650,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
650 if (ft->level >= min_level) 650 if (ft->level >= min_level)
651 return 0; 651 return 0;
652 652
653 err = mlx5_cmd_update_root_ft(root->dev, ft); 653 err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
654 if (err) 654 if (err)
655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 655 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
656 ft->id); 656 ft->id);
@@ -818,8 +818,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
818 goto unlock_root; 818 goto unlock_root;
819 } 819 }
820 820
821 ft->underlay_qpn = ft_attr->underlay_qpn;
822
823 tree_init_node(&ft->node, 1, del_flow_table); 821 tree_init_node(&ft->node, 1, del_flow_table);
824 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; 822 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
825 next_ft = find_next_chained_ft(fs_prio); 823 next_ft = find_next_chained_ft(fs_prio);
@@ -1489,7 +1487,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1489 1487
1490 new_root_ft = find_next_ft(ft); 1488 new_root_ft = find_next_ft(ft);
1491 if (new_root_ft) { 1489 if (new_root_ft) {
1492 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft); 1490 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
1491 root->underlay_qpn);
1493 1492
1494 if (err) { 1493 if (err) {
1495 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", 1494 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
@@ -2062,3 +2061,21 @@ err:
2062 mlx5_cleanup_fs(dev); 2061 mlx5_cleanup_fs(dev);
2063 return err; 2062 return err;
2064} 2063}
2064
2065int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2066{
2067 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2068
2069 root->underlay_qpn = underlay_qpn;
2070 return 0;
2071}
2072EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
2073
2074int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2075{
2076 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2077
2078 root->underlay_qpn = 0;
2079 return 0;
2080}
2081EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 81eafc7b9dd9..990acee6fb09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -118,7 +118,6 @@ struct mlx5_flow_table {
118 /* FWD rules that point on this flow table */ 118 /* FWD rules that point on this flow table */
119 struct list_head fwd_rules; 119 struct list_head fwd_rules;
120 u32 flags; 120 u32 flags;
121 u32 underlay_qpn;
122}; 121};
123 122
124struct mlx5_fc_cache { 123struct mlx5_fc_cache {
@@ -195,6 +194,7 @@ struct mlx5_flow_root_namespace {
195 struct mlx5_flow_table *root_ft; 194 struct mlx5_flow_table *root_ft;
196 /* Should be held when chaining flow tables */ 195 /* Should be held when chaining flow tables */
197 struct mutex chain_lock; 196 struct mutex chain_lock;
197 u32 underlay_qpn;
198}; 198};
199 199
200int mlx5_init_fc_stats(struct mlx5_core_dev *dev); 200int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d0515391d33b..44f59b1d6f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -90,7 +90,7 @@ static void trigger_cmd_completions(struct mlx5_core_dev *dev)
90 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 90 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
91 91
92 mlx5_core_dbg(dev, "vector 0x%llx\n", vector); 92 mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
93 mlx5_cmd_comp_handler(dev, vector); 93 mlx5_cmd_comp_handler(dev, vector, true);
94 return; 94 return;
95 95
96no_trig: 96no_trig:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
index 019c230da498..cc1858752e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -66,6 +66,10 @@ static void mlx5i_init(struct mlx5_core_dev *mdev,
66 66
67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); 67 mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
68 68
69 /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
70 mlx5e_set_rq_type_params(mdev, &priv->channels.params, MLX5_WQ_TYPE_LINKED_LIST);
71 priv->channels.params.lro_en = false;
72
69 mutex_init(&priv->state_lock); 73 mutex_init(&priv->state_lock);
70 74
71 netdev->hw_features |= NETIF_F_SG; 75 netdev->hw_features |= NETIF_F_SG;
@@ -156,6 +160,8 @@ out:
156 160
157static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 161static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
158{ 162{
163 mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
164
159 mlx5_core_destroy_qp(mdev, qp); 165 mlx5_core_destroy_qp(mdev, qp);
160} 166}
161 167
@@ -170,6 +176,8 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
170 return err; 176 return err;
171 } 177 }
172 178
179 mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
180
173 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 181 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
174 if (err) { 182 if (err) {
175 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); 183 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -189,7 +197,6 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
189 197
190static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 198static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
191{ 199{
192 struct mlx5i_priv *ipriv = priv->ppriv;
193 int err; 200 int err;
194 201
195 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, 202 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
@@ -205,7 +212,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
205 priv->netdev->hw_features &= ~NETIF_F_NTUPLE; 212 priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
206 } 213 }
207 214
208 err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); 215 err = mlx5e_create_ttc_table(priv);
209 if (err) { 216 if (err) {
210 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", 217 netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
211 err); 218 err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0c123d571b4c..fe5546bb4153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -612,7 +612,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
612 struct mlx5_priv *priv = &mdev->priv; 612 struct mlx5_priv *priv = &mdev->priv;
613 struct msix_entry *msix = priv->msix_arr; 613 struct msix_entry *msix = priv->msix_arr;
614 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; 614 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
615 int err;
616 615
617 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { 616 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
618 mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); 617 mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
@@ -622,18 +621,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
622 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
623 priv->irq_info[i].mask); 622 priv->irq_info[i].mask);
624 623
625 err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); 624#ifdef CONFIG_SMP
626 if (err) { 625 if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
627 mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", 626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
628 irq); 627#endif
629 goto err_clear_mask;
630 }
631 628
632 return 0; 629 return 0;
633
634err_clear_mask:
635 free_cpumask_var(priv->irq_info[i].mask);
636 return err;
637} 630}
638 631
639static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) 632static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ea56f6ade6b4..5f0a7bc692a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -199,10 +199,11 @@ static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp,
199 199
200 entry->counter_valid = false; 200 entry->counter_valid = false;
201 entry->counter = 0; 201 entry->counter = 0;
202 entry->index = mlxsw_sp_rif_index(rif);
203
202 if (!counters_enabled) 204 if (!counters_enabled)
203 return 0; 205 return 0;
204 206
205 entry->index = mlxsw_sp_rif_index(rif);
206 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, 207 err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
207 MLXSW_SP_RIF_COUNTER_EGRESS, 208 MLXSW_SP_RIF_COUNTER_EGRESS,
208 &cnt); 209 &cnt);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 33cec1cc1642..9f89c4137d21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -206,6 +206,9 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
206{ 206{
207 unsigned int *p_counter_index; 207 unsigned int *p_counter_index;
208 208
209 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
210 return;
211
209 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); 212 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
210 if (WARN_ON(!p_counter_index)) 213 if (WARN_ON(!p_counter_index))
211 return; 214 return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8411f1f954..f4bb0c0b7c1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1497,8 +1497,7 @@ do_fdb_op:
1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, 1497 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
1498 adding, true); 1498 adding, true);
1499 if (err) { 1499 if (err) {
1500 if (net_ratelimit()) 1500 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
1501 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1502 return; 1501 return;
1503 } 1502 }
1504 1503
@@ -1558,8 +1557,7 @@ do_fdb_op:
1558 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, 1557 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
1559 adding, true); 1558 adding, true);
1560 if (err) { 1559 if (err) {
1561 if (net_ratelimit()) 1560 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
1562 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1563 return; 1561 return;
1564 } 1562 }
1565 1563
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
247 cmd.req.arg3 = 0; 247 cmd.req.arg3 = 0;
248 248
249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) 249 if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
250 netxen_issue_cmd(adapter, &cmd); 250 rcode = netxen_issue_cmd(adapter, &cmd);
251 251
252 if (rcode != NX_RCODE_SUCCESS) 252 if (rcode != NX_RCODE_SUCCESS)
253 return -EIO; 253 return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 67200c5498ab..0a8fde629991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -983,7 +983,7 @@ void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
983 memset(&camline, 0, sizeof(union gft_cam_line_union)); 983 memset(&camline, 0, sizeof(union gft_cam_line_union));
984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 984 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
985 camline.cam_line_mapped.camline); 985 camline.cam_line_mapped.camline);
986 memset(&ramline, 0, sizeof(union gft_cam_line_union)); 986 memset(&ramline, 0, sizeof(ramline));
987 987
988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) { 988 for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM; 989 u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 49bad00a0f8f..7245b1072518 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 65 40#define _QLCNIC_LINUX_SUBVERSION 66
41#define QLCNIC_LINUX_VERSIONID "5.3.65" 41#define QLCNIC_LINUX_VERSIONID "5.3.66"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 718bf58a7da6..4fb68797630e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3168,6 +3168,40 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
3168 return 0; 3168 return 0;
3169} 3169}
3170 3170
3171void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter)
3172{
3173 struct qlcnic_hardware_context *ahw = adapter->ahw;
3174 struct qlcnic_cmd_args cmd;
3175 u32 config;
3176 int err;
3177
3178 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
3179 if (err)
3180 return;
3181
3182 err = qlcnic_issue_cmd(adapter, &cmd);
3183 if (err) {
3184 dev_info(&adapter->pdev->dev,
3185 "Get Link Status Command failed: 0x%x\n", err);
3186 goto out;
3187 } else {
3188 config = cmd.rsp.arg[3];
3189
3190 switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
3191 case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
3192 case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
3193 case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
3194 case QLC_83XX_MODULE_TP_1000BASE_T:
3195 ahw->port_type = QLCNIC_GBE;
3196 break;
3197 default:
3198 ahw->port_type = QLCNIC_XGBE;
3199 }
3200 }
3201out:
3202 qlcnic_free_mbx_args(&cmd);
3203}
3204
3171int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 3205int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
3172{ 3206{
3173 u8 pci_func; 3207 u8 pci_func;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 3dfe8e27b51c..b75a81246856 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -637,6 +637,7 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, 637int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
638 struct ethtool_pauseparam *); 638 struct ethtool_pauseparam *);
639int qlcnic_83xx_test_link(struct qlcnic_adapter *); 639int qlcnic_83xx_test_link(struct qlcnic_adapter *);
640void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter);
640int qlcnic_83xx_reg_test(struct qlcnic_adapter *); 641int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
641int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); 642int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
642int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); 643int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9a869c15d8bf..7f7deeaf1cf0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -486,6 +486,9 @@ static int qlcnic_set_link_ksettings(struct net_device *dev,
486 u32 ret = 0; 486 u32 ret = 0;
487 struct qlcnic_adapter *adapter = netdev_priv(dev); 487 struct qlcnic_adapter *adapter = netdev_priv(dev);
488 488
489 if (qlcnic_83xx_check(adapter))
490 qlcnic_83xx_get_port_type(adapter);
491
489 if (adapter->ahw->port_type != QLCNIC_GBE) 492 if (adapter->ahw->port_type != QLCNIC_GBE)
490 return -EOPNOTSUPP; 493 return -EOPNOTSUPP;
491 494
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 513e6c74e199..24ca7df15d07 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
296 296
297 /* Allocate rx SKB if we don't have one available. */ 297 /* Allocate rx SKB if we don't have one available. */
298 if (!qca->rx_skb) { 298 if (!qca->rx_skb) {
299 qca->rx_skb = netdev_alloc_skb(net_dev, 299 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
300 net_dev->mtu + VLAN_ETH_HLEN); 300 net_dev->mtu +
301 VLAN_ETH_HLEN);
301 if (!qca->rx_skb) { 302 if (!qca->rx_skb) {
302 netdev_dbg(net_dev, "out of RX resources\n"); 303 netdev_dbg(net_dev, "out of RX resources\n");
303 qca->stats.out_of_mem++; 304 qca->stats.out_of_mem++;
@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
377 qca->rx_skb, qca->rx_skb->dev); 378 qca->rx_skb, qca->rx_skb->dev);
378 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 379 qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
379 netif_rx_ni(qca->rx_skb); 380 netif_rx_ni(qca->rx_skb);
380 qca->rx_skb = netdev_alloc_skb(net_dev, 381 qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
381 net_dev->mtu + VLAN_ETH_HLEN); 382 net_dev->mtu + VLAN_ETH_HLEN);
382 if (!qca->rx_skb) { 383 if (!qca->rx_skb) {
383 netdev_dbg(net_dev, "out of RX resources\n"); 384 netdev_dbg(net_dev, "out of RX resources\n");
@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
759 if (!qca->rx_buffer) 760 if (!qca->rx_buffer)
760 return -ENOBUFS; 761 return -ENOBUFS;
761 762
762 qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); 763 qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
764 VLAN_ETH_HLEN);
763 if (!qca->rx_skb) { 765 if (!qca->rx_skb) {
764 kfree(qca->rx_buffer); 766 kfree(qca->rx_buffer);
765 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); 767 netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f68c4db656ed..2d686ccf971b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3220,7 +3220,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3220 /* MDIO bus init */ 3220 /* MDIO bus init */
3221 ret = sh_mdio_init(mdp, pd); 3221 ret = sh_mdio_init(mdp, pd);
3222 if (ret) { 3222 if (ret) {
3223 dev_err(&ndev->dev, "failed to initialise MDIO\n"); 3223 if (ret != -EPROBE_DEFER)
3224 dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3224 goto out_release; 3225 goto out_release;
3225 } 3226 }
3226 3227
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b916aa21bde..4d7fb8af880d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -18,8 +18,12 @@
18#include "mcdi.h" 18#include "mcdi.h"
19 19
20enum { 20enum {
21 EFX_REV_SIENA_A0 = 0, 21 /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
22 EFX_REV_HUNT_A0 = 1, 22 * They are not supported by this driver but these revision numbers
23 * form part of the ethtool API for register dumping.
24 */
25 EFX_REV_SIENA_A0 = 3,
26 EFX_REV_HUNT_A0 = 4,
23}; 27};
24 28
25static inline int efx_nic_rev(struct efx_nic *efx) 29static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cd8c60132390..a74c481401c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3725,7 +3725,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
3725 ep++; 3725 ep++;
3726 } else { 3726 } else {
3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3727 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3728 i, (unsigned int)virt_to_phys(ep), 3728 i, (unsigned int)virt_to_phys(p),
3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3729 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3730 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3731 p++; 3731 p++;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5a90fed06260..5b56c24b6ed2 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -411,13 +411,14 @@ static int vsw_port_remove(struct vio_dev *vdev)
411 411
412 if (port) { 412 if (port) {
413 del_timer_sync(&port->vio.timer); 413 del_timer_sync(&port->vio.timer);
414 del_timer_sync(&port->clean_timer);
414 415
415 napi_disable(&port->napi); 416 napi_disable(&port->napi);
417 unregister_netdev(port->dev);
416 418
417 list_del_rcu(&port->list); 419 list_del_rcu(&port->list);
418 420
419 synchronize_rcu(); 421 synchronize_rcu();
420 del_timer_sync(&port->clean_timer);
421 spin_lock_irqsave(&port->vp->lock, flags); 422 spin_lock_irqsave(&port->vp->lock, flags);
422 sunvnet_port_rm_txq_common(port); 423 sunvnet_port_rm_txq_common(port);
423 spin_unlock_irqrestore(&port->vp->lock, flags); 424 spin_unlock_irqrestore(&port->vp->lock, flags);
@@ -427,7 +428,6 @@ static int vsw_port_remove(struct vio_dev *vdev)
427 428
428 dev_set_drvdata(&vdev->dev, NULL); 429 dev_set_drvdata(&vdev->dev, NULL);
429 430
430 unregister_netdev(port->dev);
431 free_netdev(port->dev); 431 free_netdev(port->dev);
432 } 432 }
433 433
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 729a7da90b5b..e6222e535019 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1353,9 +1353,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1353 1353
1354 tx_pipe->dma_channel = knav_dma_open_channel(dev, 1354 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1355 tx_pipe->dma_chan_name, &config); 1355 tx_pipe->dma_chan_name, &config);
1356 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { 1356 if (IS_ERR(tx_pipe->dma_channel)) {
1357 dev_err(dev, "failed opening tx chan(%s)\n", 1357 dev_err(dev, "failed opening tx chan(%s)\n",
1358 tx_pipe->dma_chan_name); 1358 tx_pipe->dma_chan_name);
1359 ret = PTR_ERR(tx_pipe->dma_channel);
1359 goto err; 1360 goto err;
1360 } 1361 }
1361 1362
@@ -1673,9 +1674,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1673 1674
1674 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, 1675 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1675 netcp->dma_chan_name, &config); 1676 netcp->dma_chan_name, &config);
1676 if (IS_ERR_OR_NULL(netcp->rx_channel)) { 1677 if (IS_ERR(netcp->rx_channel)) {
1677 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", 1678 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
1678 netcp->dma_chan_name); 1679 netcp->dma_chan_name);
1680 ret = PTR_ERR(netcp->rx_channel);
1679 goto fail; 1681 goto fail;
1680 } 1682 }
1681 1683
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 897176fc5043..dd92950a4615 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2651,7 +2651,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2651 case HWTSTAMP_FILTER_NONE: 2651 case HWTSTAMP_FILTER_NONE:
2652 cpts_rx_enable(cpts, 0); 2652 cpts_rx_enable(cpts, 0);
2653 break; 2653 break;
2654 case HWTSTAMP_FILTER_ALL:
2655 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2654 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2656 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2655 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2657 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2656 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index dec5d563ab19..959fd12d2e67 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1293,7 +1293,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1293 if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) 1293 if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
1294 goto nla_put_failure; 1294 goto nla_put_failure;
1295 1295
1296 if (ip_tunnel_info_af(info) == AF_INET) { 1296 if (rtnl_dereference(geneve->sock4)) {
1297 if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, 1297 if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
1298 info->key.u.ipv4.dst)) 1298 info->key.u.ipv4.dst))
1299 goto nla_put_failure; 1299 goto nla_put_failure;
@@ -1302,8 +1302,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1302 !!(info->key.tun_flags & TUNNEL_CSUM))) 1302 !!(info->key.tun_flags & TUNNEL_CSUM)))
1303 goto nla_put_failure; 1303 goto nla_put_failure;
1304 1304
1305 }
1306
1305#if IS_ENABLED(CONFIG_IPV6) 1307#if IS_ENABLED(CONFIG_IPV6)
1306 } else { 1308 if (rtnl_dereference(geneve->sock6)) {
1307 if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, 1309 if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
1308 &info->key.u.ipv6.dst)) 1310 &info->key.u.ipv6.dst))
1309 goto nla_put_failure; 1311 goto nla_put_failure;
@@ -1315,8 +1317,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
1315 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 1317 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
1316 !geneve->use_udp6_rx_checksums)) 1318 !geneve->use_udp6_rx_checksums))
1317 goto nla_put_failure; 1319 goto nla_put_failure;
1318#endif
1319 } 1320 }
1321#endif
1320 1322
1321 if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || 1323 if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
1322 nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || 1324 nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4fea1b3dfbb4..7b652bb7ebe4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -873,7 +873,7 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
873 873
874 /* Check if there's an existing gtpX device to configure */ 874 /* Check if there's an existing gtpX device to configure */
875 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); 875 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
876 if (dev->netdev_ops == &gtp_netdev_ops) 876 if (dev && dev->netdev_ops == &gtp_netdev_ops)
877 gtp = netdev_priv(dev); 877 gtp = netdev_priv(dev);
878 878
879 put_net(net); 879 put_net(net);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c07feb..6f3c805f7211 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1077 * are "42101001.sb" or "42101002.sb" 1077 * are "42101001.sb" or "42101002.sb"
1078 */ 1078 */
1079 sprintf(stir421x_fw_name, "4210%4X.sb", 1079 sprintf(stir421x_fw_name, "4210%4X.sb",
1080 self->usbdev->descriptor.bcdDevice); 1080 le16_to_cpu(self->usbdev->descriptor.bcdDevice));
1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); 1081 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
1082 if (ret < 0) 1082 if (ret < 0)
1083 return ret; 1083 return ret;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b34eaaae03fd..346ad2ff3998 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -789,10 +789,12 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
789 */ 789 */
790static struct lock_class_key macvlan_netdev_addr_lock_key; 790static struct lock_class_key macvlan_netdev_addr_lock_key;
791 791
792#define ALWAYS_ON_FEATURES \ 792#define ALWAYS_ON_OFFLOADS \
793 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX | \ 793 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
794 NETIF_F_GSO_ROBUST) 794 NETIF_F_GSO_ROBUST)
795 795
796#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
797
796#define MACVLAN_FEATURES \ 798#define MACVLAN_FEATURES \
797 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 799 (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
798 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ 800 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \
@@ -827,6 +829,7 @@ static int macvlan_init(struct net_device *dev)
827 dev->features |= ALWAYS_ON_FEATURES; 829 dev->features |= ALWAYS_ON_FEATURES;
828 dev->hw_features |= NETIF_F_LRO; 830 dev->hw_features |= NETIF_F_LRO;
829 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 831 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
832 dev->vlan_features |= ALWAYS_ON_OFFLOADS;
830 dev->gso_max_size = lowerdev->gso_max_size; 833 dev->gso_max_size = lowerdev->gso_max_size;
831 dev->gso_max_segs = lowerdev->gso_max_segs; 834 dev->gso_max_segs = lowerdev->gso_max_segs;
832 dev->hard_header_len = lowerdev->hard_header_len; 835 dev->hard_header_len = lowerdev->hard_header_len;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 60ffc9da6a28..c360dd6ead22 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -108,7 +108,7 @@ config MDIO_MOXART
108config MDIO_OCTEON 108config MDIO_OCTEON
109 tristate "Octeon and some ThunderX SOCs MDIO buses" 109 tristate "Octeon and some ThunderX SOCs MDIO buses"
110 depends on 64BIT 110 depends on 64BIT
111 depends on HAS_IOMEM 111 depends on HAS_IOMEM && OF_MDIO
112 select MDIO_CAVIUM 112 select MDIO_CAVIUM
113 help 113 help
114 This module provides a driver for the Octeon and ThunderX MDIO 114 This module provides a driver for the Octeon and ThunderX MDIO
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 272b051a0199..9097e42bec2e 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -255,34 +255,6 @@ static int marvell_config_aneg(struct phy_device *phydev)
255{ 255{
256 int err; 256 int err;
257 257
258 /* The Marvell PHY has an errata which requires
259 * that certain registers get written in order
260 * to restart autonegotiation */
261 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
262
263 if (err < 0)
264 return err;
265
266 err = phy_write(phydev, 0x1d, 0x1f);
267 if (err < 0)
268 return err;
269
270 err = phy_write(phydev, 0x1e, 0x200c);
271 if (err < 0)
272 return err;
273
274 err = phy_write(phydev, 0x1d, 0x5);
275 if (err < 0)
276 return err;
277
278 err = phy_write(phydev, 0x1e, 0);
279 if (err < 0)
280 return err;
281
282 err = phy_write(phydev, 0x1e, 0x100);
283 if (err < 0)
284 return err;
285
286 err = marvell_set_polarity(phydev, phydev->mdix_ctrl); 258 err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
287 if (err < 0) 259 if (err < 0)
288 return err; 260 return err;
@@ -316,6 +288,42 @@ static int marvell_config_aneg(struct phy_device *phydev)
316 return 0; 288 return 0;
317} 289}
318 290
291static int m88e1101_config_aneg(struct phy_device *phydev)
292{
293 int err;
294
295 /* This Marvell PHY has an errata which requires
296 * that certain registers get written in order
297 * to restart autonegotiation
298 */
299 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
300
301 if (err < 0)
302 return err;
303
304 err = phy_write(phydev, 0x1d, 0x1f);
305 if (err < 0)
306 return err;
307
308 err = phy_write(phydev, 0x1e, 0x200c);
309 if (err < 0)
310 return err;
311
312 err = phy_write(phydev, 0x1d, 0x5);
313 if (err < 0)
314 return err;
315
316 err = phy_write(phydev, 0x1e, 0);
317 if (err < 0)
318 return err;
319
320 err = phy_write(phydev, 0x1e, 0x100);
321 if (err < 0)
322 return err;
323
324 return marvell_config_aneg(phydev);
325}
326
319static int m88e1111_config_aneg(struct phy_device *phydev) 327static int m88e1111_config_aneg(struct phy_device *phydev)
320{ 328{
321 int err; 329 int err;
@@ -1892,7 +1900,7 @@ static struct phy_driver marvell_drivers[] = {
1892 .flags = PHY_HAS_INTERRUPT, 1900 .flags = PHY_HAS_INTERRUPT,
1893 .probe = marvell_probe, 1901 .probe = marvell_probe,
1894 .config_init = &marvell_config_init, 1902 .config_init = &marvell_config_init,
1895 .config_aneg = &marvell_config_aneg, 1903 .config_aneg = &m88e1101_config_aneg,
1896 .read_status = &genphy_read_status, 1904 .read_status = &genphy_read_status,
1897 .ack_interrupt = &marvell_ack_interrupt, 1905 .ack_interrupt = &marvell_ack_interrupt,
1898 .config_intr = &marvell_config_intr, 1906 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 963838d4fac1..599ce24c514f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev,
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 123 if (pb == NULL) {
124 ret_val = -ENOMEM; 124 ret_val = -ENOMEM;
125 goto err_parent_bus; 125 goto err_pb_kz;
126 } 126 }
127 127
128
129 pb->switch_data = data; 128 pb->switch_data = data;
130 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
131 pb->current_child = -1; 130 pb->current_child = -1;
@@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev,
154 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
155 if (!cb->mii_bus) { 154 if (!cb->mii_bus) {
156 ret_val = -ENOMEM; 155 ret_val = -ENOMEM;
156 devm_kfree(dev, cb);
157 of_node_put(child_bus_node); 157 of_node_put(child_bus_node);
158 break; 158 break;
159 } 159 }
@@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev,
170 mdiobus_free(cb->mii_bus); 170 mdiobus_free(cb->mii_bus);
171 devm_kfree(dev, cb); 171 devm_kfree(dev, cb);
172 } else { 172 } else {
173 of_node_get(child_bus_node);
174 cb->next = pb->children; 173 cb->next = pb->children;
175 pb->children = cb; 174 pb->children = cb;
176 } 175 }
@@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev,
181 return 0; 180 return 0;
182 } 181 }
183 182
183 devm_kfree(dev, pb);
184err_pb_kz:
184 /* balance the reference of_mdio_find_bus() took */ 185 /* balance the reference of_mdio_find_bus() took */
185 put_device(&pb->mii_bus->dev); 186 if (!mux_bus)
186 187 put_device(&parent_bus->dev);
187err_parent_bus: 188err_parent_bus:
188 of_node_put(parent_bus_node); 189 of_node_put(parent_bus_node);
189 return ret_val; 190 return ret_val;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index a898e5c4ef1b..8e73f5f36e71 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -364,9 +364,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
364 364
365 mutex_init(&bus->mdio_lock); 365 mutex_init(&bus->mdio_lock);
366 366
367 if (bus->reset)
368 bus->reset(bus);
369
370 /* de-assert bus level PHY GPIO resets */ 367 /* de-assert bus level PHY GPIO resets */
371 if (bus->num_reset_gpios > 0) { 368 if (bus->num_reset_gpios > 0) {
372 bus->reset_gpiod = devm_kcalloc(&bus->dev, 369 bus->reset_gpiod = devm_kcalloc(&bus->dev,
@@ -396,6 +393,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
396 } 393 }
397 } 394 }
398 395
396 if (bus->reset)
397 bus->reset(bus);
398
399 for (i = 0; i < PHY_MAX_ADDR; i++) { 399 for (i = 0; i < PHY_MAX_ADDR; i++) {
400 if ((bus->phy_mask & (1 << i)) == 0) { 400 if ((bus->phy_mask & (1 << i)) == 0) {
401 struct phy_device *phydev; 401 struct phy_device *phydev;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f3ae88fdf332..8ab281b478f2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -310,6 +310,26 @@ skip:
310 return -ENODEV; 310 return -ENODEV;
311 } 311 }
312 312
313 return 0;
314
315bad_desc:
316 dev_info(&dev->udev->dev, "bad CDC descriptors\n");
317 return -ENODEV;
318}
319EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind);
320
321
322/* like usbnet_generic_cdc_bind() but handles filter initialization
323 * correctly
324 */
325int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
326{
327 int rv;
328
329 rv = usbnet_generic_cdc_bind(dev, intf);
330 if (rv < 0)
331 goto bail_out;
332
313 /* Some devices don't initialise properly. In particular 333 /* Some devices don't initialise properly. In particular
314 * the packet filter is not reset. There are devices that 334 * the packet filter is not reset. There are devices that
315 * don't do reset all the way. So the packet filter should 335 * don't do reset all the way. So the packet filter should
@@ -317,13 +337,10 @@ skip:
317 */ 337 */
318 usbnet_cdc_update_filter(dev); 338 usbnet_cdc_update_filter(dev);
319 339
320 return 0; 340bail_out:
321 341 return rv;
322bad_desc:
323 dev_info(&dev->udev->dev, "bad CDC descriptors\n");
324 return -ENODEV;
325} 342}
326EXPORT_SYMBOL_GPL(usbnet_generic_cdc_bind); 343EXPORT_SYMBOL_GPL(usbnet_ether_cdc_bind);
327 344
328void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf) 345void usbnet_cdc_unbind(struct usbnet *dev, struct usb_interface *intf)
329{ 346{
@@ -417,7 +434,7 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
417 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) 434 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
418 < sizeof(struct cdc_state))); 435 < sizeof(struct cdc_state)));
419 436
420 status = usbnet_generic_cdc_bind(dev, intf); 437 status = usbnet_ether_cdc_bind(dev, intf);
421 if (status < 0) 438 if (status < 0)
422 return status; 439 return status;
423 440
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index c4f1c363e24b..9df3c1ffff35 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -310,8 +310,8 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data)
310 int rd_mac_len = 0; 310 int rd_mac_len = 0;
311 311
312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n", 312 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
313 dev->udev->descriptor.idVendor, 313 le16_to_cpu(dev->udev->descriptor.idVendor),
314 dev->udev->descriptor.idProduct); 314 le16_to_cpu(dev->udev->descriptor.idProduct));
315 315
316 memset(mac_addr, 0, sizeof(mac_addr)); 316 memset(mac_addr, 0, sizeof(mac_addr));
317 rd_mac_len = control_read(dev, REQUEST_READ, 0, 317 rd_mac_len = control_read(dev, REQUEST_READ, 0,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d7165767ca9d..8f923a147fa9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1196,6 +1196,8 @@ static const struct usb_device_id products[] = {
1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1196 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1197 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1198 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1198 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1199 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1200 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1199 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1201 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1200 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1202 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1201 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1203 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 765400b62168..2dfca96a63b6 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -681,7 +681,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
681 if (ret < 0) 681 if (ret < 0)
682 return ret; 682 return ret;
683 683
684 if (features & NETIF_F_HW_CSUM) 684 if (features & NETIF_F_IP_CSUM)
685 read_buf |= Tx_COE_EN_; 685 read_buf |= Tx_COE_EN_;
686 else 686 else
687 read_buf &= ~Tx_COE_EN_; 687 read_buf &= ~Tx_COE_EN_;
@@ -1279,12 +1279,19 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1279 1279
1280 spin_lock_init(&pdata->mac_cr_lock); 1280 spin_lock_init(&pdata->mac_cr_lock);
1281 1281
1282 /* LAN95xx devices do not alter the computed checksum of 0 to 0xffff.
1283 * RFC 2460, ipv6 UDP calculated checksum yields a result of zero must
1284 * be changed to 0xffff. RFC 768, ipv4 UDP computed checksum is zero,
1285 * it is transmitted as all ones. The zero transmitted checksum means
1286 * transmitter generated no checksum. Hence, enable csum offload only
1287 * for ipv4 packets.
1288 */
1282 if (DEFAULT_TX_CSUM_ENABLE) 1289 if (DEFAULT_TX_CSUM_ENABLE)
1283 dev->net->features |= NETIF_F_HW_CSUM; 1290 dev->net->features |= NETIF_F_IP_CSUM;
1284 if (DEFAULT_RX_CSUM_ENABLE) 1291 if (DEFAULT_RX_CSUM_ENABLE)
1285 dev->net->features |= NETIF_F_RXCSUM; 1292 dev->net->features |= NETIF_F_RXCSUM;
1286 1293
1287 dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 1294 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1288 1295
1289 smsc95xx_init_mac_address(dev); 1296 smsc95xx_init_mac_address(dev);
1290 1297
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9320d96a1632..3e9246cc49c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1989,6 +1989,7 @@ static const struct net_device_ops virtnet_netdev = {
1989 .ndo_poll_controller = virtnet_netpoll, 1989 .ndo_poll_controller = virtnet_netpoll,
1990#endif 1990#endif
1991 .ndo_xdp = virtnet_xdp, 1991 .ndo_xdp = virtnet_xdp,
1992 .ndo_features_check = passthru_features_check,
1992}; 1993};
1993 1994
1994static void virtnet_config_changed_work(struct work_struct *work) 1995static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 25bc764ae7dc..d1c7029ded7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2962 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2962 /* we need to enable NAPI, otherwise dev_close will deadlock */
2963 for (i = 0; i < adapter->num_rx_queues; i++) 2963 for (i = 0; i < adapter->num_rx_queues; i++)
2964 napi_enable(&adapter->rx_queue[i].napi); 2964 napi_enable(&adapter->rx_queue[i].napi);
2965 /*
2966 * Need to clear the quiesce bit to ensure that vmxnet3_close
2967 * can quiesce the device properly
2968 */
2969 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2965 dev_close(adapter->netdev); 2970 dev_close(adapter->netdev);
2966} 2971}
2967 2972
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ceda5861da78..db882493875c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -989,6 +989,7 @@ static u32 vrf_fib_table(const struct net_device *dev)
989 989
990static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 990static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
991{ 991{
992 kfree_skb(skb);
992 return 0; 993 return 0;
993} 994}
994 995
@@ -998,7 +999,7 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
998{ 999{
999 struct net *net = dev_net(dev); 1000 struct net *net = dev_net(dev);
1000 1001
1001 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) 1002 if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
1002 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1003 skb = NULL; /* kfree_skb(skb) handled by nf code */
1003 1004
1004 return skb; 1005 return skb;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6ffc482550c1..7b61adb6270c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1934,8 +1934,7 @@ abort_transaction_no_dev_fatal:
1934 xennet_disconnect_backend(info); 1934 xennet_disconnect_backend(info);
1935 xennet_destroy_queues(info); 1935 xennet_destroy_queues(info);
1936 out: 1936 out:
1937 unregister_netdev(info->netdev); 1937 device_unregister(&dev->dev);
1938 xennet_free_netdev(info->netdev);
1939 return err; 1938 return err;
1940} 1939}
1941 1940
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d5e0906262ea..a60926410438 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -925,6 +925,29 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
925} 925}
926 926
927#ifdef CONFIG_BLK_DEV_INTEGRITY 927#ifdef CONFIG_BLK_DEV_INTEGRITY
928static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
929 u16 bs)
930{
931 struct nvme_ns *ns = disk->private_data;
932 u16 old_ms = ns->ms;
933 u8 pi_type = 0;
934
935 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
936 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
937
938 /* PI implementation requires metadata equal t10 pi tuple size */
939 if (ns->ms == sizeof(struct t10_pi_tuple))
940 pi_type = id->dps & NVME_NS_DPS_PI_MASK;
941
942 if (blk_get_integrity(disk) &&
943 (ns->pi_type != pi_type || ns->ms != old_ms ||
944 bs != queue_logical_block_size(disk->queue) ||
945 (ns->ms && ns->ext)))
946 blk_integrity_unregister(disk);
947
948 ns->pi_type = pi_type;
949}
950
928static void nvme_init_integrity(struct nvme_ns *ns) 951static void nvme_init_integrity(struct nvme_ns *ns)
929{ 952{
930 struct blk_integrity integrity; 953 struct blk_integrity integrity;
@@ -951,6 +974,10 @@ static void nvme_init_integrity(struct nvme_ns *ns)
951 blk_queue_max_integrity_segments(ns->queue, 1); 974 blk_queue_max_integrity_segments(ns->queue, 1);
952} 975}
953#else 976#else
977static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
978 u16 bs)
979{
980}
954static void nvme_init_integrity(struct nvme_ns *ns) 981static void nvme_init_integrity(struct nvme_ns *ns)
955{ 982{
956} 983}
@@ -997,37 +1024,22 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
997static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) 1024static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
998{ 1025{
999 struct nvme_ns *ns = disk->private_data; 1026 struct nvme_ns *ns = disk->private_data;
1000 u8 lbaf, pi_type; 1027 u16 bs;
1001 u16 old_ms;
1002 unsigned short bs;
1003
1004 old_ms = ns->ms;
1005 lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
1006 ns->lba_shift = id->lbaf[lbaf].ds;
1007 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
1008 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1009 1028
1010 /* 1029 /*
1011 * If identify namespace failed, use default 512 byte block size so 1030 * If identify namespace failed, use default 512 byte block size so
1012 * block layer can use before failing read/write for 0 capacity. 1031 * block layer can use before failing read/write for 0 capacity.
1013 */ 1032 */
1033 ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
1014 if (ns->lba_shift == 0) 1034 if (ns->lba_shift == 0)
1015 ns->lba_shift = 9; 1035 ns->lba_shift = 9;
1016 bs = 1 << ns->lba_shift; 1036 bs = 1 << ns->lba_shift;
1017 /* XXX: PI implementation requires metadata equal t10 pi tuple size */
1018 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
1019 id->dps & NVME_NS_DPS_PI_MASK : 0;
1020 1037
1021 blk_mq_freeze_queue(disk->queue); 1038 blk_mq_freeze_queue(disk->queue);
1022 if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
1023 ns->ms != old_ms ||
1024 bs != queue_logical_block_size(disk->queue) ||
1025 (ns->ms && ns->ext)))
1026 blk_integrity_unregister(disk);
1027 1039
1028 ns->pi_type = pi_type; 1040 if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
1041 nvme_prep_integrity(disk, id, bs);
1029 blk_queue_logical_block_size(ns->queue, bs); 1042 blk_queue_logical_block_size(ns->queue, bs);
1030
1031 if (ns->ms && !blk_get_integrity(disk) && !ns->ext) 1043 if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
1032 nvme_init_integrity(ns); 1044 nvme_init_integrity(ns);
1033 if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk)) 1045 if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@@ -1605,7 +1617,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1605 } 1617 }
1606 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 1618 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
1607 1619
1608 if (ctrl->ops->is_fabrics) { 1620 if (ctrl->ops->flags & NVME_F_FABRICS) {
1609 ctrl->icdoff = le16_to_cpu(id->icdoff); 1621 ctrl->icdoff = le16_to_cpu(id->icdoff);
1610 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 1622 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
1611 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 1623 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
@@ -2098,7 +2110,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
2098 if (ns->ndev) 2110 if (ns->ndev)
2099 nvme_nvm_unregister_sysfs(ns); 2111 nvme_nvm_unregister_sysfs(ns);
2100 del_gendisk(ns->disk); 2112 del_gendisk(ns->disk);
2101 blk_mq_abort_requeue_list(ns->queue);
2102 blk_cleanup_queue(ns->queue); 2113 blk_cleanup_queue(ns->queue);
2103 } 2114 }
2104 2115
@@ -2436,8 +2447,16 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2436 continue; 2447 continue;
2437 revalidate_disk(ns->disk); 2448 revalidate_disk(ns->disk);
2438 blk_set_queue_dying(ns->queue); 2449 blk_set_queue_dying(ns->queue);
2439 blk_mq_abort_requeue_list(ns->queue); 2450
2440 blk_mq_start_stopped_hw_queues(ns->queue, true); 2451 /*
2452 * Forcibly start all queues to avoid having stuck requests.
2453 * Note that we must ensure the queues are not stopped
2454 * when the final removal happens.
2455 */
2456 blk_mq_start_hw_queues(ns->queue);
2457
2458 /* draining requests in requeue list */
2459 blk_mq_kick_requeue_list(ns->queue);
2441 } 2460 }
2442 mutex_unlock(&ctrl->namespaces_mutex); 2461 mutex_unlock(&ctrl->namespaces_mutex);
2443} 2462}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 70e689bf1cad..5b14cbefb724 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -45,8 +45,6 @@ enum nvme_fc_queue_flags {
45 45
46#define NVMEFC_QUEUE_DELAY 3 /* ms units */ 46#define NVMEFC_QUEUE_DELAY 3 /* ms units */
47 47
48#define NVME_FC_MAX_CONNECT_ATTEMPTS 1
49
50struct nvme_fc_queue { 48struct nvme_fc_queue {
51 struct nvme_fc_ctrl *ctrl; 49 struct nvme_fc_ctrl *ctrl;
52 struct device *dev; 50 struct device *dev;
@@ -165,8 +163,6 @@ struct nvme_fc_ctrl {
165 struct work_struct delete_work; 163 struct work_struct delete_work;
166 struct work_struct reset_work; 164 struct work_struct reset_work;
167 struct delayed_work connect_work; 165 struct delayed_work connect_work;
168 int reconnect_delay;
169 int connect_attempts;
170 166
171 struct kref ref; 167 struct kref ref;
172 u32 flags; 168 u32 flags;
@@ -1376,9 +1372,9 @@ done:
1376 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); 1372 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1377 if (!complete_rq) { 1373 if (!complete_rq) {
1378 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { 1374 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1379 status = cpu_to_le16(NVME_SC_ABORT_REQ); 1375 status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1380 if (blk_queue_dying(rq->q)) 1376 if (blk_queue_dying(rq->q))
1381 status |= cpu_to_le16(NVME_SC_DNR); 1377 status |= cpu_to_le16(NVME_SC_DNR << 1);
1382 } 1378 }
1383 nvme_end_request(rq, status, result); 1379 nvme_end_request(rq, status, result);
1384 } else 1380 } else
@@ -1751,9 +1747,13 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1751 dev_warn(ctrl->ctrl.device, 1747 dev_warn(ctrl->ctrl.device,
1752 "NVME-FC{%d}: transport association error detected: %s\n", 1748 "NVME-FC{%d}: transport association error detected: %s\n",
1753 ctrl->cnum, errmsg); 1749 ctrl->cnum, errmsg);
1754 dev_info(ctrl->ctrl.device, 1750 dev_warn(ctrl->ctrl.device,
1755 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 1751 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1756 1752
1753 /* stop the queues on error, cleanup is in reset thread */
1754 if (ctrl->queue_count > 1)
1755 nvme_stop_queues(&ctrl->ctrl);
1756
1757 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { 1757 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1758 dev_err(ctrl->ctrl.device, 1758 dev_err(ctrl->ctrl.device,
1759 "NVME-FC{%d}: error_recovery: Couldn't change state " 1759 "NVME-FC{%d}: error_recovery: Couldn't change state "
@@ -2191,9 +2191,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2191 if (!opts->nr_io_queues) 2191 if (!opts->nr_io_queues)
2192 return 0; 2192 return 0;
2193 2193
2194 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2195 opts->nr_io_queues);
2196
2197 nvme_fc_init_io_queues(ctrl); 2194 nvme_fc_init_io_queues(ctrl);
2198 2195
2199 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2196 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
@@ -2264,9 +2261,6 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2264 if (ctrl->queue_count == 1) 2261 if (ctrl->queue_count == 1)
2265 return 0; 2262 return 0;
2266 2263
2267 dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2268 opts->nr_io_queues);
2269
2270 nvme_fc_init_io_queues(ctrl); 2264 nvme_fc_init_io_queues(ctrl);
2271 2265
2272 ret = blk_mq_reinit_tagset(&ctrl->tag_set); 2266 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -2302,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2302 int ret; 2296 int ret;
2303 bool changed; 2297 bool changed;
2304 2298
2305 ctrl->connect_attempts++; 2299 ++ctrl->ctrl.opts->nr_reconnects;
2306 2300
2307 /* 2301 /*
2308 * Create the admin queue 2302 * Create the admin queue
@@ -2399,9 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2399 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 2393 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2400 WARN_ON_ONCE(!changed); 2394 WARN_ON_ONCE(!changed);
2401 2395
2402 ctrl->connect_attempts = 0; 2396 ctrl->ctrl.opts->nr_reconnects = 0;
2403
2404 kref_get(&ctrl->ctrl.kref);
2405 2397
2406 if (ctrl->queue_count > 1) { 2398 if (ctrl->queue_count > 1) {
2407 nvme_start_queues(&ctrl->ctrl); 2399 nvme_start_queues(&ctrl->ctrl);
@@ -2532,26 +2524,32 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
2532 2524
2533 /* 2525 /*
2534 * tear down the controller 2526 * tear down the controller
2535 * This will result in the last reference on the nvme ctrl to 2527 * After the last reference on the nvme ctrl is removed,
2536 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback. 2528 * the transport nvme_fc_nvme_ctrl_freed() callback will be
2537 * From there, the transport will tear down it's logical queues and 2529 * invoked. From there, the transport will tear down it's
2538 * association. 2530 * logical queues and association.
2539 */ 2531 */
2540 nvme_uninit_ctrl(&ctrl->ctrl); 2532 nvme_uninit_ctrl(&ctrl->ctrl);
2541 2533
2542 nvme_put_ctrl(&ctrl->ctrl); 2534 nvme_put_ctrl(&ctrl->ctrl);
2543} 2535}
2544 2536
2545static int 2537static bool
2546__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl) 2538__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
2547{ 2539{
2548 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) 2540 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2549 return -EBUSY; 2541 return true;
2550 2542
2551 if (!queue_work(nvme_fc_wq, &ctrl->delete_work)) 2543 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2552 return -EBUSY; 2544 return true;
2553 2545
2554 return 0; 2546 return false;
2547}
2548
2549static int
2550__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2551{
2552 return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
2555} 2553}
2556 2554
2557/* 2555/*
@@ -2577,6 +2575,35 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2577} 2575}
2578 2576
2579static void 2577static void
2578nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
2579{
2580 /* If we are resetting/deleting then do nothing */
2581 if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
2582 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
2583 ctrl->ctrl.state == NVME_CTRL_LIVE);
2584 return;
2585 }
2586
2587 dev_info(ctrl->ctrl.device,
2588 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2589 ctrl->cnum, status);
2590
2591 if (nvmf_should_reconnect(&ctrl->ctrl)) {
2592 dev_info(ctrl->ctrl.device,
2593 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2594 ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2595 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2596 ctrl->ctrl.opts->reconnect_delay * HZ);
2597 } else {
2598 dev_warn(ctrl->ctrl.device,
2599 "NVME-FC{%d}: Max reconnect attempts (%d) "
2600 "reached. Removing controller\n",
2601 ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
2602 WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
2603 }
2604}
2605
2606static void
2580nvme_fc_reset_ctrl_work(struct work_struct *work) 2607nvme_fc_reset_ctrl_work(struct work_struct *work)
2581{ 2608{
2582 struct nvme_fc_ctrl *ctrl = 2609 struct nvme_fc_ctrl *ctrl =
@@ -2587,34 +2614,9 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
2587 nvme_fc_delete_association(ctrl); 2614 nvme_fc_delete_association(ctrl);
2588 2615
2589 ret = nvme_fc_create_association(ctrl); 2616 ret = nvme_fc_create_association(ctrl);
2590 if (ret) { 2617 if (ret)
2591 dev_warn(ctrl->ctrl.device, 2618 nvme_fc_reconnect_or_delete(ctrl, ret);
2592 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 2619 else
2593 ctrl->cnum, ret);
2594 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2595 dev_warn(ctrl->ctrl.device,
2596 "NVME-FC{%d}: Max reconnect attempts (%d) "
2597 "reached. Removing controller\n",
2598 ctrl->cnum, ctrl->connect_attempts);
2599
2600 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2601 NVME_CTRL_DELETING)) {
2602 dev_err(ctrl->ctrl.device,
2603 "NVME-FC{%d}: failed to change state "
2604 "to DELETING\n", ctrl->cnum);
2605 return;
2606 }
2607
2608 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2609 return;
2610 }
2611
2612 dev_warn(ctrl->ctrl.device,
2613 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2614 ctrl->cnum, ctrl->reconnect_delay);
2615 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2616 ctrl->reconnect_delay * HZ);
2617 } else
2618 dev_info(ctrl->ctrl.device, 2620 dev_info(ctrl->ctrl.device,
2619 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum); 2621 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2620} 2622}
@@ -2628,7 +2630,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2628{ 2630{
2629 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2631 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2630 2632
2631 dev_warn(ctrl->ctrl.device, 2633 dev_info(ctrl->ctrl.device,
2632 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum); 2634 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2633 2635
2634 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) 2636 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
@@ -2645,7 +2647,7 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2645static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 2647static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2646 .name = "fc", 2648 .name = "fc",
2647 .module = THIS_MODULE, 2649 .module = THIS_MODULE,
2648 .is_fabrics = true, 2650 .flags = NVME_F_FABRICS,
2649 .reg_read32 = nvmf_reg_read32, 2651 .reg_read32 = nvmf_reg_read32,
2650 .reg_read64 = nvmf_reg_read64, 2652 .reg_read64 = nvmf_reg_read64,
2651 .reg_write32 = nvmf_reg_write32, 2653 .reg_write32 = nvmf_reg_write32,
@@ -2667,34 +2669,9 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
2667 struct nvme_fc_ctrl, connect_work); 2669 struct nvme_fc_ctrl, connect_work);
2668 2670
2669 ret = nvme_fc_create_association(ctrl); 2671 ret = nvme_fc_create_association(ctrl);
2670 if (ret) { 2672 if (ret)
2671 dev_warn(ctrl->ctrl.device, 2673 nvme_fc_reconnect_or_delete(ctrl, ret);
2672 "NVME-FC{%d}: Reconnect attempt failed (%d)\n", 2674 else
2673 ctrl->cnum, ret);
2674 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2675 dev_warn(ctrl->ctrl.device,
2676 "NVME-FC{%d}: Max reconnect attempts (%d) "
2677 "reached. Removing controller\n",
2678 ctrl->cnum, ctrl->connect_attempts);
2679
2680 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2681 NVME_CTRL_DELETING)) {
2682 dev_err(ctrl->ctrl.device,
2683 "NVME-FC{%d}: failed to change state "
2684 "to DELETING\n", ctrl->cnum);
2685 return;
2686 }
2687
2688 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2689 return;
2690 }
2691
2692 dev_warn(ctrl->ctrl.device,
2693 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2694 ctrl->cnum, ctrl->reconnect_delay);
2695 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2696 ctrl->reconnect_delay * HZ);
2697 } else
2698 dev_info(ctrl->ctrl.device, 2675 dev_info(ctrl->ctrl.device,
2699 "NVME-FC{%d}: controller reconnect complete\n", 2676 "NVME-FC{%d}: controller reconnect complete\n",
2700 ctrl->cnum); 2677 ctrl->cnum);
@@ -2720,6 +2697,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2720 unsigned long flags; 2697 unsigned long flags;
2721 int ret, idx; 2698 int ret, idx;
2722 2699
2700 if (!(rport->remoteport.port_role &
2701 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2702 ret = -EBADR;
2703 goto out_fail;
2704 }
2705
2723 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 2706 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2724 if (!ctrl) { 2707 if (!ctrl) {
2725 ret = -ENOMEM; 2708 ret = -ENOMEM;
@@ -2745,7 +2728,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2745 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work); 2728 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2746 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work); 2729 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2747 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 2730 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2748 ctrl->reconnect_delay = opts->reconnect_delay;
2749 spin_lock_init(&ctrl->lock); 2731 spin_lock_init(&ctrl->lock);
2750 2732
2751 /* io queue count */ 2733 /* io queue count */
@@ -2809,7 +2791,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2809 ctrl->ctrl.opts = NULL; 2791 ctrl->ctrl.opts = NULL;
2810 /* initiate nvme ctrl ref counting teardown */ 2792 /* initiate nvme ctrl ref counting teardown */
2811 nvme_uninit_ctrl(&ctrl->ctrl); 2793 nvme_uninit_ctrl(&ctrl->ctrl);
2812 nvme_put_ctrl(&ctrl->ctrl);
2813 2794
2814 /* as we're past the point where we transition to the ref 2795 /* as we're past the point where we transition to the ref
2815 * counting teardown path, if we return a bad pointer here, 2796 * counting teardown path, if we return a bad pointer here,
@@ -2825,6 +2806,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2825 return ERR_PTR(ret); 2806 return ERR_PTR(ret);
2826 } 2807 }
2827 2808
2809 kref_get(&ctrl->ctrl.kref);
2810
2828 dev_info(ctrl->ctrl.device, 2811 dev_info(ctrl->ctrl.device,
2829 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 2812 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2830 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 2813 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
@@ -2961,7 +2944,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2961static struct nvmf_transport_ops nvme_fc_transport = { 2944static struct nvmf_transport_ops nvme_fc_transport = {
2962 .name = "fc", 2945 .name = "fc",
2963 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 2946 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2964 .allowed_opts = NVMF_OPT_RECONNECT_DELAY, 2947 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
2965 .create_ctrl = nvme_fc_create_ctrl, 2948 .create_ctrl = nvme_fc_create_ctrl,
2966}; 2949};
2967 2950
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 29c708ca9621..9d6a070d4391 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -208,7 +208,9 @@ struct nvme_ns {
208struct nvme_ctrl_ops { 208struct nvme_ctrl_ops {
209 const char *name; 209 const char *name;
210 struct module *module; 210 struct module *module;
211 bool is_fabrics; 211 unsigned int flags;
212#define NVME_F_FABRICS (1 << 0)
213#define NVME_F_METADATA_SUPPORTED (1 << 1)
212 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val); 214 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
213 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val); 215 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
214 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); 216 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fed803232edc..d52701df7245 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -263,7 +263,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
263 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 263 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
264 264
265 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 265 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
266 dev_warn(dev->dev, "unable to set dbbuf\n"); 266 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
267 /* Free memory and continue on */ 267 /* Free memory and continue on */
268 nvme_dbbuf_dma_free(dev); 268 nvme_dbbuf_dma_free(dev);
269 } 269 }
@@ -1394,11 +1394,11 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1394 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1394 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1395 &pci_status); 1395 &pci_status);
1396 if (result == PCIBIOS_SUCCESSFUL) 1396 if (result == PCIBIOS_SUCCESSFUL)
1397 dev_warn(dev->dev, 1397 dev_warn(dev->ctrl.device,
1398 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1398 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1399 csts, pci_status); 1399 csts, pci_status);
1400 else 1400 else
1401 dev_warn(dev->dev, 1401 dev_warn(dev->ctrl.device,
1402 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1402 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1403 csts, result); 1403 csts, result);
1404} 1404}
@@ -1506,6 +1506,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
1506 if (dev->cmb) { 1506 if (dev->cmb) {
1507 iounmap(dev->cmb); 1507 iounmap(dev->cmb);
1508 dev->cmb = NULL; 1508 dev->cmb = NULL;
1509 if (dev->cmbsz) {
1510 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1511 &dev_attr_cmb.attr, NULL);
1512 dev->cmbsz = 0;
1513 }
1509 } 1514 }
1510} 1515}
1511 1516
@@ -1735,8 +1740,8 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1735 */ 1740 */
1736 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 1741 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
1737 dev->q_depth = 2; 1742 dev->q_depth = 2;
1738 dev_warn(dev->dev, "detected Apple NVMe controller, set " 1743 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
1739 "queue depth=%u to work around controller resets\n", 1744 "set queue depth=%u to work around controller resets\n",
1740 dev->q_depth); 1745 dev->q_depth);
1741 } 1746 }
1742 1747
@@ -1754,7 +1759,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1754 if (dev->cmbsz) { 1759 if (dev->cmbsz) {
1755 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1760 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1756 &dev_attr_cmb.attr, NULL)) 1761 &dev_attr_cmb.attr, NULL))
1757 dev_warn(dev->dev, 1762 dev_warn(dev->ctrl.device,
1758 "failed to add sysfs attribute for CMB\n"); 1763 "failed to add sysfs attribute for CMB\n");
1759 } 1764 }
1760 } 1765 }
@@ -1779,6 +1784,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
1779{ 1784{
1780 struct pci_dev *pdev = to_pci_dev(dev->dev); 1785 struct pci_dev *pdev = to_pci_dev(dev->dev);
1781 1786
1787 nvme_release_cmb(dev);
1782 pci_free_irq_vectors(pdev); 1788 pci_free_irq_vectors(pdev);
1783 1789
1784 if (pci_is_enabled(pdev)) { 1790 if (pci_is_enabled(pdev)) {
@@ -2041,6 +2047,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
2041static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 2047static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2042 .name = "pcie", 2048 .name = "pcie",
2043 .module = THIS_MODULE, 2049 .module = THIS_MODULE,
2050 .flags = NVME_F_METADATA_SUPPORTED,
2044 .reg_read32 = nvme_pci_reg_read32, 2051 .reg_read32 = nvme_pci_reg_read32,
2045 .reg_write32 = nvme_pci_reg_write32, 2052 .reg_write32 = nvme_pci_reg_write32,
2046 .reg_read64 = nvme_pci_reg_read64, 2053 .reg_read64 = nvme_pci_reg_read64,
@@ -2184,7 +2191,6 @@ static void nvme_remove(struct pci_dev *pdev)
2184 nvme_dev_disable(dev, true); 2191 nvme_dev_disable(dev, true);
2185 nvme_dev_remove_admin(dev); 2192 nvme_dev_remove_admin(dev);
2186 nvme_free_queues(dev, 0); 2193 nvme_free_queues(dev, 0);
2187 nvme_release_cmb(dev);
2188 nvme_release_prp_pools(dev); 2194 nvme_release_prp_pools(dev);
2189 nvme_dev_unmap(dev); 2195 nvme_dev_unmap(dev);
2190 nvme_put_ctrl(&dev->ctrl); 2196 nvme_put_ctrl(&dev->ctrl);
@@ -2288,6 +2294,8 @@ static const struct pci_device_id nvme_id_table[] = {
2288 { PCI_VDEVICE(INTEL, 0x0a54), 2294 { PCI_VDEVICE(INTEL, 0x0a54),
2289 .driver_data = NVME_QUIRK_STRIPE_SIZE | 2295 .driver_data = NVME_QUIRK_STRIPE_SIZE |
2290 NVME_QUIRK_DEALLOCATE_ZEROES, }, 2296 NVME_QUIRK_DEALLOCATE_ZEROES, },
2297 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
2298 .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
2291 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2299 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2292 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2300 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2293 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 2301 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dd1c6deef82f..28bd255c144d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1038,6 +1038,19 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1038 nvme_rdma_wr_error(cq, wc, "SEND"); 1038 nvme_rdma_wr_error(cq, wc, "SEND");
1039} 1039}
1040 1040
1041static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
1042{
1043 int sig_limit;
1044
1045 /*
1046 * We signal completion every queue depth/2 and also handle the
1047 * degenerated case of a device with queue_depth=1, where we
1048 * would need to signal every message.
1049 */
1050 sig_limit = max(queue->queue_size / 2, 1);
1051 return (++queue->sig_count % sig_limit) == 0;
1052}
1053
1041static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, 1054static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1042 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, 1055 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
1043 struct ib_send_wr *first, bool flush) 1056 struct ib_send_wr *first, bool flush)
@@ -1065,9 +1078,6 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1065 * Would have been way to obvious to handle this in hardware or 1078 * Would have been way to obvious to handle this in hardware or
1066 * at least the RDMA stack.. 1079 * at least the RDMA stack..
1067 * 1080 *
1068 * This messy and racy code sniplet is copy and pasted from the iSER
1069 * initiator, and the magic '32' comes from there as well.
1070 *
1071 * Always signal the flushes. The magic request used for the flush 1081 * Always signal the flushes. The magic request used for the flush
1072 * sequencer is not allocated in our driver's tagset and it's 1082 * sequencer is not allocated in our driver's tagset and it's
1073 * triggered to be freed by blk_cleanup_queue(). So we need to 1083 * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1075,7 +1085,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1075 * embedded in request's payload, is not freed when __ib_process_cq() 1085 * embedded in request's payload, is not freed when __ib_process_cq()
1076 * calls wr_cqe->done(). 1086 * calls wr_cqe->done().
1077 */ 1087 */
1078 if ((++queue->sig_count % 32) == 0 || flush) 1088 if (nvme_rdma_queue_sig_limit(queue) || flush)
1079 wr.send_flags |= IB_SEND_SIGNALED; 1089 wr.send_flags |= IB_SEND_SIGNALED;
1080 1090
1081 if (first) 1091 if (first)
@@ -1782,7 +1792,7 @@ static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
1782static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { 1792static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
1783 .name = "rdma", 1793 .name = "rdma",
1784 .module = THIS_MODULE, 1794 .module = THIS_MODULE,
1785 .is_fabrics = true, 1795 .flags = NVME_F_FABRICS,
1786 .reg_read32 = nvmf_reg_read32, 1796 .reg_read32 = nvmf_reg_read32,
1787 .reg_read64 = nvmf_reg_read64, 1797 .reg_read64 = nvmf_reg_read64,
1788 .reg_write32 = nvmf_reg_write32, 1798 .reg_write32 = nvmf_reg_write32,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index cf90713043da..eb9399ac97cf 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -529,6 +529,12 @@ fail:
529} 529}
530EXPORT_SYMBOL_GPL(nvmet_req_init); 530EXPORT_SYMBOL_GPL(nvmet_req_init);
531 531
532void nvmet_req_uninit(struct nvmet_req *req)
533{
534 percpu_ref_put(&req->sq->ref);
535}
536EXPORT_SYMBOL_GPL(nvmet_req_uninit);
537
532static inline bool nvmet_cc_en(u32 cc) 538static inline bool nvmet_cc_en(u32 cc)
533{ 539{
534 return cc & 0x1; 540 return cc & 0x1;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 62eba29c85fb..2006fae61980 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -517,9 +517,7 @@ nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
517{ 517{
518 int cpu, idx, cnt; 518 int cpu, idx, cnt;
519 519
520 if (!(tgtport->ops->target_features & 520 if (tgtport->ops->max_hw_queues == 1)
521 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
522 tgtport->ops->max_hw_queues == 1)
523 return WORK_CPU_UNBOUND; 521 return WORK_CPU_UNBOUND;
524 522
525 /* Simple cpu selection based on qid modulo active cpu count */ 523 /* Simple cpu selection based on qid modulo active cpu count */
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 15551ef79c8c..294a6611fb24 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -698,7 +698,6 @@ static struct nvmet_fc_target_template tgttemplate = {
698 .dma_boundary = FCLOOP_DMABOUND_4G, 698 .dma_boundary = FCLOOP_DMABOUND_4G,
699 /* optional features */ 699 /* optional features */
700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR | 700 .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
701 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
702 NVMET_FCTGTFEAT_OPDONE_IN_ISR, 701 NVMET_FCTGTFEAT_OPDONE_IN_ISR,
703 /* sizes of additional private data for data structures */ 702 /* sizes of additional private data for data structures */
704 .target_priv_sz = sizeof(struct fcloop_tport), 703 .target_priv_sz = sizeof(struct fcloop_tport),
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index feb497134aee..e503cfff0337 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -558,7 +558,7 @@ static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
558static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { 558static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
559 .name = "loop", 559 .name = "loop",
560 .module = THIS_MODULE, 560 .module = THIS_MODULE,
561 .is_fabrics = true, 561 .flags = NVME_F_FABRICS,
562 .reg_read32 = nvmf_reg_read32, 562 .reg_read32 = nvmf_reg_read32,
563 .reg_read64 = nvmf_reg_read64, 563 .reg_read64 = nvmf_reg_read64,
564 .reg_write32 = nvmf_reg_write32, 564 .reg_write32 = nvmf_reg_write32,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7cb77ba5993b..cfc5c7fb0ab7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -261,6 +261,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
261 261
262bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, 262bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); 263 struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
264void nvmet_req_uninit(struct nvmet_req *req);
264void nvmet_req_complete(struct nvmet_req *req, u16 status); 265void nvmet_req_complete(struct nvmet_req *req, u16 status);
265 266
266void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, 267void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 99c69018a35f..9e45cde63376 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -567,6 +567,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
567 rsp->n_rdma = 0; 567 rsp->n_rdma = 0;
568 568
569 if (unlikely(wc->status != IB_WC_SUCCESS)) { 569 if (unlikely(wc->status != IB_WC_SUCCESS)) {
570 nvmet_req_uninit(&rsp->req);
570 nvmet_rdma_release_rsp(rsp); 571 nvmet_rdma_release_rsp(rsp);
571 if (wc->status != IB_WC_WR_FLUSH_ERR) { 572 if (wc->status != IB_WC_WR_FLUSH_ERR) {
572 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n", 573 pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 3080d9dd031d..43bd69dceabf 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -507,6 +507,9 @@ void *__unflatten_device_tree(const void *blob,
507 507
508 /* Allocate memory for the expanded device tree */ 508 /* Allocate memory for the expanded device tree */
509 mem = dt_alloc(size + 4, __alignof__(struct device_node)); 509 mem = dt_alloc(size + 4, __alignof__(struct device_node));
510 if (!mem)
511 return NULL;
512
510 memset(mem, 0, size); 513 memset(mem, 0, size);
511 514
512 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); 515 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 4dec07ea510f..d507c3569a88 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -197,7 +197,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
197 const struct of_device_id *i; 197 const struct of_device_id *i;
198 198
199 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { 199 for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
200 int const (*initfn)(struct reserved_mem *rmem) = i->data; 200 reservedmem_of_init_fn initfn = i->data;
201 const char *compat = i->compatible; 201 const char *compat = i->compatible;
202 202
203 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) 203 if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 71fecc2debfc..703a42118ffc 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -523,7 +523,7 @@ static int __init of_platform_default_populate_init(void)
523arch_initcall_sync(of_platform_default_populate_init); 523arch_initcall_sync(of_platform_default_populate_init);
524#endif 524#endif
525 525
526static int of_platform_device_destroy(struct device *dev, void *data) 526int of_platform_device_destroy(struct device *dev, void *data)
527{ 527{
528 /* Do not touch devices not populated from the device tree */ 528 /* Do not touch devices not populated from the device tree */
529 if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) 529 if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
@@ -544,6 +544,7 @@ static int of_platform_device_destroy(struct device *dev, void *data)
544 of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); 544 of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
545 return 0; 545 return 0;
546} 546}
547EXPORT_SYMBOL_GPL(of_platform_device_destroy);
547 548
548/** 549/**
549 * of_platform_depopulate() - Remove devices populated from device tree 550 * of_platform_depopulate() - Remove devices populated from device tree
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index a98cba55c7f0..19a289b8cc94 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -252,7 +252,34 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
252static int imx6q_pcie_abort_handler(unsigned long addr, 252static int imx6q_pcie_abort_handler(unsigned long addr,
253 unsigned int fsr, struct pt_regs *regs) 253 unsigned int fsr, struct pt_regs *regs)
254{ 254{
255 return 0; 255 unsigned long pc = instruction_pointer(regs);
256 unsigned long instr = *(unsigned long *)pc;
257 int reg = (instr >> 12) & 15;
258
259 /*
260 * If the instruction being executed was a read,
261 * make it look like it read all-ones.
262 */
263 if ((instr & 0x0c100000) == 0x04100000) {
264 unsigned long val;
265
266 if (instr & 0x00400000)
267 val = 255;
268 else
269 val = -1;
270
271 regs->uregs[reg] = val;
272 regs->ARM_pc += 4;
273 return 0;
274 }
275
276 if ((instr & 0x0e100090) == 0x00100090) {
277 regs->uregs[reg] = -1;
278 regs->ARM_pc += 4;
279 return 0;
280 }
281
282 return 1;
256} 283}
257 284
258static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) 285static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
@@ -819,8 +846,8 @@ static int __init imx6_pcie_init(void)
819 * we can install the handler here without risking it 846 * we can install the handler here without risking it
820 * accessing some uninitialized driver state. 847 * accessing some uninitialized driver state.
821 */ 848 */
822 hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, 849 hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
823 "imprecise external abort"); 850 "external abort on non-linefetch");
824 851
825 return platform_driver_register(&imx6_pcie_driver); 852 return platform_driver_register(&imx6_pcie_driver);
826} 853}
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index c23f146fb5a6..c09623ca8c3b 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -6,6 +6,7 @@ menu "PCI Endpoint"
6 6
7config PCI_ENDPOINT 7config PCI_ENDPOINT
8 bool "PCI Endpoint Support" 8 bool "PCI Endpoint Support"
9 depends on HAS_DMA
9 help 10 help
10 Enable this configuration option to support configurable PCI 11 Enable this configuration option to support configurable PCI
11 endpoint. This should be enabled if the platform has a PCI 12 endpoint. This should be enabled if the platform has a PCI
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b01bd5bba8e6..563901cd9c06 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2144,7 +2144,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2144 2144
2145 if (!pm_runtime_suspended(dev) 2145 if (!pm_runtime_suspended(dev)
2146 || pci_target_state(pci_dev) != pci_dev->current_state 2146 || pci_target_state(pci_dev) != pci_dev->current_state
2147 || platform_pci_need_resume(pci_dev)) 2147 || platform_pci_need_resume(pci_dev)
2148 || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
2148 return false; 2149 return false;
2149 2150
2150 /* 2151 /*
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index cc6e085008fb..f6a63406c76e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1291,7 +1291,6 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
1291 cdev = &stdev->cdev; 1291 cdev = &stdev->cdev;
1292 cdev_init(cdev, &switchtec_fops); 1292 cdev_init(cdev, &switchtec_fops);
1293 cdev->owner = THIS_MODULE; 1293 cdev->owner = THIS_MODULE;
1294 cdev->kobj.parent = &dev->kobj;
1295 1294
1296 return stdev; 1295 return stdev;
1297 1296
@@ -1442,12 +1441,15 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
1442 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; 1441 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
1443 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; 1442 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
1444 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; 1443 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
1445 stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); 1444 stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
1446 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); 1445 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
1447 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; 1446 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
1448 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; 1447 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
1449 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; 1448 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
1450 1449
1450 if (stdev->partition_count < 1)
1451 stdev->partition_count = 1;
1452
1451 init_pff(stdev); 1453 init_pff(stdev);
1452 1454
1453 pci_set_drvdata(pdev, stdev); 1455 pci_set_drvdata(pdev, stdev);
@@ -1479,11 +1481,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1479 SWITCHTEC_EVENT_EN_IRQ, 1481 SWITCHTEC_EVENT_EN_IRQ,
1480 &stdev->mmio_part_cfg->mrpc_comp_hdr); 1482 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1481 1483
1482 rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); 1484 rc = cdev_device_add(&stdev->cdev, &stdev->dev);
1483 if (rc)
1484 goto err_put;
1485
1486 rc = device_add(&stdev->dev);
1487 if (rc) 1485 if (rc)
1488 goto err_devadd; 1486 goto err_devadd;
1489 1487
@@ -1492,7 +1490,6 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
1492 return 0; 1490 return 0;
1493 1491
1494err_devadd: 1492err_devadd:
1495 cdev_del(&stdev->cdev);
1496 stdev_kill(stdev); 1493 stdev_kill(stdev);
1497err_put: 1494err_put:
1498 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); 1495 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
@@ -1506,8 +1503,7 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
1506 1503
1507 pci_set_drvdata(pdev, NULL); 1504 pci_set_drvdata(pdev, NULL);
1508 1505
1509 device_del(&stdev->dev); 1506 cdev_device_del(&stdev->cdev, &stdev->dev);
1510 cdev_del(&stdev->cdev);
1511 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); 1507 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1512 dev_info(&stdev->dev, "unregistered.\n"); 1508 dev_info(&stdev->dev, "unregistered.\n");
1513 1509
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 14bde0db8c24..5b10b50f8686 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
538 538
539 power_zone->id = result; 539 power_zone->id = result;
540 idr_init(&power_zone->idr); 540 idr_init(&power_zone->idr);
541 result = -ENOMEM;
541 power_zone->name = kstrdup(name, GFP_KERNEL); 542 power_zone->name = kstrdup(name, GFP_KERNEL);
542 if (!power_zone->name) 543 if (!power_zone->name)
543 goto err_name_alloc; 544 goto err_name_alloc;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index b3de973a6260..9dca53df3584 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1088,7 +1088,7 @@ static u32 rtc_handler(void *context)
1088 } 1088 }
1089 spin_unlock_irqrestore(&rtc_lock, flags); 1089 spin_unlock_irqrestore(&rtc_lock, flags);
1090 1090
1091 pm_wakeup_event(dev, 0); 1091 pm_wakeup_hard_event(dev);
1092 acpi_clear_event(ACPI_EVENT_RTC); 1092 acpi_clear_event(ACPI_EVENT_RTC);
1093 acpi_disable_event(ACPI_EVENT_RTC, 0); 1093 acpi_disable_event(ACPI_EVENT_RTC, 0);
1094 return ACPI_INTERRUPT_HANDLED; 1094 return ACPI_INTERRUPT_HANDLED;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e443b0d0b236..34b9ad6b3143 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -35,7 +35,7 @@ static struct bus_type ccwgroup_bus_type;
35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
36{ 36{
37 int i; 37 int i;
38 char str[8]; 38 char str[16];
39 39
40 for (i = 0; i < gdev->count; i++) { 40 for (i = 0; i < gdev->count; i++) {
41 sprintf(str, "cdev%d", i); 41 sprintf(str, "cdev%d", i);
@@ -238,7 +238,7 @@ static void ccwgroup_release(struct device *dev)
238 238
239static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 239static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
240{ 240{
241 char str[8]; 241 char str[16];
242 int i, rc; 242 int i, rc;
243 243
244 for (i = 0; i < gdev->count; i++) { 244 for (i = 0; i < gdev->count; i++) {
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index f33ce8577619..1d595d17bf11 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -11,7 +11,7 @@
11#include "qdio.h" 11#include "qdio.h"
12 12
13/* that gives us 15 characters in the text event views */ 13/* that gives us 15 characters in the text event views */
14#define QDIO_DBF_LEN 16 14#define QDIO_DBF_LEN 32
15 15
16extern debug_info_t *qdio_dbf_setup; 16extern debug_info_t *qdio_dbf_setup;
17extern debug_info_t *qdio_dbf_error; 17extern debug_info_t *qdio_dbf_error;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f6aa21176d89..30bc6105aac3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -701,6 +701,7 @@ enum qeth_discipline_id {
701}; 701};
702 702
703struct qeth_discipline { 703struct qeth_discipline {
704 const struct device_type *devtype;
704 void (*start_poll)(struct ccw_device *, int, unsigned long); 705 void (*start_poll)(struct ccw_device *, int, unsigned long);
705 qdio_handler_t *input_handler; 706 qdio_handler_t *input_handler;
706 qdio_handler_t *output_handler; 707 qdio_handler_t *output_handler;
@@ -875,6 +876,9 @@ extern struct qeth_discipline qeth_l2_discipline;
875extern struct qeth_discipline qeth_l3_discipline; 876extern struct qeth_discipline qeth_l3_discipline;
876extern const struct attribute_group *qeth_generic_attr_groups[]; 877extern const struct attribute_group *qeth_generic_attr_groups[];
877extern const struct attribute_group *qeth_osn_attr_groups[]; 878extern const struct attribute_group *qeth_osn_attr_groups[];
879extern const struct attribute_group qeth_device_attr_group;
880extern const struct attribute_group qeth_device_blkt_group;
881extern const struct device_type qeth_generic_devtype;
878extern struct workqueue_struct *qeth_wq; 882extern struct workqueue_struct *qeth_wq;
879 883
880int qeth_card_hw_is_reachable(struct qeth_card *); 884int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 38114a8d56e0..fc6d85f2b38d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5530,10 +5530,12 @@ void qeth_core_free_discipline(struct qeth_card *card)
5530 card->discipline = NULL; 5530 card->discipline = NULL;
5531} 5531}
5532 5532
5533static const struct device_type qeth_generic_devtype = { 5533const struct device_type qeth_generic_devtype = {
5534 .name = "qeth_generic", 5534 .name = "qeth_generic",
5535 .groups = qeth_generic_attr_groups, 5535 .groups = qeth_generic_attr_groups,
5536}; 5536};
5537EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5538
5537static const struct device_type qeth_osn_devtype = { 5539static const struct device_type qeth_osn_devtype = {
5538 .name = "qeth_osn", 5540 .name = "qeth_osn",
5539 .groups = qeth_osn_attr_groups, 5541 .groups = qeth_osn_attr_groups,
@@ -5659,23 +5661,22 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5659 goto err_card; 5661 goto err_card;
5660 } 5662 }
5661 5663
5662 if (card->info.type == QETH_CARD_TYPE_OSN)
5663 gdev->dev.type = &qeth_osn_devtype;
5664 else
5665 gdev->dev.type = &qeth_generic_devtype;
5666
5667 switch (card->info.type) { 5664 switch (card->info.type) {
5668 case QETH_CARD_TYPE_OSN: 5665 case QETH_CARD_TYPE_OSN:
5669 case QETH_CARD_TYPE_OSM: 5666 case QETH_CARD_TYPE_OSM:
5670 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); 5667 rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
5671 if (rc) 5668 if (rc)
5672 goto err_card; 5669 goto err_card;
5670
5671 gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
5672 ? card->discipline->devtype
5673 : &qeth_osn_devtype;
5673 rc = card->discipline->setup(card->gdev); 5674 rc = card->discipline->setup(card->gdev);
5674 if (rc) 5675 if (rc)
5675 goto err_disc; 5676 goto err_disc;
5676 case QETH_CARD_TYPE_OSD: 5677 break;
5677 case QETH_CARD_TYPE_OSX:
5678 default: 5678 default:
5679 gdev->dev.type = &qeth_generic_devtype;
5679 break; 5680 break;
5680 } 5681 }
5681 5682
@@ -5731,8 +5732,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
5731 if (rc) 5732 if (rc)
5732 goto err; 5733 goto err;
5733 rc = card->discipline->setup(card->gdev); 5734 rc = card->discipline->setup(card->gdev);
5734 if (rc) 5735 if (rc) {
5736 qeth_core_free_discipline(card);
5735 goto err; 5737 goto err;
5738 }
5736 } 5739 }
5737 rc = card->discipline->set_online(gdev); 5740 rc = card->discipline->set_online(gdev);
5738err: 5741err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2fcf4..db6a285d41e0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
413 413
414 if (card->options.layer2 == newdis) 414 if (card->options.layer2 == newdis)
415 goto out; 415 goto out;
416 else { 416 if (card->info.type == QETH_CARD_TYPE_OSM) {
417 card->info.mac_bits = 0; 417 /* fixed layer, can't switch */
418 if (card->discipline) { 418 rc = -EOPNOTSUPP;
419 card->discipline->remove(card->gdev); 419 goto out;
420 qeth_core_free_discipline(card); 420 }
421 } 421
422 card->info.mac_bits = 0;
423 if (card->discipline) {
424 card->discipline->remove(card->gdev);
425 qeth_core_free_discipline(card);
422 } 426 }
423 427
424 rc = qeth_core_load_discipline(card, newdis); 428 rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
426 goto out; 430 goto out;
427 431
428 rc = card->discipline->setup(card->gdev); 432 rc = card->discipline->setup(card->gdev);
433 if (rc)
434 qeth_core_free_discipline(card);
429out: 435out:
430 mutex_unlock(&card->discipline_mutex); 436 mutex_unlock(&card->discipline_mutex);
431 return rc ? rc : count; 437 return rc ? rc : count;
@@ -703,10 +709,11 @@ static struct attribute *qeth_blkt_device_attrs[] = {
703 &dev_attr_inter_jumbo.attr, 709 &dev_attr_inter_jumbo.attr,
704 NULL, 710 NULL,
705}; 711};
706static struct attribute_group qeth_device_blkt_group = { 712const struct attribute_group qeth_device_blkt_group = {
707 .name = "blkt", 713 .name = "blkt",
708 .attrs = qeth_blkt_device_attrs, 714 .attrs = qeth_blkt_device_attrs,
709}; 715};
716EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
710 717
711static struct attribute *qeth_device_attrs[] = { 718static struct attribute *qeth_device_attrs[] = {
712 &dev_attr_state.attr, 719 &dev_attr_state.attr,
@@ -726,9 +733,10 @@ static struct attribute *qeth_device_attrs[] = {
726 &dev_attr_switch_attrs.attr, 733 &dev_attr_switch_attrs.attr,
727 NULL, 734 NULL,
728}; 735};
729static struct attribute_group qeth_device_attr_group = { 736const struct attribute_group qeth_device_attr_group = {
730 .attrs = qeth_device_attrs, 737 .attrs = qeth_device_attrs,
731}; 738};
739EXPORT_SYMBOL_GPL(qeth_device_attr_group);
732 740
733const struct attribute_group *qeth_generic_attr_groups[] = { 741const struct attribute_group *qeth_generic_attr_groups[] = {
734 &qeth_device_attr_group, 742 &qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3890ad..0d59f9a45ea9 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
8 8
9#include "qeth_core.h" 9#include "qeth_core.h"
10 10
11extern const struct attribute_group *qeth_l2_attr_groups[];
12
11int qeth_l2_create_device_attributes(struct device *); 13int qeth_l2_create_device_attributes(struct device *);
12void qeth_l2_remove_device_attributes(struct device *); 14void qeth_l2_remove_device_attributes(struct device *);
13void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); 15void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 1b07f382d74c..bd2df62a5cdf 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -880,11 +880,21 @@ static int qeth_l2_stop(struct net_device *dev)
880 return 0; 880 return 0;
881} 881}
882 882
883static const struct device_type qeth_l2_devtype = {
884 .name = "qeth_layer2",
885 .groups = qeth_l2_attr_groups,
886};
887
883static int qeth_l2_probe_device(struct ccwgroup_device *gdev) 888static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
884{ 889{
885 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 890 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
891 int rc;
886 892
887 qeth_l2_create_device_attributes(&gdev->dev); 893 if (gdev->dev.type == &qeth_generic_devtype) {
894 rc = qeth_l2_create_device_attributes(&gdev->dev);
895 if (rc)
896 return rc;
897 }
888 INIT_LIST_HEAD(&card->vid_list); 898 INIT_LIST_HEAD(&card->vid_list);
889 hash_init(card->mac_htable); 899 hash_init(card->mac_htable);
890 card->options.layer2 = 1; 900 card->options.layer2 = 1;
@@ -896,7 +906,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
896{ 906{
897 struct qeth_card *card = dev_get_drvdata(&cgdev->dev); 907 struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
898 908
899 qeth_l2_remove_device_attributes(&cgdev->dev); 909 if (cgdev->dev.type == &qeth_generic_devtype)
910 qeth_l2_remove_device_attributes(&cgdev->dev);
900 qeth_set_allowed_threads(card, 0, 1); 911 qeth_set_allowed_threads(card, 0, 1);
901 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 912 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
902 913
@@ -954,7 +965,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
954 case QETH_CARD_TYPE_OSN: 965 case QETH_CARD_TYPE_OSN:
955 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, 966 card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
956 ether_setup); 967 ether_setup);
957 card->dev->flags |= IFF_NOARP;
958 break; 968 break;
959 default: 969 default:
960 card->dev = alloc_etherdev(0); 970 card->dev = alloc_etherdev(0);
@@ -969,9 +979,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
969 card->dev->min_mtu = 64; 979 card->dev->min_mtu = 64;
970 card->dev->max_mtu = ETH_MAX_MTU; 980 card->dev->max_mtu = ETH_MAX_MTU;
971 card->dev->netdev_ops = &qeth_l2_netdev_ops; 981 card->dev->netdev_ops = &qeth_l2_netdev_ops;
972 card->dev->ethtool_ops = 982 if (card->info.type == QETH_CARD_TYPE_OSN) {
973 (card->info.type != QETH_CARD_TYPE_OSN) ? 983 card->dev->ethtool_ops = &qeth_l2_osn_ops;
974 &qeth_l2_ethtool_ops : &qeth_l2_osn_ops; 984 card->dev->flags |= IFF_NOARP;
985 } else {
986 card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
987 }
975 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 988 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
976 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { 989 if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
977 card->dev->hw_features = NETIF_F_SG; 990 card->dev->hw_features = NETIF_F_SG;
@@ -1269,6 +1282,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
1269} 1282}
1270 1283
1271struct qeth_discipline qeth_l2_discipline = { 1284struct qeth_discipline qeth_l2_discipline = {
1285 .devtype = &qeth_l2_devtype,
1272 .start_poll = qeth_qdio_start_poll, 1286 .start_poll = qeth_qdio_start_poll,
1273 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 1287 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1274 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, 1288 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 687972356d6b..9696baa49e2d 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -269,3 +269,11 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
269 } else 269 } else
270 qeth_bridgeport_an_set(card, 0); 270 qeth_bridgeport_an_set(card, 0);
271} 271}
272
273const struct attribute_group *qeth_l2_attr_groups[] = {
274 &qeth_device_attr_group,
275 &qeth_device_blkt_group,
276 /* l2 specific, see l2_{create,remove}_device_attributes(): */
277 &qeth_l2_bridgeport_attr_group,
278 NULL,
279};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e0354ef4b86..d8df1e635163 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3039,8 +3039,13 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3039static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3039static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3040{ 3040{
3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3041 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3042 int rc;
3042 3043
3043 qeth_l3_create_device_attributes(&gdev->dev); 3044 rc = qeth_l3_create_device_attributes(&gdev->dev);
3045 if (rc)
3046 return rc;
3047 hash_init(card->ip_htable);
3048 hash_init(card->ip_mc_htable);
3044 card->options.layer2 = 0; 3049 card->options.layer2 = 0;
3045 card->info.hwtrap = 0; 3050 card->info.hwtrap = 0;
3046 return 0; 3051 return 0;
@@ -3306,6 +3311,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
3306} 3311}
3307 3312
3308struct qeth_discipline qeth_l3_discipline = { 3313struct qeth_discipline qeth_l3_discipline = {
3314 .devtype = &qeth_generic_devtype,
3309 .start_poll = qeth_qdio_start_poll, 3315 .start_poll = qeth_qdio_start_poll,
3310 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, 3316 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
3311 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, 3317 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 2a76ea78a0bf..b18fe2014cf2 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -87,7 +87,7 @@ struct vq_info_block {
87} __packed; 87} __packed;
88 88
89struct virtio_feature_desc { 89struct virtio_feature_desc {
90 __u32 features; 90 __le32 features;
91 __u8 index; 91 __u8 index;
92} __packed; 92} __packed;
93 93
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 622bdabc8894..dab195f04da7 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1769 goto bye; 1769 goto bye;
1770 } 1770 }
1771 1771
1772 mempool_free(mbp, hw->mb_mempool);
1773 if (finicsum != cfcsum) { 1772 if (finicsum != cfcsum) {
1774 csio_warn(hw, 1773 csio_warn(hw,
1775 "Config File checksum mismatch: csum=%#x, computed=%#x\n", 1774 "Config File checksum mismatch: csum=%#x, computed=%#x\n",
@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
1780 rv = csio_hw_validate_caps(hw, mbp); 1779 rv = csio_hw_validate_caps(hw, mbp);
1781 if (rv != 0) 1780 if (rv != 0)
1782 goto bye; 1781 goto bye;
1782
1783 mempool_free(mbp, hw->mb_mempool);
1784 mbp = NULL;
1785
1783 /* 1786 /*
1784 * Note that we're operating with parameters 1787 * Note that we're operating with parameters
1785 * not supplied by the driver, rather than from hard-wired 1788 * not supplied by the driver, rather than from hard-wired
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index c052104e523e..a011c5dbf214 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -5,6 +5,7 @@
5config CXLFLASH 5config CXLFLASH
6 tristate "Support for IBM CAPI Flash" 6 tristate "Support for IBM CAPI Flash"
7 depends on PCI && SCSI && CXL && EEH 7 depends on PCI && SCSI && CXL && EEH
8 select IRQ_POLL
8 default m 9 default m
9 help 10 help
10 Allows CAPI Accelerated IO to Flash 11 Allows CAPI Accelerated IO to Flash
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index a808e8ef1d08..234352da5c3c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -407,11 +407,12 @@ unlock:
407 * can_queue. Eventually we will hit the point where we run 407 * can_queue. Eventually we will hit the point where we run
408 * on all reserved structs. 408 * on all reserved structs.
409 */ 409 */
410static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) 410static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
411{ 411{
412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport); 412 struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
413 unsigned long flags; 413 unsigned long flags;
414 int can_queue; 414 int can_queue;
415 bool changed = false;
415 416
416 spin_lock_irqsave(lport->host->host_lock, flags); 417 spin_lock_irqsave(lport->host->host_lock, flags);
417 418
@@ -427,9 +428,11 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
427 if (!can_queue) 428 if (!can_queue)
428 can_queue = 1; 429 can_queue = 1;
429 lport->host->can_queue = can_queue; 430 lport->host->can_queue = can_queue;
431 changed = true;
430 432
431unlock: 433unlock:
432 spin_unlock_irqrestore(lport->host->host_lock, flags); 434 spin_unlock_irqrestore(lport->host->host_lock, flags);
435 return changed;
433} 436}
434 437
435/* 438/*
@@ -1896,11 +1899,11 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
1896 1899
1897 if (!fc_fcp_lport_queue_ready(lport)) { 1900 if (!fc_fcp_lport_queue_ready(lport)) {
1898 if (lport->qfull) { 1901 if (lport->qfull) {
1899 fc_fcp_can_queue_ramp_down(lport); 1902 if (fc_fcp_can_queue_ramp_down(lport))
1900 shost_printk(KERN_ERR, lport->host, 1903 shost_printk(KERN_ERR, lport->host,
1901 "libfc: queue full, " 1904 "libfc: queue full, "
1902 "reducing can_queue to %d.\n", 1905 "reducing can_queue to %d.\n",
1903 lport->host->can_queue); 1906 lport->host->can_queue);
1904 } 1907 }
1905 rc = SCSI_MLQUEUE_HOST_BUSY; 1908 rc = SCSI_MLQUEUE_HOST_BUSY;
1906 goto out; 1909 goto out;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b44c3136eb51..520325867e2b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1422,7 +1422,7 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
1422 fp = fc_frame_alloc(lport, sizeof(*rtv)); 1422 fp = fc_frame_alloc(lport, sizeof(*rtv));
1423 if (!fp) { 1423 if (!fp) {
1424 rjt_data.reason = ELS_RJT_UNAB; 1424 rjt_data.reason = ELS_RJT_UNAB;
1425 rjt_data.reason = ELS_EXPL_INSUF_RES; 1425 rjt_data.explan = ELS_EXPL_INSUF_RES;
1426 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); 1426 fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
1427 goto drop; 1427 goto drop;
1428 } 1428 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6d7840b096e6..f2c0ba6ced78 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
141 uint32_t buffer_tag; /* used for tagged queue ring */ 141 uint32_t buffer_tag; /* used for tagged queue ring */
142}; 142};
143 143
144struct lpfc_nvmet_ctxbuf {
145 struct list_head list;
146 struct lpfc_nvmet_rcv_ctx *context;
147 struct lpfc_iocbq *iocbq;
148 struct lpfc_sglq *sglq;
149};
150
144struct lpfc_dma_pool { 151struct lpfc_dma_pool {
145 struct lpfc_dmabuf *elements; 152 struct lpfc_dmabuf *elements;
146 uint32_t max_count; 153 uint32_t max_count;
@@ -163,9 +170,7 @@ struct rqb_dmabuf {
163 struct lpfc_dmabuf dbuf; 170 struct lpfc_dmabuf dbuf;
164 uint16_t total_size; 171 uint16_t total_size;
165 uint16_t bytes_recv; 172 uint16_t bytes_recv;
166 void *context; 173 uint16_t idx;
167 struct lpfc_iocbq *iocbq;
168 struct lpfc_sglq *sglq;
169 struct lpfc_queue *hrq; /* ptr to associated Header RQ */ 174 struct lpfc_queue *hrq; /* ptr to associated Header RQ */
170 struct lpfc_queue *drq; /* ptr to associated Data RQ */ 175 struct lpfc_queue *drq; /* ptr to associated Data RQ */
171}; 176};
@@ -670,6 +675,8 @@ struct lpfc_hba {
670 /* INIT_LINK mailbox command */ 675 /* INIT_LINK mailbox command */
671#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 676#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
672#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 677#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
678#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
679#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
673 680
674 uint32_t hba_flag; /* hba generic flags */ 681 uint32_t hba_flag; /* hba generic flags */
675#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 682#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -777,7 +784,6 @@ struct lpfc_hba {
777 uint32_t cfg_nvme_oas; 784 uint32_t cfg_nvme_oas;
778 uint32_t cfg_nvme_io_channel; 785 uint32_t cfg_nvme_io_channel;
779 uint32_t cfg_nvmet_mrq; 786 uint32_t cfg_nvmet_mrq;
780 uint32_t cfg_nvmet_mrq_post;
781 uint32_t cfg_enable_nvmet; 787 uint32_t cfg_enable_nvmet;
782 uint32_t cfg_nvme_enable_fb; 788 uint32_t cfg_nvme_enable_fb;
783 uint32_t cfg_nvmet_fb_size; 789 uint32_t cfg_nvmet_fb_size;
@@ -943,6 +949,7 @@ struct lpfc_hba {
943 struct pci_pool *lpfc_mbuf_pool; 949 struct pci_pool *lpfc_mbuf_pool;
944 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ 950 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
945 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ 951 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
952 struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
946 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ 953 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
947 struct pci_pool *txrdy_payload_pool; 954 struct pci_pool *txrdy_payload_pool;
948 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 955 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1228,7 +1235,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
1228static inline struct lpfc_sli_ring * 1235static inline struct lpfc_sli_ring *
1229lpfc_phba_elsring(struct lpfc_hba *phba) 1236lpfc_phba_elsring(struct lpfc_hba *phba)
1230{ 1237{
1231 if (phba->sli_rev == LPFC_SLI_REV4) 1238 if (phba->sli_rev == LPFC_SLI_REV4) {
1232 return phba->sli4_hba.els_wq->pring; 1239 if (phba->sli4_hba.els_wq)
1240 return phba->sli4_hba.els_wq->pring;
1241 else
1242 return NULL;
1243 }
1233 return &phba->sli.sli3_ring[LPFC_ELS_RING]; 1244 return &phba->sli.sli3_ring[LPFC_ELS_RING];
1234} 1245}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4830370bfab1..bb2d9e238225 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -60,9 +60,9 @@
60#define LPFC_MIN_DEVLOSS_TMO 1 60#define LPFC_MIN_DEVLOSS_TMO 1
61#define LPFC_MAX_DEVLOSS_TMO 255 61#define LPFC_MAX_DEVLOSS_TMO 255
62 62
63#define LPFC_DEF_MRQ_POST 256 63#define LPFC_DEF_MRQ_POST 512
64#define LPFC_MIN_MRQ_POST 32 64#define LPFC_MIN_MRQ_POST 512
65#define LPFC_MAX_MRQ_POST 512 65#define LPFC_MAX_MRQ_POST 2048
66 66
67/* 67/*
68 * Write key size should be multiple of 4. If write key is changed 68 * Write key size should be multiple of 4. If write key is changed
@@ -205,8 +205,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
205 atomic_read(&tgtp->xmt_ls_rsp_error)); 205 atomic_read(&tgtp->xmt_ls_rsp_error));
206 206
207 len += snprintf(buf+len, PAGE_SIZE-len, 207 len += snprintf(buf+len, PAGE_SIZE-len,
208 "FCP: Rcv %08x Drop %08x\n", 208 "FCP: Rcv %08x Release %08x Drop %08x\n",
209 atomic_read(&tgtp->rcv_fcp_cmd_in), 209 atomic_read(&tgtp->rcv_fcp_cmd_in),
210 atomic_read(&tgtp->xmt_fcp_release),
210 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 211 atomic_read(&tgtp->rcv_fcp_cmd_drop));
211 212
212 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 213 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
@@ -218,15 +219,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
218 } 219 }
219 220
220 len += snprintf(buf+len, PAGE_SIZE-len, 221 len += snprintf(buf+len, PAGE_SIZE-len,
221 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n", 222 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
223 "drop %08x\n",
222 atomic_read(&tgtp->xmt_fcp_read), 224 atomic_read(&tgtp->xmt_fcp_read),
223 atomic_read(&tgtp->xmt_fcp_read_rsp), 225 atomic_read(&tgtp->xmt_fcp_read_rsp),
224 atomic_read(&tgtp->xmt_fcp_write), 226 atomic_read(&tgtp->xmt_fcp_write),
225 atomic_read(&tgtp->xmt_fcp_rsp)); 227 atomic_read(&tgtp->xmt_fcp_rsp),
226
227 len += snprintf(buf+len, PAGE_SIZE-len,
228 "FCP Rsp: abort %08x drop %08x\n",
229 atomic_read(&tgtp->xmt_fcp_abort),
230 atomic_read(&tgtp->xmt_fcp_drop)); 228 atomic_read(&tgtp->xmt_fcp_drop));
231 229
232 len += snprintf(buf+len, PAGE_SIZE-len, 230 len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,10 +234,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
236 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 234 atomic_read(&tgtp->xmt_fcp_rsp_drop));
237 235
238 len += snprintf(buf+len, PAGE_SIZE-len, 236 len += snprintf(buf+len, PAGE_SIZE-len,
239 "ABORT: Xmt %08x Err %08x Cmpl %08x", 237 "ABORT: Xmt %08x Cmpl %08x\n",
238 atomic_read(&tgtp->xmt_fcp_abort),
239 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
240
241 len += snprintf(buf + len, PAGE_SIZE - len,
242 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
243 atomic_read(&tgtp->xmt_abort_sol),
244 atomic_read(&tgtp->xmt_abort_unsol),
240 atomic_read(&tgtp->xmt_abort_rsp), 245 atomic_read(&tgtp->xmt_abort_rsp),
241 atomic_read(&tgtp->xmt_abort_rsp_error), 246 atomic_read(&tgtp->xmt_abort_rsp_error));
242 atomic_read(&tgtp->xmt_abort_cmpl)); 247
248 len += snprintf(buf + len, PAGE_SIZE - len,
249 "IO_CTX: %08x outstanding %08x total %x",
250 phba->sli4_hba.nvmet_ctx_cnt,
251 phba->sli4_hba.nvmet_io_wait_cnt,
252 phba->sli4_hba.nvmet_io_wait_total);
243 253
244 len += snprintf(buf+len, PAGE_SIZE-len, "\n"); 254 len += snprintf(buf+len, PAGE_SIZE-len, "\n");
245 return len; 255 return len;
@@ -3312,14 +3322,6 @@ LPFC_ATTR_R(nvmet_mrq,
3312 "Specify number of RQ pairs for processing NVMET cmds"); 3322 "Specify number of RQ pairs for processing NVMET cmds");
3313 3323
3314/* 3324/*
3315 * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
3316 *
3317 */
3318LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
3319 LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
3320 "Specify number of buffers to post on every MRQ");
3321
3322/*
3323 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3325 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3324 * Supported Values: 1 - register just FCP 3326 * Supported Values: 1 - register just FCP
3325 * 3 - register both FCP and NVME 3327 * 3 - register both FCP and NVME
@@ -5154,7 +5156,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
5154 &dev_attr_lpfc_suppress_rsp, 5156 &dev_attr_lpfc_suppress_rsp,
5155 &dev_attr_lpfc_nvme_io_channel, 5157 &dev_attr_lpfc_nvme_io_channel,
5156 &dev_attr_lpfc_nvmet_mrq, 5158 &dev_attr_lpfc_nvmet_mrq,
5157 &dev_attr_lpfc_nvmet_mrq_post,
5158 &dev_attr_lpfc_nvme_enable_fb, 5159 &dev_attr_lpfc_nvme_enable_fb,
5159 &dev_attr_lpfc_nvmet_fb_size, 5160 &dev_attr_lpfc_nvmet_fb_size,
5160 &dev_attr_lpfc_enable_bg, 5161 &dev_attr_lpfc_enable_bg,
@@ -6194,7 +6195,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
6194 6195
6195 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); 6196 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
6196 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); 6197 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
6197 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
6198 6198
6199 /* Initialize first burst. Target vs Initiator are different. */ 6199 /* Initialize first burst. Target vs Initiator are different. */
6200 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 6200 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6291,7 +6291,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
6291 /* Not NVME Target mode. Turn off Target parameters. */ 6291 /* Not NVME Target mode. Turn off Target parameters. */
6292 phba->nvmet_support = 0; 6292 phba->nvmet_support = 0;
6293 phba->cfg_nvmet_mrq = 0; 6293 phba->cfg_nvmet_mrq = 0;
6294 phba->cfg_nvmet_mrq_post = 0;
6295 phba->cfg_nvmet_fb_size = 0; 6294 phba->cfg_nvmet_fb_size = 0;
6296 } 6295 }
6297 6296
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 944b32ca4931..8912767e7bc8 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -75,6 +75,10 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
76void lpfc_retry_pport_discovery(struct lpfc_hba *); 76void lpfc_retry_pport_discovery(struct lpfc_hba *);
77void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); 77void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
78int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
79void lpfc_free_iocb_list(struct lpfc_hba *phba);
80int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
81 struct lpfc_queue *drq, int count, int idx);
78 82
79void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 83void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
80void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 84void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +250,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
246void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); 250void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
247struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba); 251struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
248void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab); 252void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
249void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, 253void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
250 struct lpfc_dmabuf *mp); 254 struct lpfc_nvmet_ctxbuf *ctxp);
251int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, 255int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
252 struct fc_frame_header *fc_hdr); 256 struct fc_frame_header *fc_hdr);
253void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, 257void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
254 uint16_t); 258 uint16_t);
255int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 259int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
256 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe); 260 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
257int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
258 struct lpfc_queue *dq, int count);
259int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq); 261int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
260void lpfc_unregister_fcf(struct lpfc_hba *); 262void lpfc_unregister_fcf(struct lpfc_hba *);
261void lpfc_unregister_fcf_rescan(struct lpfc_hba *); 263void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
@@ -271,6 +273,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
271void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); 273void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
272 274
273int lpfc_mem_alloc(struct lpfc_hba *, int align); 275int lpfc_mem_alloc(struct lpfc_hba *, int align);
276int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
274int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); 277int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
275void lpfc_mem_free(struct lpfc_hba *); 278void lpfc_mem_free(struct lpfc_hba *);
276void lpfc_mem_free_all(struct lpfc_hba *); 279void lpfc_mem_free_all(struct lpfc_hba *);
@@ -294,6 +297,7 @@ int lpfc_selective_reset(struct lpfc_hba *);
294void lpfc_reset_barrier(struct lpfc_hba *); 297void lpfc_reset_barrier(struct lpfc_hba *);
295int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); 298int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
296int lpfc_sli_brdkill(struct lpfc_hba *); 299int lpfc_sli_brdkill(struct lpfc_hba *);
300int lpfc_sli_chipset_init(struct lpfc_hba *phba);
297int lpfc_sli_brdreset(struct lpfc_hba *); 301int lpfc_sli_brdreset(struct lpfc_hba *);
298int lpfc_sli_brdrestart(struct lpfc_hba *); 302int lpfc_sli_brdrestart(struct lpfc_hba *);
299int lpfc_sli_hba_setup(struct lpfc_hba *); 303int lpfc_sli_hba_setup(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1487406aea77..f2cd19c6c2df 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -630,7 +630,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
630 NLP_EVT_DEVICE_RECOVERY); 630 NLP_EVT_DEVICE_RECOVERY);
631 spin_lock_irq(shost->host_lock); 631 spin_lock_irq(shost->host_lock);
632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV; 632 ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
633 spin_lock_irq(shost->host_lock); 633 spin_unlock_irq(shost->host_lock);
634 } 634 }
635 } 635 }
636 636
@@ -2092,6 +2092,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
2092 2092
2093 ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ 2093 ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
2094 ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ 2094 ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
2095 ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
2095 ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */ 2096 ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
2096 size = FOURBYTES + 32; 2097 size = FOURBYTES + 32;
2097 ad->AttrLen = cpu_to_be16(size); 2098 ad->AttrLen = cpu_to_be16(size);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fce549a91911..4bcb92c844ca 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -798,21 +798,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
798 atomic_read(&tgtp->xmt_fcp_rsp)); 798 atomic_read(&tgtp->xmt_fcp_rsp));
799 799
800 len += snprintf(buf + len, size - len, 800 len += snprintf(buf + len, size - len,
801 "FCP Rsp: abort %08x drop %08x\n",
802 atomic_read(&tgtp->xmt_fcp_abort),
803 atomic_read(&tgtp->xmt_fcp_drop));
804
805 len += snprintf(buf + len, size - len,
806 "FCP Rsp Cmpl: %08x err %08x drop %08x\n", 801 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
807 atomic_read(&tgtp->xmt_fcp_rsp_cmpl), 802 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
808 atomic_read(&tgtp->xmt_fcp_rsp_error), 803 atomic_read(&tgtp->xmt_fcp_rsp_error),
809 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 804 atomic_read(&tgtp->xmt_fcp_rsp_drop));
810 805
811 len += snprintf(buf + len, size - len, 806 len += snprintf(buf + len, size - len,
812 "ABORT: Xmt %08x Err %08x Cmpl %08x", 807 "ABORT: Xmt %08x Cmpl %08x\n",
808 atomic_read(&tgtp->xmt_fcp_abort),
809 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
810
811 len += snprintf(buf + len, size - len,
812 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
813 atomic_read(&tgtp->xmt_abort_sol),
814 atomic_read(&tgtp->xmt_abort_unsol),
813 atomic_read(&tgtp->xmt_abort_rsp), 815 atomic_read(&tgtp->xmt_abort_rsp),
814 atomic_read(&tgtp->xmt_abort_rsp_error), 816 atomic_read(&tgtp->xmt_abort_rsp_error));
815 atomic_read(&tgtp->xmt_abort_cmpl));
816 817
817 len += snprintf(buf + len, size - len, "\n"); 818 len += snprintf(buf + len, size - len, "\n");
818 819
@@ -841,6 +842,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
841 } 842 }
842 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 843 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
843 } 844 }
845
846 len += snprintf(buf + len, size - len,
847 "IO_CTX: %08x outstanding %08x total %08x\n",
848 phba->sli4_hba.nvmet_ctx_cnt,
849 phba->sli4_hba.nvmet_io_wait_cnt,
850 phba->sli4_hba.nvmet_io_wait_total);
844 } else { 851 } else {
845 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 852 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
846 return len; 853 return len;
@@ -1959,6 +1966,7 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
1959 atomic_set(&tgtp->rcv_ls_req_out, 0); 1966 atomic_set(&tgtp->rcv_ls_req_out, 0);
1960 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1967 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1961 atomic_set(&tgtp->xmt_ls_abort, 0); 1968 atomic_set(&tgtp->xmt_ls_abort, 0);
1969 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1962 atomic_set(&tgtp->xmt_ls_rsp, 0); 1970 atomic_set(&tgtp->xmt_ls_rsp, 0);
1963 atomic_set(&tgtp->xmt_ls_drop, 0); 1971 atomic_set(&tgtp->xmt_ls_drop, 0);
1964 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1972 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -1967,19 +1975,22 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
1967 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1975 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1968 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1976 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1969 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1977 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1970 atomic_set(&tgtp->xmt_fcp_abort, 0);
1971 atomic_set(&tgtp->xmt_fcp_drop, 0); 1978 atomic_set(&tgtp->xmt_fcp_drop, 0);
1972 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1979 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1973 atomic_set(&tgtp->xmt_fcp_read, 0); 1980 atomic_set(&tgtp->xmt_fcp_read, 0);
1974 atomic_set(&tgtp->xmt_fcp_write, 0); 1981 atomic_set(&tgtp->xmt_fcp_write, 0);
1975 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1982 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1983 atomic_set(&tgtp->xmt_fcp_release, 0);
1976 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1984 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1977 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1985 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1978 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1986 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1979 1987
1988 atomic_set(&tgtp->xmt_fcp_abort, 0);
1989 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1990 atomic_set(&tgtp->xmt_abort_sol, 0);
1991 atomic_set(&tgtp->xmt_abort_unsol, 0);
1980 atomic_set(&tgtp->xmt_abort_rsp, 0); 1992 atomic_set(&tgtp->xmt_abort_rsp, 0);
1981 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1993 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1982 atomic_set(&tgtp->xmt_abort_cmpl, 0);
1983 } 1994 }
1984 return nbytes; 1995 return nbytes;
1985} 1996}
@@ -3070,11 +3081,11 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
3070 qp->assoc_qid, qp->q_cnt_1, 3081 qp->assoc_qid, qp->q_cnt_1,
3071 (unsigned long long)qp->q_cnt_4); 3082 (unsigned long long)qp->q_cnt_4);
3072 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3083 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3073 "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3084 "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3074 "HOST-IDX[%04d], PORT-IDX[%04d]", 3085 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3075 qp->queue_id, qp->entry_count, 3086 qp->queue_id, qp->entry_count,
3076 qp->entry_size, qp->host_index, 3087 qp->entry_size, qp->host_index,
3077 qp->hba_index); 3088 qp->hba_index, qp->entry_repost);
3078 len += snprintf(pbuffer + len, 3089 len += snprintf(pbuffer + len,
3079 LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3090 LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3080 return len; 3091 return len;
@@ -3121,11 +3132,11 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
3121 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, 3132 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
3122 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); 3133 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
3123 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3134 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3124 "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3135 "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3125 "HOST-IDX[%04d], PORT-IDX[%04d]", 3136 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3126 qp->queue_id, qp->entry_count, 3137 qp->queue_id, qp->entry_count,
3127 qp->entry_size, qp->host_index, 3138 qp->entry_size, qp->host_index,
3128 qp->hba_index); 3139 qp->hba_index, qp->entry_repost);
3129 3140
3130 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3141 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3131 3142
@@ -3143,20 +3154,20 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
3143 "\t\t%s RQ info: ", rqtype); 3154 "\t\t%s RQ info: ", rqtype);
3144 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3155 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3145 "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x " 3156 "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
3146 "trunc:x%x rcv:x%llx]\n", 3157 "posted:x%x rcv:x%llx]\n",
3147 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2, 3158 qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
3148 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); 3159 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
3149 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3160 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3150 "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3161 "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3151 "HOST-IDX[%04d], PORT-IDX[%04d]\n", 3162 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
3152 qp->queue_id, qp->entry_count, qp->entry_size, 3163 qp->queue_id, qp->entry_count, qp->entry_size,
3153 qp->host_index, qp->hba_index); 3164 qp->host_index, qp->hba_index, qp->entry_repost);
3154 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3165 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3155 "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3166 "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3156 "HOST-IDX[%04d], PORT-IDX[%04d]\n", 3167 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
3157 datqp->queue_id, datqp->entry_count, 3168 datqp->queue_id, datqp->entry_count,
3158 datqp->entry_size, datqp->host_index, 3169 datqp->entry_size, datqp->host_index,
3159 datqp->hba_index); 3170 datqp->hba_index, datqp->entry_repost);
3160 return len; 3171 return len;
3161} 3172}
3162 3173
@@ -3242,10 +3253,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
3242 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, 3253 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
3243 (unsigned long long)qp->q_cnt_4); 3254 (unsigned long long)qp->q_cnt_4);
3244 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3255 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3245 "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], " 3256 "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
3246 "HOST-IDX[%04d], PORT-IDX[%04d]", 3257 "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
3247 qp->queue_id, qp->entry_count, qp->entry_size, 3258 qp->queue_id, qp->entry_count, qp->entry_size,
3248 qp->host_index, qp->hba_index); 3259 qp->host_index, qp->hba_index, qp->entry_repost);
3249 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); 3260 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
3250 3261
3251 return len; 3262 return len;
@@ -5855,8 +5866,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
5855 atomic_dec(&lpfc_debugfs_hba_count); 5866 atomic_dec(&lpfc_debugfs_hba_count);
5856 } 5867 }
5857 5868
5858 debugfs_remove(lpfc_debugfs_root); /* lpfc */ 5869 if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
5859 lpfc_debugfs_root = NULL; 5870 debugfs_remove(lpfc_debugfs_root); /* lpfc */
5871 lpfc_debugfs_root = NULL;
5872 }
5860 } 5873 }
5861#endif 5874#endif
5862 return; 5875 return;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 9d5a379f4b15..094c97b9e5f7 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -90,6 +90,7 @@ struct lpfc_nodelist {
90#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */ 90#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
91#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */ 91#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */
92#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */ 92#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */
93#define NLP_NVME_DISCOVERY 0x80 /* entry has NVME disc srvc */
93 94
94 uint16_t nlp_fc4_type; /* FC types node supports. */ 95 uint16_t nlp_fc4_type; /* FC types node supports. */
95 /* Assigned from GID_FF, only 96 /* Assigned from GID_FF, only
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 67827e397431..8e532b39ae93 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1047,6 +1047,13 @@ stop_rr_fcf_flogi:
1047 irsp->ulpStatus, irsp->un.ulpWord[4], 1047 irsp->ulpStatus, irsp->un.ulpWord[4],
1048 irsp->ulpTimeout); 1048 irsp->ulpTimeout);
1049 1049
1050
1051 /* If this is not a loop open failure, bail out */
1052 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1053 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1054 IOERR_LOOP_OPEN_FAILURE)))
1055 goto flogifail;
1056
1050 /* FLOGI failed, so there is no fabric */ 1057 /* FLOGI failed, so there is no fabric */
1051 spin_lock_irq(shost->host_lock); 1058 spin_lock_irq(shost->host_lock);
1052 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1059 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2077,16 +2084,19 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2077 2084
2078 if (irsp->ulpStatus) { 2085 if (irsp->ulpStatus) {
2079 /* Check for retry */ 2086 /* Check for retry */
2087 ndlp->fc4_prli_sent--;
2080 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2088 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2081 /* ELS command is being retried */ 2089 /* ELS command is being retried */
2082 ndlp->fc4_prli_sent--;
2083 goto out; 2090 goto out;
2084 } 2091 }
2092
2085 /* PRLI failed */ 2093 /* PRLI failed */
2086 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2094 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2087 "2754 PRLI failure DID:%06X Status:x%x/x%x\n", 2095 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2096 "data: x%x\n",
2088 ndlp->nlp_DID, irsp->ulpStatus, 2097 ndlp->nlp_DID, irsp->ulpStatus,
2089 irsp->un.ulpWord[4]); 2098 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2099
2090 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2100 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2091 if (lpfc_error_lost_link(irsp)) 2101 if (lpfc_error_lost_link(irsp))
2092 goto out; 2102 goto out;
@@ -7441,6 +7451,13 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
7441 */ 7451 */
7442 spin_lock_irq(&phba->hbalock); 7452 spin_lock_irq(&phba->hbalock);
7443 pring = lpfc_phba_elsring(phba); 7453 pring = lpfc_phba_elsring(phba);
7454
7455 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
7456 if (unlikely(!pring)) {
7457 spin_unlock_irq(&phba->hbalock);
7458 return;
7459 }
7460
7444 if (phba->sli_rev == LPFC_SLI_REV4) 7461 if (phba->sli_rev == LPFC_SLI_REV4)
7445 spin_lock(&pring->ring_lock); 7462 spin_lock(&pring->ring_lock);
7446 7463
@@ -8667,7 +8684,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8667 lpfc_do_scr_ns_plogi(phba, vport); 8684 lpfc_do_scr_ns_plogi(phba, vport);
8668 goto out; 8685 goto out;
8669fdisc_failed: 8686fdisc_failed:
8670 if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS) 8687 if (vport->fc_vport &&
8688 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
8671 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8689 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8672 /* Cancel discovery timer */ 8690 /* Cancel discovery timer */
8673 lpfc_can_disctmo(vport); 8691 lpfc_can_disctmo(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0482c5580331..3ffcd9215ca8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -693,15 +693,16 @@ lpfc_work_done(struct lpfc_hba *phba)
693 pring = lpfc_phba_elsring(phba); 693 pring = lpfc_phba_elsring(phba);
694 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 694 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
695 status >>= (4*LPFC_ELS_RING); 695 status >>= (4*LPFC_ELS_RING);
696 if ((status & HA_RXMASK) || 696 if (pring && (status & HA_RXMASK ||
697 (pring->flag & LPFC_DEFERRED_RING_EVENT) || 697 pring->flag & LPFC_DEFERRED_RING_EVENT ||
698 (phba->hba_flag & HBA_SP_QUEUE_EVT)) { 698 phba->hba_flag & HBA_SP_QUEUE_EVT)) {
699 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 699 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
700 pring->flag |= LPFC_DEFERRED_RING_EVENT; 700 pring->flag |= LPFC_DEFERRED_RING_EVENT;
701 /* Set the lpfc data pending flag */ 701 /* Set the lpfc data pending flag */
702 set_bit(LPFC_DATA_READY, &phba->data_flags); 702 set_bit(LPFC_DATA_READY, &phba->data_flags);
703 } else { 703 } else {
704 if (phba->link_state >= LPFC_LINK_UP) { 704 if (phba->link_state >= LPFC_LINK_UP ||
705 phba->link_flag & LS_MDS_LOOPBACK) {
705 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 706 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
706 lpfc_sli_handle_slow_ring_event(phba, pring, 707 lpfc_sli_handle_slow_ring_event(phba, pring,
707 (status & 708 (status &
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1d12f2be36bc..e0a5fce416ae 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1356,6 +1356,7 @@ struct lpfc_mbx_wq_destroy {
1356 1356
1357#define LPFC_HDR_BUF_SIZE 128 1357#define LPFC_HDR_BUF_SIZE 128
1358#define LPFC_DATA_BUF_SIZE 2048 1358#define LPFC_DATA_BUF_SIZE 2048
1359#define LPFC_NVMET_DATA_BUF_SIZE 128
1359struct rq_context { 1360struct rq_context {
1360 uint32_t word0; 1361 uint32_t word0;
1361#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ 1362#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */
@@ -4420,6 +4421,19 @@ struct fcp_treceive64_wqe {
4420}; 4421};
4421#define TXRDY_PAYLOAD_LEN 12 4422#define TXRDY_PAYLOAD_LEN 12
4422 4423
4424#define CMD_SEND_FRAME 0xE1
4425
4426struct send_frame_wqe {
4427 struct ulp_bde64 bde; /* words 0-2 */
4428 uint32_t frame_len; /* word 3 */
4429 uint32_t fc_hdr_wd0; /* word 4 */
4430 uint32_t fc_hdr_wd1; /* word 5 */
4431 struct wqe_common wqe_com; /* words 6-11 */
4432 uint32_t fc_hdr_wd2; /* word 12 */
4433 uint32_t fc_hdr_wd3; /* word 13 */
4434 uint32_t fc_hdr_wd4; /* word 14 */
4435 uint32_t fc_hdr_wd5; /* word 15 */
4436};
4423 4437
4424union lpfc_wqe { 4438union lpfc_wqe {
4425 uint32_t words[16]; 4439 uint32_t words[16];
@@ -4438,7 +4452,7 @@ union lpfc_wqe {
4438 struct fcp_trsp64_wqe fcp_trsp; 4452 struct fcp_trsp64_wqe fcp_trsp;
4439 struct fcp_tsend64_wqe fcp_tsend; 4453 struct fcp_tsend64_wqe fcp_tsend;
4440 struct fcp_treceive64_wqe fcp_treceive; 4454 struct fcp_treceive64_wqe fcp_treceive;
4441 4455 struct send_frame_wqe send_frame;
4442}; 4456};
4443 4457
4444union lpfc_wqe128 { 4458union lpfc_wqe128 {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90ae354a9c45..9add9473cae5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1099 1099
1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { 1100 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); 1101 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
1102 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 1102 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1103 } 1103 }
1104 } 1104 }
1105 1105
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3381{ 3381{
3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3382 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3383 uint16_t i, lxri, xri_cnt, els_xri_cnt; 3383 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3384 uint16_t nvmet_xri_cnt, tot_cnt; 3384 uint16_t nvmet_xri_cnt;
3385 LIST_HEAD(nvmet_sgl_list); 3385 LIST_HEAD(nvmet_sgl_list);
3386 int rc; 3386 int rc;
3387 3387
@@ -3389,15 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3389 * update on pci function's nvmet xri-sgl list 3389 * update on pci function's nvmet xri-sgl list
3390 */ 3390 */
3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3391 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3392 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post; 3392
3393 tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; 3393 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3394 if (nvmet_xri_cnt > tot_cnt) { 3394 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3395 phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
3396 nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
3397 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3398 "6301 NVMET post-sgl count changed to %d\n",
3399 phba->cfg_nvmet_mrq_post);
3400 }
3401 3395
3402 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) { 3396 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3403 /* els xri-sgl expanded */ 3397 /* els xri-sgl expanded */
@@ -3602,6 +3596,13 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
3602 LPFC_MBOXQ_t *mboxq; 3596 LPFC_MBOXQ_t *mboxq;
3603 MAILBOX_t *mb; 3597 MAILBOX_t *mb;
3604 3598
3599 if (phba->sli_rev < LPFC_SLI_REV4) {
3600 /* Reset the port first */
3601 lpfc_sli_brdrestart(phba);
3602 rc = lpfc_sli_chipset_init(phba);
3603 if (rc)
3604 return (uint64_t)-1;
3605 }
3605 3606
3606 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 3607 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3607 GFP_KERNEL); 3608 GFP_KERNEL);
@@ -4539,6 +4540,19 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4539 pmb->vport = phba->pport; 4540 pmb->vport = phba->pport;
4540 4541
4541 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4542 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
4543 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
4544
4545 switch (phba->sli4_hba.link_state.status) {
4546 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
4547 phba->link_flag |= LS_MDS_LINK_DOWN;
4548 break;
4549 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
4550 phba->link_flag |= LS_MDS_LOOPBACK;
4551 break;
4552 default:
4553 break;
4554 }
4555
4542 /* Parse and translate status field */ 4556 /* Parse and translate status field */
4543 mb = &pmb->u.mb; 4557 mb = &pmb->u.mb;
4544 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4558 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
@@ -5823,6 +5837,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5823 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); 5837 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
5824 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5838 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5825 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 5839 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
5840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
5841 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
5842
5826 /* Fast-path XRI aborted CQ Event work queue list */ 5843 /* Fast-path XRI aborted CQ Event work queue list */
5827 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue); 5844 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5828 } 5845 }
@@ -5830,6 +5847,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5830 /* This abort list used by worker thread */ 5847 /* This abort list used by worker thread */
5831 spin_lock_init(&phba->sli4_hba.sgl_list_lock); 5848 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
5832 spin_lock_init(&phba->sli4_hba.nvmet_io_lock); 5849 spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
5850 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
5833 5851
5834 /* 5852 /*
5835 * Initialize driver internal slow-path work queues 5853 * Initialize driver internal slow-path work queues
@@ -5944,16 +5962,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5944 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5962 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
5945 if (wwn == lpfc_enable_nvmet[i]) { 5963 if (wwn == lpfc_enable_nvmet[i]) {
5946#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 5964#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
5965 if (lpfc_nvmet_mem_alloc(phba))
5966 break;
5967
5968 phba->nvmet_support = 1; /* a match */
5969
5947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5948 "6017 NVME Target %016llx\n", 5971 "6017 NVME Target %016llx\n",
5949 wwn); 5972 wwn);
5950 phba->nvmet_support = 1; /* a match */
5951#else 5973#else
5952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5953 "6021 Can't enable NVME Target." 5975 "6021 Can't enable NVME Target."
5954 " NVME_TARGET_FC infrastructure" 5976 " NVME_TARGET_FC infrastructure"
5955 " is not in kernel\n"); 5977 " is not in kernel\n");
5956#endif 5978#endif
5979 break;
5957 } 5980 }
5958 } 5981 }
5959 } 5982 }
@@ -6262,7 +6285,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6262 * 6285 *
6263 * This routine is invoked to free the driver's IOCB list and memory. 6286 * This routine is invoked to free the driver's IOCB list and memory.
6264 **/ 6287 **/
6265static void 6288void
6266lpfc_free_iocb_list(struct lpfc_hba *phba) 6289lpfc_free_iocb_list(struct lpfc_hba *phba)
6267{ 6290{
6268 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 6291 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6290,7 +6313,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
6290 * 0 - successful 6313 * 0 - successful
6291 * other values - error 6314 * other values - error
6292 **/ 6315 **/
6293static int 6316int
6294lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 6317lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
6295{ 6318{
6296 struct lpfc_iocbq *iocbq_entry = NULL; 6319 struct lpfc_iocbq *iocbq_entry = NULL;
@@ -6518,7 +6541,6 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6518 uint16_t rpi_limit, curr_rpi_range; 6541 uint16_t rpi_limit, curr_rpi_range;
6519 struct lpfc_dmabuf *dmabuf; 6542 struct lpfc_dmabuf *dmabuf;
6520 struct lpfc_rpi_hdr *rpi_hdr; 6543 struct lpfc_rpi_hdr *rpi_hdr;
6521 uint32_t rpi_count;
6522 6544
6523 /* 6545 /*
6524 * If the SLI4 port supports extents, posting the rpi header isn't 6546 * If the SLI4 port supports extents, posting the rpi header isn't
@@ -6531,8 +6553,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6531 return NULL; 6553 return NULL;
6532 6554
6533 /* The limit on the logical index is just the max_rpi count. */ 6555 /* The limit on the logical index is just the max_rpi count. */
6534 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 6556 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
6535 phba->sli4_hba.max_cfg_param.max_rpi - 1;
6536 6557
6537 spin_lock_irq(&phba->hbalock); 6558 spin_lock_irq(&phba->hbalock);
6538 /* 6559 /*
@@ -6543,18 +6564,10 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6543 curr_rpi_range = phba->sli4_hba.next_rpi; 6564 curr_rpi_range = phba->sli4_hba.next_rpi;
6544 spin_unlock_irq(&phba->hbalock); 6565 spin_unlock_irq(&phba->hbalock);
6545 6566
6546 /* 6567 /* Reached full RPI range */
6547 * The port has a limited number of rpis. The increment here 6568 if (curr_rpi_range == rpi_limit)
6548 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
6549 * and to allow the full max_rpi range per port.
6550 */
6551 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
6552 rpi_count = rpi_limit - curr_rpi_range;
6553 else
6554 rpi_count = LPFC_RPI_HDR_COUNT;
6555
6556 if (!rpi_count)
6557 return NULL; 6569 return NULL;
6570
6558 /* 6571 /*
6559 * First allocate the protocol header region for the port. The 6572 * First allocate the protocol header region for the port. The
6560 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6573 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
@@ -6588,13 +6601,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6588 6601
6589 /* The rpi_hdr stores the logical index only. */ 6602 /* The rpi_hdr stores the logical index only. */
6590 rpi_hdr->start_rpi = curr_rpi_range; 6603 rpi_hdr->start_rpi = curr_rpi_range;
6604 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
6591 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6605 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6592 6606
6593 /*
6594 * The next_rpi stores the next logical module-64 rpi value used
6595 * to post physical rpis in subsequent rpi postings.
6596 */
6597 phba->sli4_hba.next_rpi += rpi_count;
6598 spin_unlock_irq(&phba->hbalock); 6607 spin_unlock_irq(&phba->hbalock);
6599 return rpi_hdr; 6608 return rpi_hdr;
6600 6609
@@ -8165,7 +8174,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8165 /* Create NVMET Receive Queue for header */ 8174 /* Create NVMET Receive Queue for header */
8166 qdesc = lpfc_sli4_queue_alloc(phba, 8175 qdesc = lpfc_sli4_queue_alloc(phba,
8167 phba->sli4_hba.rq_esize, 8176 phba->sli4_hba.rq_esize,
8168 phba->sli4_hba.rq_ecount); 8177 LPFC_NVMET_RQE_DEF_COUNT);
8169 if (!qdesc) { 8178 if (!qdesc) {
8170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8171 "3146 Failed allocate " 8180 "3146 Failed allocate "
@@ -8187,7 +8196,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8187 /* Create NVMET Receive Queue for data */ 8196 /* Create NVMET Receive Queue for data */
8188 qdesc = lpfc_sli4_queue_alloc(phba, 8197 qdesc = lpfc_sli4_queue_alloc(phba,
8189 phba->sli4_hba.rq_esize, 8198 phba->sli4_hba.rq_esize,
8190 phba->sli4_hba.rq_ecount); 8199 LPFC_NVMET_RQE_DEF_COUNT);
8191 if (!qdesc) { 8200 if (!qdesc) {
8192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8193 "3156 Failed allocate " 8202 "3156 Failed allocate "
@@ -8319,46 +8328,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8319} 8328}
8320 8329
8321int 8330int
8322lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
8323 struct lpfc_queue *drq, int count)
8324{
8325 int rc, i;
8326 struct lpfc_rqe hrqe;
8327 struct lpfc_rqe drqe;
8328 struct lpfc_rqb *rqbp;
8329 struct rqb_dmabuf *rqb_buffer;
8330 LIST_HEAD(rqb_buf_list);
8331
8332 rqbp = hrq->rqbp;
8333 for (i = 0; i < count; i++) {
8334 rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
8335 if (!rqb_buffer)
8336 break;
8337 rqb_buffer->hrq = hrq;
8338 rqb_buffer->drq = drq;
8339 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
8340 }
8341 while (!list_empty(&rqb_buf_list)) {
8342 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
8343 hbuf.list);
8344
8345 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
8346 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
8347 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
8348 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
8349 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
8350 if (rc < 0) {
8351 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8352 } else {
8353 list_add_tail(&rqb_buffer->hbuf.list,
8354 &rqbp->rqb_buffer_list);
8355 rqbp->buffer_count++;
8356 }
8357 }
8358 return 1;
8359}
8360
8361int
8362lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq) 8331lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8363{ 8332{
8364 struct lpfc_rqb *rqbp; 8333 struct lpfc_rqb *rqbp;
@@ -8777,9 +8746,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8777 goto out_destroy; 8746 goto out_destroy;
8778 } 8747 }
8779 8748
8780 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
8781 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
8782
8783 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 8749 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
8784 phba->sli4_hba.els_cq, LPFC_USOL); 8750 phba->sli4_hba.els_cq, LPFC_USOL);
8785 if (rc) { 8751 if (rc) {
@@ -8847,7 +8813,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
8847 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq); 8813 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
8848 8814
8849 /* Unset ELS work queue */ 8815 /* Unset ELS work queue */
8850 if (phba->sli4_hba.els_cq) 8816 if (phba->sli4_hba.els_wq)
8851 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 8817 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
8852 8818
8853 /* Unset unsolicited receive queue */ 8819 /* Unset unsolicited receive queue */
@@ -11103,7 +11069,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11103 struct lpfc_hba *phba; 11069 struct lpfc_hba *phba;
11104 struct lpfc_vport *vport = NULL; 11070 struct lpfc_vport *vport = NULL;
11105 struct Scsi_Host *shost = NULL; 11071 struct Scsi_Host *shost = NULL;
11106 int error, cnt; 11072 int error;
11107 uint32_t cfg_mode, intr_mode; 11073 uint32_t cfg_mode, intr_mode;
11108 11074
11109 /* Allocate memory for HBA structure */ 11075 /* Allocate memory for HBA structure */
@@ -11137,22 +11103,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11137 goto out_unset_pci_mem_s4; 11103 goto out_unset_pci_mem_s4;
11138 } 11104 }
11139 11105
11140 cnt = phba->cfg_iocb_cnt * 1024;
11141 if (phba->nvmet_support)
11142 cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
11143
11144 /* Initialize and populate the iocb list per host */
11145 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11146 "2821 initialize iocb list %d total %d\n",
11147 phba->cfg_iocb_cnt, cnt);
11148 error = lpfc_init_iocb_list(phba, cnt);
11149
11150 if (error) {
11151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11152 "1413 Failed to initialize iocb list.\n");
11153 goto out_unset_driver_resource_s4;
11154 }
11155
11156 INIT_LIST_HEAD(&phba->active_rrq_list); 11106 INIT_LIST_HEAD(&phba->active_rrq_list);
11157 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 11107 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
11158 11108
@@ -11161,7 +11111,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11161 if (error) { 11111 if (error) {
11162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11163 "1414 Failed to set up driver resource.\n"); 11113 "1414 Failed to set up driver resource.\n");
11164 goto out_free_iocb_list; 11114 goto out_unset_driver_resource_s4;
11165 } 11115 }
11166 11116
11167 /* Get the default values for Model Name and Description */ 11117 /* Get the default values for Model Name and Description */
@@ -11261,8 +11211,6 @@ out_destroy_shost:
11261 lpfc_destroy_shost(phba); 11211 lpfc_destroy_shost(phba);
11262out_unset_driver_resource: 11212out_unset_driver_resource:
11263 lpfc_unset_driver_resource_phase2(phba); 11213 lpfc_unset_driver_resource_phase2(phba);
11264out_free_iocb_list:
11265 lpfc_free_iocb_list(phba);
11266out_unset_driver_resource_s4: 11214out_unset_driver_resource_s4:
11267 lpfc_sli4_driver_resource_unset(phba); 11215 lpfc_sli4_driver_resource_unset(phba);
11268out_unset_pci_mem_s4: 11216out_unset_pci_mem_s4:
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 5986c7957199..fcc05a1517c2 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -214,6 +214,21 @@ fail_free_drb_pool:
214 return -ENOMEM; 214 return -ENOMEM;
215} 215}
216 216
217int
218lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
219{
220 phba->lpfc_nvmet_drb_pool =
221 pci_pool_create("lpfc_nvmet_drb_pool",
222 phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
223 SGL_ALIGN_SZ, 0);
224 if (!phba->lpfc_nvmet_drb_pool) {
225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
226 "6024 Can't enable NVME Target - no memory\n");
227 return -ENOMEM;
228 }
229 return 0;
230}
231
217/** 232/**
218 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc 233 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
219 * @phba: HBA to free memory for 234 * @phba: HBA to free memory for
@@ -232,6 +247,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
232 247
233 /* Free HBQ pools */ 248 /* Free HBQ pools */
234 lpfc_sli_hbqbuf_free_all(phba); 249 lpfc_sli_hbqbuf_free_all(phba);
250 if (phba->lpfc_nvmet_drb_pool)
251 pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
252 phba->lpfc_nvmet_drb_pool = NULL;
235 if (phba->lpfc_drb_pool) 253 if (phba->lpfc_drb_pool)
236 pci_pool_destroy(phba->lpfc_drb_pool); 254 pci_pool_destroy(phba->lpfc_drb_pool);
237 phba->lpfc_drb_pool = NULL; 255 phba->lpfc_drb_pool = NULL;
@@ -611,8 +629,6 @@ struct rqb_dmabuf *
611lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) 629lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
612{ 630{
613 struct rqb_dmabuf *dma_buf; 631 struct rqb_dmabuf *dma_buf;
614 struct lpfc_iocbq *nvmewqe;
615 union lpfc_wqe128 *wqe;
616 632
617 dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL); 633 dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
618 if (!dma_buf) 634 if (!dma_buf)
@@ -624,69 +640,15 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
624 kfree(dma_buf); 640 kfree(dma_buf);
625 return NULL; 641 return NULL;
626 } 642 }
627 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 643 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
628 &dma_buf->dbuf.phys); 644 GFP_KERNEL, &dma_buf->dbuf.phys);
629 if (!dma_buf->dbuf.virt) { 645 if (!dma_buf->dbuf.virt) {
630 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, 646 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
631 dma_buf->hbuf.phys); 647 dma_buf->hbuf.phys);
632 kfree(dma_buf); 648 kfree(dma_buf);
633 return NULL; 649 return NULL;
634 } 650 }
635 dma_buf->total_size = LPFC_DATA_BUF_SIZE; 651 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
636
637 dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
638 GFP_KERNEL);
639 if (!dma_buf->context) {
640 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
641 dma_buf->dbuf.phys);
642 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
643 dma_buf->hbuf.phys);
644 kfree(dma_buf);
645 return NULL;
646 }
647
648 dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
649 if (!dma_buf->iocbq) {
650 kfree(dma_buf->context);
651 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
652 dma_buf->dbuf.phys);
653 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
654 dma_buf->hbuf.phys);
655 kfree(dma_buf);
656 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
657 "2621 Ran out of nvmet iocb/WQEs\n");
658 return NULL;
659 }
660 dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
661 nvmewqe = dma_buf->iocbq;
662 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
663 /* Initialize WQE */
664 memset(wqe, 0, sizeof(union lpfc_wqe));
665 /* Word 7 */
666 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
667 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
668 bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
669 /* Word 10 */
670 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
671 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
672 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
673
674 dma_buf->iocbq->context1 = NULL;
675 spin_lock(&phba->sli4_hba.sgl_list_lock);
676 dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
677 spin_unlock(&phba->sli4_hba.sgl_list_lock);
678 if (!dma_buf->sglq) {
679 lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
680 kfree(dma_buf->context);
681 pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
682 dma_buf->dbuf.phys);
683 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
684 dma_buf->hbuf.phys);
685 kfree(dma_buf);
686 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
687 "6132 Ran out of nvmet XRIs\n");
688 return NULL;
689 }
690 return dma_buf; 652 return dma_buf;
691} 653}
692 654
@@ -705,20 +667,9 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
705void 667void
706lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) 668lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
707{ 669{
708 unsigned long flags;
709
710 __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
711 dmab->sglq->state = SGL_FREED;
712 dmab->sglq->ndlp = NULL;
713
714 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
715 list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
716 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
717
718 lpfc_sli_release_iocbq(phba, dmab->iocbq);
719 kfree(dmab->context);
720 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); 670 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
721 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); 671 pci_pool_free(phba->lpfc_nvmet_drb_pool,
672 dmab->dbuf.virt, dmab->dbuf.phys);
722 kfree(dmab); 673 kfree(dmab);
723} 674}
724 675
@@ -803,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
803 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); 754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
804 if (rc < 0) { 755 if (rc < 0) {
805 (rqbp->rqb_free_buffer)(phba, rqb_entry); 756 (rqbp->rqb_free_buffer)(phba, rqb_entry);
757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
758 "6409 Cannot post to RQ %d: %x %x\n",
759 rqb_entry->hrq->queue_id,
760 rqb_entry->hrq->host_index,
761 rqb_entry->hrq->hba_index);
806 } else { 762 } else {
807 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); 763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
808 rqbp->buffer_count++; 764 rqbp->buffer_count++;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 8777c2d5f50d..bff3de053df4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1944,7 +1944,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1944 1944
1945 /* Target driver cannot solicit NVME FB. */ 1945 /* Target driver cannot solicit NVME FB. */
1946 if (bf_get_be32(prli_tgt, nvpr)) { 1946 if (bf_get_be32(prli_tgt, nvpr)) {
1947 /* Complete the nvme target roles. The transport
1948 * needs to know if the rport is capable of
1949 * discovery in addition to its role.
1950 */
1947 ndlp->nlp_type |= NLP_NVME_TARGET; 1951 ndlp->nlp_type |= NLP_NVME_TARGET;
1952 if (bf_get_be32(prli_disc, nvpr))
1953 ndlp->nlp_type |= NLP_NVME_DISCOVERY;
1948 if ((bf_get_be32(prli_fba, nvpr) == 1) && 1954 if ((bf_get_be32(prli_fba, nvpr) == 1) &&
1949 (bf_get_be32(prli_fb_sz, nvpr) > 0) && 1955 (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
1950 (phba->cfg_nvme_enable_fb) && 1956 (phba->cfg_nvme_enable_fb) &&
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 94434e621c33..074a6b5e7763 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -142,7 +142,7 @@ out:
142} 142}
143 143
144/** 144/**
145 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context 145 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
146 * @phba: HBA buffer is associated with 146 * @phba: HBA buffer is associated with
147 * @ctxp: context to clean up 147 * @ctxp: context to clean up
148 * @mp: Buffer to free 148 * @mp: Buffer to free
@@ -155,24 +155,113 @@ out:
155 * Returns: None 155 * Returns: None
156 **/ 156 **/
157void 157void
158lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, 158lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
159 struct lpfc_dmabuf *mp)
160{ 159{
161 if (ctxp) { 160#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
162 if (ctxp->flag) 161 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
163 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 162 struct lpfc_nvmet_tgtport *tgtp;
164 "6314 rq_post ctx xri x%x flag x%x\n", 163 struct fc_frame_header *fc_hdr;
165 ctxp->oxid, ctxp->flag); 164 struct rqb_dmabuf *nvmebuf;
166 165 struct lpfc_dmabuf *hbufp;
167 if (ctxp->txrdy) { 166 uint32_t *payload;
168 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, 167 uint32_t size, oxid, sid, rc;
169 ctxp->txrdy_phys); 168 unsigned long iflag;
170 ctxp->txrdy = NULL; 169
171 ctxp->txrdy_phys = 0; 170 if (ctxp->txrdy) {
171 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
172 ctxp->txrdy_phys);
173 ctxp->txrdy = NULL;
174 ctxp->txrdy_phys = 0;
175 }
176 ctxp->state = LPFC_NVMET_STE_FREE;
177
178 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
179 if (phba->sli4_hba.nvmet_io_wait_cnt) {
180 hbufp = &nvmebuf->hbuf;
181 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
182 nvmebuf, struct rqb_dmabuf,
183 hbuf.list);
184 phba->sli4_hba.nvmet_io_wait_cnt--;
185 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
186 iflag);
187
188 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
189 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
190 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
191 payload = (uint32_t *)(nvmebuf->dbuf.virt);
192 size = nvmebuf->bytes_recv;
193 sid = sli4_sid_from_fc_hdr(fc_hdr);
194
195 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
196 memset(ctxp, 0, sizeof(ctxp->ctx));
197 ctxp->wqeq = NULL;
198 ctxp->txrdy = NULL;
199 ctxp->offset = 0;
200 ctxp->phba = phba;
201 ctxp->size = size;
202 ctxp->oxid = oxid;
203 ctxp->sid = sid;
204 ctxp->state = LPFC_NVMET_STE_RCV;
205 ctxp->entry_cnt = 1;
206 ctxp->flag = 0;
207 ctxp->ctxbuf = ctx_buf;
208 spin_lock_init(&ctxp->ctxlock);
209
210#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
211 if (phba->ktime_on) {
212 ctxp->ts_cmd_nvme = ktime_get_ns();
213 ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
214 ctxp->ts_nvme_data = 0;
215 ctxp->ts_data_wqput = 0;
216 ctxp->ts_isr_data = 0;
217 ctxp->ts_data_nvme = 0;
218 ctxp->ts_nvme_status = 0;
219 ctxp->ts_status_wqput = 0;
220 ctxp->ts_isr_status = 0;
221 ctxp->ts_status_nvme = 0;
172 } 222 }
173 ctxp->state = LPFC_NVMET_STE_FREE; 223#endif
224 atomic_inc(&tgtp->rcv_fcp_cmd_in);
225 /*
226 * The calling sequence should be:
227 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
228 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
229 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
230 * the NVME command / FC header is stored.
231 * A buffer has already been reposted for this IO, so just free
232 * the nvmebuf.
233 */
234 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
235 payload, size);
236
237 /* Process FCP command */
238 if (rc == 0) {
239 atomic_inc(&tgtp->rcv_fcp_cmd_out);
240 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
241 return;
242 }
243
244 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
245 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
246 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
247 ctxp->oxid, rc,
248 atomic_read(&tgtp->rcv_fcp_cmd_in),
249 atomic_read(&tgtp->rcv_fcp_cmd_out),
250 atomic_read(&tgtp->xmt_fcp_release));
251
252 lpfc_nvmet_defer_release(phba, ctxp);
253 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
254 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
255 return;
174 } 256 }
175 lpfc_rq_buf_free(phba, mp); 257 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
258
259 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
260 list_add_tail(&ctx_buf->list,
261 &phba->sli4_hba.lpfc_nvmet_ctx_list);
262 phba->sli4_hba.nvmet_ctx_cnt++;
263 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
264#endif
176} 265}
177 266
178#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 267#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -502,6 +591,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
502 "6150 LS Drop IO x%x: Prep\n", 591 "6150 LS Drop IO x%x: Prep\n",
503 ctxp->oxid); 592 ctxp->oxid);
504 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 593 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
594 atomic_inc(&nvmep->xmt_ls_abort);
505 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, 595 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
506 ctxp->sid, ctxp->oxid); 596 ctxp->sid, ctxp->oxid);
507 return -ENOMEM; 597 return -ENOMEM;
@@ -545,6 +635,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
545 lpfc_nlp_put(nvmewqeq->context1); 635 lpfc_nlp_put(nvmewqeq->context1);
546 636
547 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 637 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
638 atomic_inc(&nvmep->xmt_ls_abort);
548 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); 639 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
549 return -ENXIO; 640 return -ENXIO;
550} 641}
@@ -612,9 +703,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
612 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 703 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
613 ctxp->oxid, rsp->op, rsp->rsplen); 704 ctxp->oxid, rsp->op, rsp->rsplen);
614 705
706 ctxp->flag |= LPFC_NVMET_IO_INP;
615 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 707 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
616 if (rc == WQE_SUCCESS) { 708 if (rc == WQE_SUCCESS) {
617 ctxp->flag |= LPFC_NVMET_IO_INP;
618#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 709#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
619 if (!phba->ktime_on) 710 if (!phba->ktime_on)
620 return 0; 711 return 0;
@@ -692,6 +783,7 @@ static void
692lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, 783lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
693 struct nvmefc_tgt_fcp_req *rsp) 784 struct nvmefc_tgt_fcp_req *rsp)
694{ 785{
786 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
695 struct lpfc_nvmet_rcv_ctx *ctxp = 787 struct lpfc_nvmet_rcv_ctx *ctxp =
696 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 788 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
697 struct lpfc_hba *phba = ctxp->phba; 789 struct lpfc_hba *phba = ctxp->phba;
@@ -710,10 +802,12 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
710 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, 802 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
711 ctxp->state, 0); 803 ctxp->state, 0);
712 804
805 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
806
713 if (aborting) 807 if (aborting)
714 return; 808 return;
715 809
716 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 810 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
717} 811}
718 812
719static struct nvmet_fc_target_template lpfc_tgttemplate = { 813static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -734,17 +828,128 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
734 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), 828 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
735}; 829};
736 830
831void
832lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
833{
834 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
835 unsigned long flags;
836
837 list_for_each_entry_safe(
838 ctx_buf, next_ctx_buf,
839 &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
840 spin_lock_irqsave(
841 &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
842 list_del_init(&ctx_buf->list);
843 spin_unlock_irqrestore(
844 &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
845 __lpfc_clear_active_sglq(phba,
846 ctx_buf->sglq->sli4_lxritag);
847 ctx_buf->sglq->state = SGL_FREED;
848 ctx_buf->sglq->ndlp = NULL;
849
850 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
851 list_add_tail(&ctx_buf->sglq->list,
852 &phba->sli4_hba.lpfc_nvmet_sgl_list);
853 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
854 flags);
855
856 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
857 kfree(ctx_buf->context);
858 }
859}
860
861int
862lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
863{
864 struct lpfc_nvmet_ctxbuf *ctx_buf;
865 struct lpfc_iocbq *nvmewqe;
866 union lpfc_wqe128 *wqe;
867 int i;
868
869 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
870 "6403 Allocate NVMET resources for %d XRIs\n",
871 phba->sli4_hba.nvmet_xri_cnt);
872
873 /* For all nvmet xris, allocate resources needed to process a
874 * received command on a per xri basis.
875 */
876 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
877 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
878 if (!ctx_buf) {
879 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
880 "6404 Ran out of memory for NVMET\n");
881 return -ENOMEM;
882 }
883
884 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
885 GFP_KERNEL);
886 if (!ctx_buf->context) {
887 kfree(ctx_buf);
888 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
889 "6405 Ran out of NVMET "
890 "context memory\n");
891 return -ENOMEM;
892 }
893 ctx_buf->context->ctxbuf = ctx_buf;
894
895 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
896 if (!ctx_buf->iocbq) {
897 kfree(ctx_buf->context);
898 kfree(ctx_buf);
899 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
900 "6406 Ran out of NVMET iocb/WQEs\n");
901 return -ENOMEM;
902 }
903 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
904 nvmewqe = ctx_buf->iocbq;
905 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
906 /* Initialize WQE */
907 memset(wqe, 0, sizeof(union lpfc_wqe));
908 /* Word 7 */
909 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
910 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
911 bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
912 /* Word 10 */
913 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
914 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
915 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
916
917 ctx_buf->iocbq->context1 = NULL;
918 spin_lock(&phba->sli4_hba.sgl_list_lock);
919 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
920 spin_unlock(&phba->sli4_hba.sgl_list_lock);
921 if (!ctx_buf->sglq) {
922 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
923 kfree(ctx_buf->context);
924 kfree(ctx_buf);
925 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
926 "6407 Ran out of NVMET XRIs\n");
927 return -ENOMEM;
928 }
929 spin_lock(&phba->sli4_hba.nvmet_io_lock);
930 list_add_tail(&ctx_buf->list,
931 &phba->sli4_hba.lpfc_nvmet_ctx_list);
932 spin_unlock(&phba->sli4_hba.nvmet_io_lock);
933 }
934 phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
935 return 0;
936}
937
737int 938int
738lpfc_nvmet_create_targetport(struct lpfc_hba *phba) 939lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
739{ 940{
740 struct lpfc_vport *vport = phba->pport; 941 struct lpfc_vport *vport = phba->pport;
741 struct lpfc_nvmet_tgtport *tgtp; 942 struct lpfc_nvmet_tgtport *tgtp;
742 struct nvmet_fc_port_info pinfo; 943 struct nvmet_fc_port_info pinfo;
743 int error = 0; 944 int error;
744 945
745 if (phba->targetport) 946 if (phba->targetport)
746 return 0; 947 return 0;
747 948
949 error = lpfc_nvmet_setup_io_context(phba);
950 if (error)
951 return error;
952
748 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); 953 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
749 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 954 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
750 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 955 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
@@ -764,7 +969,6 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 969 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 970 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 971 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
767 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
768 NVMET_FCTGTFEAT_CMD_IN_ISR | 972 NVMET_FCTGTFEAT_CMD_IN_ISR |
769 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 973 NVMET_FCTGTFEAT_OPDONE_IN_ISR;
770 974
@@ -773,13 +977,16 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
773 &phba->pcidev->dev, 977 &phba->pcidev->dev,
774 &phba->targetport); 978 &phba->targetport);
775#else 979#else
776 error = -ENOMEM; 980 error = -ENOENT;
777#endif 981#endif
778 if (error) { 982 if (error) {
779 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 983 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
780 "6025 Cannot register NVME targetport " 984 "6025 Cannot register NVME targetport "
781 "x%x\n", error); 985 "x%x\n", error);
782 phba->targetport = NULL; 986 phba->targetport = NULL;
987
988 lpfc_nvmet_cleanup_io_context(phba);
989
783 } else { 990 } else {
784 tgtp = (struct lpfc_nvmet_tgtport *) 991 tgtp = (struct lpfc_nvmet_tgtport *)
785 phba->targetport->private; 992 phba->targetport->private;
@@ -796,6 +1003,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
796 atomic_set(&tgtp->rcv_ls_req_out, 0); 1003 atomic_set(&tgtp->rcv_ls_req_out, 0);
797 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1004 atomic_set(&tgtp->rcv_ls_req_drop, 0);
798 atomic_set(&tgtp->xmt_ls_abort, 0); 1005 atomic_set(&tgtp->xmt_ls_abort, 0);
1006 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
799 atomic_set(&tgtp->xmt_ls_rsp, 0); 1007 atomic_set(&tgtp->xmt_ls_rsp, 0);
800 atomic_set(&tgtp->xmt_ls_drop, 0); 1008 atomic_set(&tgtp->xmt_ls_drop, 0);
801 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1009 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
@@ -803,18 +1011,21 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
803 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1011 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
804 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1012 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
805 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1013 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
806 atomic_set(&tgtp->xmt_fcp_abort, 0);
807 atomic_set(&tgtp->xmt_fcp_drop, 0); 1014 atomic_set(&tgtp->xmt_fcp_drop, 0);
808 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1015 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
809 atomic_set(&tgtp->xmt_fcp_read, 0); 1016 atomic_set(&tgtp->xmt_fcp_read, 0);
810 atomic_set(&tgtp->xmt_fcp_write, 0); 1017 atomic_set(&tgtp->xmt_fcp_write, 0);
811 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1018 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1019 atomic_set(&tgtp->xmt_fcp_release, 0);
812 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1020 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
813 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1021 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
814 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1022 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1023 atomic_set(&tgtp->xmt_fcp_abort, 0);
1024 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1025 atomic_set(&tgtp->xmt_abort_unsol, 0);
1026 atomic_set(&tgtp->xmt_abort_sol, 0);
815 atomic_set(&tgtp->xmt_abort_rsp, 0); 1027 atomic_set(&tgtp->xmt_abort_rsp, 0);
816 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1028 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
817 atomic_set(&tgtp->xmt_abort_cmpl, 0);
818 } 1029 }
819 return error; 1030 return error;
820} 1031}
@@ -865,7 +1076,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
865 list_for_each_entry_safe(ctxp, next_ctxp, 1076 list_for_each_entry_safe(ctxp, next_ctxp,
866 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1077 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
867 list) { 1078 list) {
868 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 1079 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
869 continue; 1080 continue;
870 1081
871 /* Check if we already received a free context call 1082 /* Check if we already received a free context call
@@ -886,7 +1097,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
886 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 1097 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
887 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 1098 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
888 lpfc_set_rrq_active(phba, ndlp, 1099 lpfc_set_rrq_active(phba, ndlp,
889 ctxp->rqb_buffer->sglq->sli4_lxritag, 1100 ctxp->ctxbuf->sglq->sli4_lxritag,
890 rxid, 1); 1101 rxid, 1);
891 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 1102 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
892 } 1103 }
@@ -895,8 +1106,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
895 "6318 XB aborted %x flg x%x (%x)\n", 1106 "6318 XB aborted %x flg x%x (%x)\n",
896 ctxp->oxid, ctxp->flag, released); 1107 ctxp->oxid, ctxp->flag, released);
897 if (released) 1108 if (released)
898 lpfc_nvmet_rq_post(phba, ctxp, 1109 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
899 &ctxp->rqb_buffer->hbuf); 1110
900 if (rrq_empty) 1111 if (rrq_empty)
901 lpfc_worker_wake_up(phba); 1112 lpfc_worker_wake_up(phba);
902 return; 1113 return;
@@ -924,7 +1135,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
924 list_for_each_entry_safe(ctxp, next_ctxp, 1135 list_for_each_entry_safe(ctxp, next_ctxp,
925 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1136 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
926 list) { 1137 list) {
927 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 1138 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
928 continue; 1139 continue;
929 1140
930 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1141 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
@@ -976,6 +1187,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
976 init_completion(&tgtp->tport_unreg_done); 1187 init_completion(&tgtp->tport_unreg_done);
977 nvmet_fc_unregister_targetport(phba->targetport); 1188 nvmet_fc_unregister_targetport(phba->targetport);
978 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1189 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1190 lpfc_nvmet_cleanup_io_context(phba);
979 } 1191 }
980 phba->targetport = NULL; 1192 phba->targetport = NULL;
981#endif 1193#endif
@@ -1011,6 +1223,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1011 oxid = 0; 1223 oxid = 0;
1012 size = 0; 1224 size = 0;
1013 sid = 0; 1225 sid = 0;
1226 ctxp = NULL;
1014 goto dropit; 1227 goto dropit;
1015 } 1228 }
1016 1229
@@ -1105,39 +1318,71 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1105 struct lpfc_nvmet_rcv_ctx *ctxp; 1318 struct lpfc_nvmet_rcv_ctx *ctxp;
1106 struct lpfc_nvmet_tgtport *tgtp; 1319 struct lpfc_nvmet_tgtport *tgtp;
1107 struct fc_frame_header *fc_hdr; 1320 struct fc_frame_header *fc_hdr;
1321 struct lpfc_nvmet_ctxbuf *ctx_buf;
1108 uint32_t *payload; 1322 uint32_t *payload;
1109 uint32_t size, oxid, sid, rc; 1323 uint32_t size, oxid, sid, rc, qno;
1324 unsigned long iflag;
1110#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1325#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1111 uint32_t id; 1326 uint32_t id;
1112#endif 1327#endif
1113 1328
1329 ctx_buf = NULL;
1114 if (!nvmebuf || !phba->targetport) { 1330 if (!nvmebuf || !phba->targetport) {
1115 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1331 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1116 "6157 FCP Drop IO\n"); 1332 "6157 NVMET FCP Drop IO\n");
1117 oxid = 0; 1333 oxid = 0;
1118 size = 0; 1334 size = 0;
1119 sid = 0; 1335 sid = 0;
1336 ctxp = NULL;
1120 goto dropit; 1337 goto dropit;
1121 } 1338 }
1122 1339
1340 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
1341 if (phba->sli4_hba.nvmet_ctx_cnt) {
1342 list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
1343 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1344 phba->sli4_hba.nvmet_ctx_cnt--;
1345 }
1346 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
1123 1347
1124 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1125 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1126 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1348 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1127 size = nvmebuf->bytes_recv;
1128 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1349 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1129 sid = sli4_sid_from_fc_hdr(fc_hdr); 1350 size = nvmebuf->bytes_recv;
1130 1351
1131 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; 1352#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1132 if (ctxp == NULL) { 1353 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1133 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1354 id = smp_processor_id();
1134 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1355 if (id < LPFC_CHECK_CPU_CNT)
1135 "6158 FCP Drop IO x%x: Alloc\n", 1356 phba->cpucheck_rcv_io[id]++;
1136 oxid); 1357 }
1137 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1358#endif
1138 /* Cannot send ABTS without context */ 1359
1360 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1361 oxid, size, smp_processor_id());
1362
1363 if (!ctx_buf) {
1364 /* Queue this NVME IO to process later */
1365 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1366 list_add_tail(&nvmebuf->hbuf.list,
1367 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1368 phba->sli4_hba.nvmet_io_wait_cnt++;
1369 phba->sli4_hba.nvmet_io_wait_total++;
1370 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1371 iflag);
1372
1373 /* Post a brand new DMA buffer to RQ */
1374 qno = nvmebuf->idx;
1375 lpfc_post_rq_buffer(
1376 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1377 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1139 return; 1378 return;
1140 } 1379 }
1380
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 payload = (uint32_t *)(nvmebuf->dbuf.virt);
1383 sid = sli4_sid_from_fc_hdr(fc_hdr);
1384
1385 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1141 memset(ctxp, 0, sizeof(ctxp->ctx)); 1386 memset(ctxp, 0, sizeof(ctxp->ctx));
1142 ctxp->wqeq = NULL; 1387 ctxp->wqeq = NULL;
1143 ctxp->txrdy = NULL; 1388 ctxp->txrdy = NULL;
@@ -1147,9 +1392,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1147 ctxp->oxid = oxid; 1392 ctxp->oxid = oxid;
1148 ctxp->sid = sid; 1393 ctxp->sid = sid;
1149 ctxp->state = LPFC_NVMET_STE_RCV; 1394 ctxp->state = LPFC_NVMET_STE_RCV;
1150 ctxp->rqb_buffer = nvmebuf;
1151 ctxp->entry_cnt = 1; 1395 ctxp->entry_cnt = 1;
1152 ctxp->flag = 0; 1396 ctxp->flag = 0;
1397 ctxp->ctxbuf = ctx_buf;
1153 spin_lock_init(&ctxp->ctxlock); 1398 spin_lock_init(&ctxp->ctxlock);
1154 1399
1155#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1400#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1165,22 +1410,16 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1165 ctxp->ts_isr_status = 0; 1410 ctxp->ts_isr_status = 0;
1166 ctxp->ts_status_nvme = 0; 1411 ctxp->ts_status_nvme = 0;
1167 } 1412 }
1168
1169 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1170 id = smp_processor_id();
1171 if (id < LPFC_CHECK_CPU_CNT)
1172 phba->cpucheck_rcv_io[id]++;
1173 }
1174#endif 1413#endif
1175 1414
1176 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
1177 oxid, size, smp_processor_id());
1178
1179 atomic_inc(&tgtp->rcv_fcp_cmd_in); 1415 atomic_inc(&tgtp->rcv_fcp_cmd_in);
1180 /* 1416 /*
1181 * The calling sequence should be: 1417 * The calling sequence should be:
1182 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done 1418 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1183 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 1419 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1420 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1421 * the NVME command / FC header is stored, so we are free to repost
1422 * the buffer.
1184 */ 1423 */
1185 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 1424 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1186 payload, size); 1425 payload, size);
@@ -1188,26 +1427,32 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1188 /* Process FCP command */ 1427 /* Process FCP command */
1189 if (rc == 0) { 1428 if (rc == 0) {
1190 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1429 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1430 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1191 return; 1431 return;
1192 } 1432 }
1193 1433
1194 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1434 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1195 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1435 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1196 "6159 FCP Drop IO x%x: err x%x\n", 1436 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1197 ctxp->oxid, rc); 1437 ctxp->oxid, rc,
1438 atomic_read(&tgtp->rcv_fcp_cmd_in),
1439 atomic_read(&tgtp->rcv_fcp_cmd_out),
1440 atomic_read(&tgtp->xmt_fcp_release));
1198dropit: 1441dropit:
1199 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 1442 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1200 oxid, size, sid); 1443 oxid, size, sid);
1201 if (oxid) { 1444 if (oxid) {
1445 lpfc_nvmet_defer_release(phba, ctxp);
1202 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 1446 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1447 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1203 return; 1448 return;
1204 } 1449 }
1205 1450
1206 if (nvmebuf) { 1451 if (ctx_buf)
1207 nvmebuf->iocbq->hba_wqidx = 0; 1452 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
1208 /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ 1453
1209 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1454 if (nvmebuf)
1210 } 1455 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1211#endif 1456#endif
1212} 1457}
1213 1458
@@ -1259,7 +1504,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1259 uint64_t isr_timestamp) 1504 uint64_t isr_timestamp)
1260{ 1505{
1261 if (phba->nvmet_support == 0) { 1506 if (phba->nvmet_support == 0) {
1262 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1507 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1263 return; 1508 return;
1264 } 1509 }
1265 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, 1510 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
@@ -1460,7 +1705,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1460 nvmewqe = ctxp->wqeq; 1705 nvmewqe = ctxp->wqeq;
1461 if (nvmewqe == NULL) { 1706 if (nvmewqe == NULL) {
1462 /* Allocate buffer for command wqe */ 1707 /* Allocate buffer for command wqe */
1463 nvmewqe = ctxp->rqb_buffer->iocbq; 1708 nvmewqe = ctxp->ctxbuf->iocbq;
1464 if (nvmewqe == NULL) { 1709 if (nvmewqe == NULL) {
1465 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1710 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1466 "6110 lpfc_nvmet_prep_fcp_wqe: No " 1711 "6110 lpfc_nvmet_prep_fcp_wqe: No "
@@ -1487,7 +1732,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1487 return NULL; 1732 return NULL;
1488 } 1733 }
1489 1734
1490 sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; 1735 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
1491 switch (rsp->op) { 1736 switch (rsp->op) {
1492 case NVMET_FCOP_READDATA: 1737 case NVMET_FCOP_READDATA:
1493 case NVMET_FCOP_READDATA_RSP: 1738 case NVMET_FCOP_READDATA_RSP:
@@ -1812,7 +2057,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1812 result = wcqe->parameter; 2057 result = wcqe->parameter;
1813 2058
1814 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2059 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1815 atomic_inc(&tgtp->xmt_abort_cmpl); 2060 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2061 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
1816 2062
1817 ctxp->state = LPFC_NVMET_STE_DONE; 2063 ctxp->state = LPFC_NVMET_STE_DONE;
1818 2064
@@ -1827,6 +2073,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1827 } 2073 }
1828 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2074 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1829 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2075 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2076 atomic_inc(&tgtp->xmt_abort_rsp);
1830 2077
1831 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2078 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1832 "6165 ABORT cmpl: xri x%x flg x%x (%d) " 2079 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
@@ -1835,15 +2082,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1835 wcqe->word0, wcqe->total_data_placed, 2082 wcqe->word0, wcqe->total_data_placed,
1836 result, wcqe->word3); 2083 result, wcqe->word3);
1837 2084
2085 cmdwqe->context2 = NULL;
2086 cmdwqe->context3 = NULL;
1838 /* 2087 /*
1839 * if transport has released ctx, then can reuse it. Otherwise, 2088 * if transport has released ctx, then can reuse it. Otherwise,
1840 * will be recycled by transport release call. 2089 * will be recycled by transport release call.
1841 */ 2090 */
1842 if (released) 2091 if (released)
1843 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 2092 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1844 2093
1845 cmdwqe->context2 = NULL; 2094 /* This is the iocbq for the abort, not the command */
1846 cmdwqe->context3 = NULL;
1847 lpfc_sli_release_iocbq(phba, cmdwqe); 2095 lpfc_sli_release_iocbq(phba, cmdwqe);
1848 2096
1849 /* Since iaab/iaar are NOT set, there is no work left. 2097 /* Since iaab/iaar are NOT set, there is no work left.
@@ -1877,7 +2125,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1877 result = wcqe->parameter; 2125 result = wcqe->parameter;
1878 2126
1879 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2127 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1880 atomic_inc(&tgtp->xmt_abort_cmpl); 2128 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2129 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
1881 2130
1882 if (!ctxp) { 2131 if (!ctxp) {
1883 /* if context is clear, related io alrady complete */ 2132 /* if context is clear, related io alrady complete */
@@ -1907,6 +2156,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1907 } 2156 }
1908 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2157 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1909 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2158 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2159 atomic_inc(&tgtp->xmt_abort_rsp);
1910 2160
1911 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2161 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1912 "6316 ABTS cmpl xri x%x flg x%x (%x) " 2162 "6316 ABTS cmpl xri x%x flg x%x (%x) "
@@ -1914,15 +2164,15 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1914 ctxp->oxid, ctxp->flag, released, 2164 ctxp->oxid, ctxp->flag, released,
1915 wcqe->word0, wcqe->total_data_placed, 2165 wcqe->word0, wcqe->total_data_placed,
1916 result, wcqe->word3); 2166 result, wcqe->word3);
2167
2168 cmdwqe->context2 = NULL;
2169 cmdwqe->context3 = NULL;
1917 /* 2170 /*
1918 * if transport has released ctx, then can reuse it. Otherwise, 2171 * if transport has released ctx, then can reuse it. Otherwise,
1919 * will be recycled by transport release call. 2172 * will be recycled by transport release call.
1920 */ 2173 */
1921 if (released) 2174 if (released)
1922 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 2175 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1923
1924 cmdwqe->context2 = NULL;
1925 cmdwqe->context3 = NULL;
1926 2176
1927 /* Since iaab/iaar are NOT set, there is no work left. 2177 /* Since iaab/iaar are NOT set, there is no work left.
1928 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2178 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
@@ -1953,7 +2203,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1953 result = wcqe->parameter; 2203 result = wcqe->parameter;
1954 2204
1955 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2205 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1956 atomic_inc(&tgtp->xmt_abort_cmpl); 2206 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
1957 2207
1958 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2208 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1959 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n", 2209 "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
@@ -1984,10 +2234,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1984 sid, xri, ctxp->wqeq->sli4_xritag); 2234 sid, xri, ctxp->wqeq->sli4_xritag);
1985 2235
1986 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2236 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1987 if (!ctxp->wqeq) {
1988 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1989 ctxp->wqeq->hba_wqidx = 0;
1990 }
1991 2237
1992 ndlp = lpfc_findnode_did(phba->pport, sid); 2238 ndlp = lpfc_findnode_did(phba->pport, sid);
1993 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2239 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -2083,7 +2329,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2083 2329
2084 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2330 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2085 if (!ctxp->wqeq) { 2331 if (!ctxp->wqeq) {
2086 ctxp->wqeq = ctxp->rqb_buffer->iocbq; 2332 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2087 ctxp->wqeq->hba_wqidx = 0; 2333 ctxp->wqeq->hba_wqidx = 0;
2088 } 2334 }
2089 2335
@@ -2104,6 +2350,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2104 /* Issue ABTS for this WQE based on iotag */ 2350 /* Issue ABTS for this WQE based on iotag */
2105 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 2351 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2106 if (!ctxp->abort_wqeq) { 2352 if (!ctxp->abort_wqeq) {
2353 atomic_inc(&tgtp->xmt_abort_rsp_error);
2107 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 2354 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2108 "6161 ABORT failed: No wqeqs: " 2355 "6161 ABORT failed: No wqeqs: "
2109 "xri: x%x\n", ctxp->oxid); 2356 "xri: x%x\n", ctxp->oxid);
@@ -2128,6 +2375,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2128 /* driver queued commands are in process of being flushed */ 2375 /* driver queued commands are in process of being flushed */
2129 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 2376 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2130 spin_unlock_irqrestore(&phba->hbalock, flags); 2377 spin_unlock_irqrestore(&phba->hbalock, flags);
2378 atomic_inc(&tgtp->xmt_abort_rsp_error);
2131 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2379 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2132 "6163 Driver in reset cleanup - flushing " 2380 "6163 Driver in reset cleanup - flushing "
2133 "NVME Req now. hba_flag x%x oxid x%x\n", 2381 "NVME Req now. hba_flag x%x oxid x%x\n",
@@ -2140,6 +2388,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2140 /* Outstanding abort is in progress */ 2388 /* Outstanding abort is in progress */
2141 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 2389 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2142 spin_unlock_irqrestore(&phba->hbalock, flags); 2390 spin_unlock_irqrestore(&phba->hbalock, flags);
2391 atomic_inc(&tgtp->xmt_abort_rsp_error);
2143 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2392 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2144 "6164 Outstanding NVME I/O Abort Request " 2393 "6164 Outstanding NVME I/O Abort Request "
2145 "still pending on oxid x%x\n", 2394 "still pending on oxid x%x\n",
@@ -2190,9 +2439,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2190 abts_wqeq->context2 = ctxp; 2439 abts_wqeq->context2 = ctxp;
2191 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2440 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2192 spin_unlock_irqrestore(&phba->hbalock, flags); 2441 spin_unlock_irqrestore(&phba->hbalock, flags);
2193 if (rc == WQE_SUCCESS) 2442 if (rc == WQE_SUCCESS) {
2443 atomic_inc(&tgtp->xmt_abort_sol);
2194 return 0; 2444 return 0;
2445 }
2195 2446
2447 atomic_inc(&tgtp->xmt_abort_rsp_error);
2196 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2448 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2197 lpfc_sli_release_iocbq(phba, abts_wqeq); 2449 lpfc_sli_release_iocbq(phba, abts_wqeq);
2198 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2450 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2215,7 +2467,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2215 2467
2216 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2468 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2217 if (!ctxp->wqeq) { 2469 if (!ctxp->wqeq) {
2218 ctxp->wqeq = ctxp->rqb_buffer->iocbq; 2470 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2219 ctxp->wqeq->hba_wqidx = 0; 2471 ctxp->wqeq->hba_wqidx = 0;
2220 } 2472 }
2221 2473
@@ -2231,11 +2483,11 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2231 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2483 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2232 spin_unlock_irqrestore(&phba->hbalock, flags); 2484 spin_unlock_irqrestore(&phba->hbalock, flags);
2233 if (rc == WQE_SUCCESS) { 2485 if (rc == WQE_SUCCESS) {
2234 atomic_inc(&tgtp->xmt_abort_rsp);
2235 return 0; 2486 return 0;
2236 } 2487 }
2237 2488
2238aerr: 2489aerr:
2490 atomic_inc(&tgtp->xmt_abort_rsp_error);
2239 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2491 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2240 atomic_inc(&tgtp->xmt_abort_rsp_error); 2492 atomic_inc(&tgtp->xmt_abort_rsp_error);
2241 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS, 2493 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
@@ -2270,6 +2522,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2270 } 2522 }
2271 abts_wqeq = ctxp->wqeq; 2523 abts_wqeq = ctxp->wqeq;
2272 wqe_abts = &abts_wqeq->wqe; 2524 wqe_abts = &abts_wqeq->wqe;
2525
2273 lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 2526 lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2274 2527
2275 spin_lock_irqsave(&phba->hbalock, flags); 2528 spin_lock_irqsave(&phba->hbalock, flags);
@@ -2279,7 +2532,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2279 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 2532 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2280 spin_unlock_irqrestore(&phba->hbalock, flags); 2533 spin_unlock_irqrestore(&phba->hbalock, flags);
2281 if (rc == WQE_SUCCESS) { 2534 if (rc == WQE_SUCCESS) {
2282 atomic_inc(&tgtp->xmt_abort_rsp); 2535 atomic_inc(&tgtp->xmt_abort_unsol);
2283 return 0; 2536 return 0;
2284 } 2537 }
2285 2538
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 128759fe6650..6eb2f5d8d4ed 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -22,6 +22,7 @@
22 ********************************************************************/ 22 ********************************************************************/
23 23
24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ 24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
25#define LPFC_NVMET_RQE_DEF_COUNT 512
25#define LPFC_NVMET_SUCCESS_LEN 12 26#define LPFC_NVMET_SUCCESS_LEN 12
26 27
27/* Used for NVME Target */ 28/* Used for NVME Target */
@@ -34,6 +35,7 @@ struct lpfc_nvmet_tgtport {
34 atomic_t rcv_ls_req_out; 35 atomic_t rcv_ls_req_out;
35 atomic_t rcv_ls_req_drop; 36 atomic_t rcv_ls_req_drop;
36 atomic_t xmt_ls_abort; 37 atomic_t xmt_ls_abort;
38 atomic_t xmt_ls_abort_cmpl;
37 39
38 /* Stats counters - lpfc_nvmet_xmt_ls_rsp */ 40 /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
39 atomic_t xmt_ls_rsp; 41 atomic_t xmt_ls_rsp;
@@ -47,9 +49,9 @@ struct lpfc_nvmet_tgtport {
47 atomic_t rcv_fcp_cmd_in; 49 atomic_t rcv_fcp_cmd_in;
48 atomic_t rcv_fcp_cmd_out; 50 atomic_t rcv_fcp_cmd_out;
49 atomic_t rcv_fcp_cmd_drop; 51 atomic_t rcv_fcp_cmd_drop;
52 atomic_t xmt_fcp_release;
50 53
51 /* Stats counters - lpfc_nvmet_xmt_fcp_op */ 54 /* Stats counters - lpfc_nvmet_xmt_fcp_op */
52 atomic_t xmt_fcp_abort;
53 atomic_t xmt_fcp_drop; 55 atomic_t xmt_fcp_drop;
54 atomic_t xmt_fcp_read_rsp; 56 atomic_t xmt_fcp_read_rsp;
55 atomic_t xmt_fcp_read; 57 atomic_t xmt_fcp_read;
@@ -62,12 +64,13 @@ struct lpfc_nvmet_tgtport {
62 atomic_t xmt_fcp_rsp_drop; 64 atomic_t xmt_fcp_rsp_drop;
63 65
64 66
65 /* Stats counters - lpfc_nvmet_unsol_issue_abort */ 67 /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
68 atomic_t xmt_fcp_abort;
69 atomic_t xmt_fcp_abort_cmpl;
70 atomic_t xmt_abort_sol;
71 atomic_t xmt_abort_unsol;
66 atomic_t xmt_abort_rsp; 72 atomic_t xmt_abort_rsp;
67 atomic_t xmt_abort_rsp_error; 73 atomic_t xmt_abort_rsp_error;
68
69 /* Stats counters - lpfc_nvmet_xmt_abort_cmp */
70 atomic_t xmt_abort_cmpl;
71}; 74};
72 75
73struct lpfc_nvmet_rcv_ctx { 76struct lpfc_nvmet_rcv_ctx {
@@ -103,6 +106,7 @@ struct lpfc_nvmet_rcv_ctx {
103#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ 106#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
104#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ 107#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
105 struct rqb_dmabuf *rqb_buffer; 108 struct rqb_dmabuf *rqb_buffer;
109 struct lpfc_nvmet_ctxbuf *ctxbuf;
106 110
107#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 111#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
108 uint64_t ts_isr_cmd; 112 uint64_t ts_isr_cmd;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cf19f4976f5f..d6b184839bc2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -74,6 +74,8 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *); 74 struct lpfc_iocbq *);
75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *); 76 struct hbq_dmabuf *);
77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
77static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, 79static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
78 struct lpfc_cqe *); 80 struct lpfc_cqe *);
79static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, 81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
@@ -479,22 +481,23 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
479 if (unlikely(!hq) || unlikely(!dq)) 481 if (unlikely(!hq) || unlikely(!dq))
480 return -ENOMEM; 482 return -ENOMEM;
481 put_index = hq->host_index; 483 put_index = hq->host_index;
482 temp_hrqe = hq->qe[hq->host_index].rqe; 484 temp_hrqe = hq->qe[put_index].rqe;
483 temp_drqe = dq->qe[dq->host_index].rqe; 485 temp_drqe = dq->qe[dq->host_index].rqe;
484 486
485 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 487 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
486 return -EINVAL; 488 return -EINVAL;
487 if (hq->host_index != dq->host_index) 489 if (put_index != dq->host_index)
488 return -EINVAL; 490 return -EINVAL;
489 /* If the host has not yet processed the next entry then we are done */ 491 /* If the host has not yet processed the next entry then we are done */
490 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) 492 if (((put_index + 1) % hq->entry_count) == hq->hba_index)
491 return -EBUSY; 493 return -EBUSY;
492 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 494 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
493 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 495 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
494 496
495 /* Update the host index to point to the next slot */ 497 /* Update the host index to point to the next slot */
496 hq->host_index = ((hq->host_index + 1) % hq->entry_count); 498 hq->host_index = ((put_index + 1) % hq->entry_count);
497 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 499 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
500 hq->RQ_buf_posted++;
498 501
499 /* Ring The Header Receive Queue Doorbell */ 502 /* Ring The Header Receive Queue Doorbell */
500 if (!(hq->host_index % hq->entry_repost)) { 503 if (!(hq->host_index % hq->entry_repost)) {
@@ -4204,13 +4207,16 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
4204 /* Reset HBA */ 4207 /* Reset HBA */
4205 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4206 "0325 Reset HBA Data: x%x x%x\n", 4209 "0325 Reset HBA Data: x%x x%x\n",
4207 phba->pport->port_state, psli->sli_flag); 4210 (phba->pport) ? phba->pport->port_state : 0,
4211 psli->sli_flag);
4208 4212
4209 /* perform board reset */ 4213 /* perform board reset */
4210 phba->fc_eventTag = 0; 4214 phba->fc_eventTag = 0;
4211 phba->link_events = 0; 4215 phba->link_events = 0;
4212 phba->pport->fc_myDID = 0; 4216 if (phba->pport) {
4213 phba->pport->fc_prevDID = 0; 4217 phba->pport->fc_myDID = 0;
4218 phba->pport->fc_prevDID = 0;
4219 }
4214 4220
4215 /* Turn off parity checking and serr during the physical reset */ 4221 /* Turn off parity checking and serr during the physical reset */
4216 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4222 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -4336,7 +4342,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4336 /* Restart HBA */ 4342 /* Restart HBA */
4337 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4338 "0337 Restart HBA Data: x%x x%x\n", 4344 "0337 Restart HBA Data: x%x x%x\n",
4339 phba->pport->port_state, psli->sli_flag); 4345 (phba->pport) ? phba->pport->port_state : 0,
4346 psli->sli_flag);
4340 4347
4341 word0 = 0; 4348 word0 = 0;
4342 mb = (MAILBOX_t *) &word0; 4349 mb = (MAILBOX_t *) &word0;
@@ -4350,7 +4357,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4350 readl(to_slim); /* flush */ 4357 readl(to_slim); /* flush */
4351 4358
4352 /* Only skip post after fc_ffinit is completed */ 4359 /* Only skip post after fc_ffinit is completed */
4353 if (phba->pport->port_state) 4360 if (phba->pport && phba->pport->port_state)
4354 word0 = 1; /* This is really setting up word1 */ 4361 word0 = 1; /* This is really setting up word1 */
4355 else 4362 else
4356 word0 = 0; /* This is really setting up word1 */ 4363 word0 = 0; /* This is really setting up word1 */
@@ -4359,7 +4366,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4359 readl(to_slim); /* flush */ 4366 readl(to_slim); /* flush */
4360 4367
4361 lpfc_sli_brdreset(phba); 4368 lpfc_sli_brdreset(phba);
4362 phba->pport->stopped = 0; 4369 if (phba->pport)
4370 phba->pport->stopped = 0;
4363 phba->link_state = LPFC_INIT_START; 4371 phba->link_state = LPFC_INIT_START;
4364 phba->hba_flag = 0; 4372 phba->hba_flag = 0;
4365 spin_unlock_irq(&phba->hbalock); 4373 spin_unlock_irq(&phba->hbalock);
@@ -4446,7 +4454,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
4446 * iteration, the function will restart the HBA again. The function returns 4454 * iteration, the function will restart the HBA again. The function returns
4447 * zero if HBA successfully restarted else returns negative error code. 4455 * zero if HBA successfully restarted else returns negative error code.
4448 **/ 4456 **/
4449static int 4457int
4450lpfc_sli_chipset_init(struct lpfc_hba *phba) 4458lpfc_sli_chipset_init(struct lpfc_hba *phba)
4451{ 4459{
4452 uint32_t status, i = 0; 4460 uint32_t status, i = 0;
@@ -5901,7 +5909,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5901 bf_set(lpfc_mbx_set_feature_mds, 5909 bf_set(lpfc_mbx_set_feature_mds,
5902 &mbox->u.mqe.un.set_feature, 1); 5910 &mbox->u.mqe.un.set_feature, 1);
5903 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 5911 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5904 &mbox->u.mqe.un.set_feature, 0); 5912 &mbox->u.mqe.un.set_feature, 1);
5905 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 5913 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5906 mbox->u.mqe.un.set_feature.param_len = 8; 5914 mbox->u.mqe.un.set_feature.param_len = 8;
5907 break; 5915 break;
@@ -6507,6 +6515,50 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6507 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 6515 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6508} 6516}
6509 6517
6518int
6519lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6520 struct lpfc_queue *drq, int count, int idx)
6521{
6522 int rc, i;
6523 struct lpfc_rqe hrqe;
6524 struct lpfc_rqe drqe;
6525 struct lpfc_rqb *rqbp;
6526 struct rqb_dmabuf *rqb_buffer;
6527 LIST_HEAD(rqb_buf_list);
6528
6529 rqbp = hrq->rqbp;
6530 for (i = 0; i < count; i++) {
6531 /* IF RQ is already full, don't bother */
6532 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
6533 break;
6534 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6535 if (!rqb_buffer)
6536 break;
6537 rqb_buffer->hrq = hrq;
6538 rqb_buffer->drq = drq;
6539 rqb_buffer->idx = idx;
6540 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6541 }
6542 while (!list_empty(&rqb_buf_list)) {
6543 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6544 hbuf.list);
6545
6546 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6547 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6548 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6549 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6550 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6551 if (rc < 0) {
6552 rqbp->rqb_free_buffer(phba, rqb_buffer);
6553 } else {
6554 list_add_tail(&rqb_buffer->hbuf.list,
6555 &rqbp->rqb_buffer_list);
6556 rqbp->buffer_count++;
6557 }
6558 }
6559 return 1;
6560}
6561
6510/** 6562/**
6511 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6563 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6512 * @phba: Pointer to HBA context object. 6564 * @phba: Pointer to HBA context object.
@@ -6519,7 +6571,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6519int 6571int
6520lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6572lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6521{ 6573{
6522 int rc, i; 6574 int rc, i, cnt;
6523 LPFC_MBOXQ_t *mboxq; 6575 LPFC_MBOXQ_t *mboxq;
6524 struct lpfc_mqe *mqe; 6576 struct lpfc_mqe *mqe;
6525 uint8_t *vpd; 6577 uint8_t *vpd;
@@ -6870,6 +6922,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6870 goto out_destroy_queue; 6922 goto out_destroy_queue;
6871 } 6923 }
6872 phba->sli4_hba.nvmet_xri_cnt = rc; 6924 phba->sli4_hba.nvmet_xri_cnt = rc;
6925
6926 cnt = phba->cfg_iocb_cnt * 1024;
6927 /* We need 1 iocbq for every SGL, for IO processing */
6928 cnt += phba->sli4_hba.nvmet_xri_cnt;
6929 /* Initialize and populate the iocb list per host */
6930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6931 "2821 initialize iocb list %d total %d\n",
6932 phba->cfg_iocb_cnt, cnt);
6933 rc = lpfc_init_iocb_list(phba, cnt);
6934 if (rc) {
6935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6936 "1413 Failed to init iocb list.\n");
6937 goto out_destroy_queue;
6938 }
6939
6873 lpfc_nvmet_create_targetport(phba); 6940 lpfc_nvmet_create_targetport(phba);
6874 } else { 6941 } else {
6875 /* update host scsi xri-sgl sizes and mappings */ 6942 /* update host scsi xri-sgl sizes and mappings */
@@ -6889,28 +6956,34 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6889 "and mapping: %d\n", rc); 6956 "and mapping: %d\n", rc);
6890 goto out_destroy_queue; 6957 goto out_destroy_queue;
6891 } 6958 }
6959
6960 cnt = phba->cfg_iocb_cnt * 1024;
6961 /* Initialize and populate the iocb list per host */
6962 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6963 "2820 initialize iocb list %d total %d\n",
6964 phba->cfg_iocb_cnt, cnt);
6965 rc = lpfc_init_iocb_list(phba, cnt);
6966 if (rc) {
6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6968 "6301 Failed to init iocb list.\n");
6969 goto out_destroy_queue;
6970 }
6892 } 6971 }
6893 6972
6894 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 6973 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6895
6896 /* Post initial buffers to all RQs created */ 6974 /* Post initial buffers to all RQs created */
6897 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 6975 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6898 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 6976 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6899 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 6977 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6900 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 6978 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6901 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 6979 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
6902 rqbp->entry_count = 256; 6980 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
6903 rqbp->buffer_count = 0; 6981 rqbp->buffer_count = 0;
6904 6982
6905 /* Divide by 4 and round down to multiple of 16 */
6906 rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
6907 phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
6908 phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
6909
6910 lpfc_post_rq_buffer( 6983 lpfc_post_rq_buffer(
6911 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 6984 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6912 phba->sli4_hba.nvmet_mrq_data[i], 6985 phba->sli4_hba.nvmet_mrq_data[i],
6913 phba->cfg_nvmet_mrq_post); 6986 LPFC_NVMET_RQE_DEF_COUNT, i);
6914 } 6987 }
6915 } 6988 }
6916 6989
@@ -7077,6 +7150,7 @@ out_unset_queue:
7077 /* Unset all the queues set up in this routine when error out */ 7150 /* Unset all the queues set up in this routine when error out */
7078 lpfc_sli4_queue_unset(phba); 7151 lpfc_sli4_queue_unset(phba);
7079out_destroy_queue: 7152out_destroy_queue:
7153 lpfc_free_iocb_list(phba);
7080 lpfc_sli4_queue_destroy(phba); 7154 lpfc_sli4_queue_destroy(phba);
7081out_stop_timers: 7155out_stop_timers:
7082 lpfc_stop_hba_timers(phba); 7156 lpfc_stop_hba_timers(phba);
@@ -8616,8 +8690,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8616 memset(wqe, 0, sizeof(union lpfc_wqe128)); 8690 memset(wqe, 0, sizeof(union lpfc_wqe128));
8617 /* Some of the fields are in the right position already */ 8691 /* Some of the fields are in the right position already */
8618 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8619 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8693 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8620 wqe->generic.wqe_com.word10 = 0; 8694 /* The ct field has moved so reset */
8695 wqe->generic.wqe_com.word7 = 0;
8696 wqe->generic.wqe_com.word10 = 0;
8697 }
8621 8698
8622 abort_tag = (uint32_t) iocbq->iotag; 8699 abort_tag = (uint32_t) iocbq->iotag;
8623 xritag = iocbq->sli4_xritag; 8700 xritag = iocbq->sli4_xritag;
@@ -9111,6 +9188,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9111 } 9188 }
9112 9189
9113 break; 9190 break;
9191 case CMD_SEND_FRAME:
9192 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9193 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9194 return 0;
9114 case CMD_XRI_ABORTED_CX: 9195 case CMD_XRI_ABORTED_CX:
9115 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9196 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9116 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9197 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -12783,6 +12864,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12783 struct fc_frame_header *fc_hdr; 12864 struct fc_frame_header *fc_hdr;
12784 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 12865 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12785 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 12866 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12867 struct lpfc_nvmet_tgtport *tgtp;
12786 struct hbq_dmabuf *dma_buf; 12868 struct hbq_dmabuf *dma_buf;
12787 uint32_t status, rq_id; 12869 uint32_t status, rq_id;
12788 unsigned long iflags; 12870 unsigned long iflags;
@@ -12803,7 +12885,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12803 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 12885 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12804 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12805 "2537 Receive Frame Truncated!!\n"); 12887 "2537 Receive Frame Truncated!!\n");
12806 hrq->RQ_buf_trunc++;
12807 case FC_STATUS_RQ_SUCCESS: 12888 case FC_STATUS_RQ_SUCCESS:
12808 lpfc_sli4_rq_release(hrq, drq); 12889 lpfc_sli4_rq_release(hrq, drq);
12809 spin_lock_irqsave(&phba->hbalock, iflags); 12890 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12814,6 +12895,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12814 goto out; 12895 goto out;
12815 } 12896 }
12816 hrq->RQ_rcv_buf++; 12897 hrq->RQ_rcv_buf++;
12898 hrq->RQ_buf_posted--;
12817 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 12899 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12818 12900
12819 /* If a NVME LS event (type 0x28), treat it as Fast path */ 12901 /* If a NVME LS event (type 0x28), treat it as Fast path */
@@ -12827,8 +12909,21 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12827 spin_unlock_irqrestore(&phba->hbalock, iflags); 12909 spin_unlock_irqrestore(&phba->hbalock, iflags);
12828 workposted = true; 12910 workposted = true;
12829 break; 12911 break;
12830 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12831 case FC_STATUS_INSUFF_BUF_FRM_DISC: 12912 case FC_STATUS_INSUFF_BUF_FRM_DISC:
12913 if (phba->nvmet_support) {
12914 tgtp = phba->targetport->private;
12915 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
12916 "6402 RQE Error x%x, posted %d err_cnt "
12917 "%d: %x %x %x\n",
12918 status, hrq->RQ_buf_posted,
12919 hrq->RQ_no_posted_buf,
12920 atomic_read(&tgtp->rcv_fcp_cmd_in),
12921 atomic_read(&tgtp->rcv_fcp_cmd_out),
12922 atomic_read(&tgtp->xmt_fcp_release));
12923 }
12924 /* fallthrough */
12925
12926 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12832 hrq->RQ_no_posted_buf++; 12927 hrq->RQ_no_posted_buf++;
12833 /* Post more buffers if possible */ 12928 /* Post more buffers if possible */
12834 spin_lock_irqsave(&phba->hbalock, iflags); 12929 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12946,7 +13041,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12946 while ((cqe = lpfc_sli4_cq_get(cq))) { 13041 while ((cqe = lpfc_sli4_cq_get(cq))) {
12947 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13042 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12948 if (!(++ecount % cq->entry_repost)) 13043 if (!(++ecount % cq->entry_repost))
12949 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13044 break;
12950 cq->CQ_mbox++; 13045 cq->CQ_mbox++;
12951 } 13046 }
12952 break; 13047 break;
@@ -12960,7 +13055,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12960 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13055 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12961 cqe); 13056 cqe);
12962 if (!(++ecount % cq->entry_repost)) 13057 if (!(++ecount % cq->entry_repost))
12963 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13058 break;
12964 } 13059 }
12965 13060
12966 /* Track the max number of CQEs processed in 1 EQ */ 13061 /* Track the max number of CQEs processed in 1 EQ */
@@ -13130,6 +13225,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13130 struct lpfc_queue *drq; 13225 struct lpfc_queue *drq;
13131 struct rqb_dmabuf *dma_buf; 13226 struct rqb_dmabuf *dma_buf;
13132 struct fc_frame_header *fc_hdr; 13227 struct fc_frame_header *fc_hdr;
13228 struct lpfc_nvmet_tgtport *tgtp;
13133 uint32_t status, rq_id; 13229 uint32_t status, rq_id;
13134 unsigned long iflags; 13230 unsigned long iflags;
13135 uint32_t fctl, idx; 13231 uint32_t fctl, idx;
@@ -13160,8 +13256,6 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13160 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13256 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13162 "6126 Receive Frame Truncated!!\n"); 13258 "6126 Receive Frame Truncated!!\n");
13163 hrq->RQ_buf_trunc++;
13164 break;
13165 case FC_STATUS_RQ_SUCCESS: 13259 case FC_STATUS_RQ_SUCCESS:
13166 lpfc_sli4_rq_release(hrq, drq); 13260 lpfc_sli4_rq_release(hrq, drq);
13167 spin_lock_irqsave(&phba->hbalock, iflags); 13261 spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13173,6 +13267,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13173 } 13267 }
13174 spin_unlock_irqrestore(&phba->hbalock, iflags); 13268 spin_unlock_irqrestore(&phba->hbalock, iflags);
13175 hrq->RQ_rcv_buf++; 13269 hrq->RQ_rcv_buf++;
13270 hrq->RQ_buf_posted--;
13176 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13271 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13177 13272
13178 /* Just some basic sanity checks on FCP Command frame */ 13273 /* Just some basic sanity checks on FCP Command frame */
@@ -13195,14 +13290,23 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13195drop: 13290drop:
13196 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13291 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13197 break; 13292 break;
13198 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13199 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13293 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13294 if (phba->nvmet_support) {
13295 tgtp = phba->targetport->private;
13296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13297 "6401 RQE Error x%x, posted %d err_cnt "
13298 "%d: %x %x %x\n",
13299 status, hrq->RQ_buf_posted,
13300 hrq->RQ_no_posted_buf,
13301 atomic_read(&tgtp->rcv_fcp_cmd_in),
13302 atomic_read(&tgtp->rcv_fcp_cmd_out),
13303 atomic_read(&tgtp->xmt_fcp_release));
13304 }
13305 /* fallthrough */
13306
13307 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13200 hrq->RQ_no_posted_buf++; 13308 hrq->RQ_no_posted_buf++;
13201 /* Post more buffers if possible */ 13309 /* Post more buffers if possible */
13202 spin_lock_irqsave(&phba->hbalock, iflags);
13203 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13204 spin_unlock_irqrestore(&phba->hbalock, iflags);
13205 workposted = true;
13206 break; 13310 break;
13207 } 13311 }
13208out: 13312out:
@@ -13356,7 +13460,7 @@ process_cq:
13356 while ((cqe = lpfc_sli4_cq_get(cq))) { 13460 while ((cqe = lpfc_sli4_cq_get(cq))) {
13357 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13461 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13358 if (!(++ecount % cq->entry_repost)) 13462 if (!(++ecount % cq->entry_repost))
13359 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13463 break;
13360 } 13464 }
13361 13465
13362 /* Track the max number of CQEs processed in 1 EQ */ 13466 /* Track the max number of CQEs processed in 1 EQ */
@@ -13447,7 +13551,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13447 while ((cqe = lpfc_sli4_cq_get(cq))) { 13551 while ((cqe = lpfc_sli4_cq_get(cq))) {
13448 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13552 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13449 if (!(++ecount % cq->entry_repost)) 13553 if (!(++ecount % cq->entry_repost))
13450 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 13554 break;
13451 } 13555 }
13452 13556
13453 /* Track the max number of CQEs processed in 1 EQ */ 13557 /* Track the max number of CQEs processed in 1 EQ */
@@ -13529,7 +13633,7 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13529 while ((eqe = lpfc_sli4_eq_get(eq))) { 13633 while ((eqe = lpfc_sli4_eq_get(eq))) {
13530 lpfc_sli4_fof_handle_eqe(phba, eqe); 13634 lpfc_sli4_fof_handle_eqe(phba, eqe);
13531 if (!(++ecount % eq->entry_repost)) 13635 if (!(++ecount % eq->entry_repost))
13532 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); 13636 break;
13533 eq->EQ_processed++; 13637 eq->EQ_processed++;
13534 } 13638 }
13535 13639
@@ -13646,7 +13750,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13646 13750
13647 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 13751 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13648 if (!(++ecount % fpeq->entry_repost)) 13752 if (!(++ecount % fpeq->entry_repost))
13649 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 13753 break;
13650 fpeq->EQ_processed++; 13754 fpeq->EQ_processed++;
13651 } 13755 }
13652 13756
@@ -13827,17 +13931,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13827 } 13931 }
13828 queue->entry_size = entry_size; 13932 queue->entry_size = entry_size;
13829 queue->entry_count = entry_count; 13933 queue->entry_count = entry_count;
13830
13831 /*
13832 * entry_repost is calculated based on the number of entries in the
13833 * queue. This works out except for RQs. If buffers are NOT initially
13834 * posted for every RQE, entry_repost should be adjusted accordingly.
13835 */
13836 queue->entry_repost = (entry_count >> 3);
13837 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13838 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
13839 queue->phba = phba; 13934 queue->phba = phba;
13840 13935
13936 /* entry_repost will be set during q creation */
13937
13841 return queue; 13938 return queue;
13842out_fail: 13939out_fail:
13843 lpfc_sli4_queue_free(queue); 13940 lpfc_sli4_queue_free(queue);
@@ -14068,6 +14165,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14068 status = -ENXIO; 14165 status = -ENXIO;
14069 eq->host_index = 0; 14166 eq->host_index = 0;
14070 eq->hba_index = 0; 14167 eq->hba_index = 0;
14168 eq->entry_repost = LPFC_EQ_REPOST;
14071 14169
14072 mempool_free(mbox, phba->mbox_mem_pool); 14170 mempool_free(mbox, phba->mbox_mem_pool);
14073 return status; 14171 return status;
@@ -14141,9 +14239,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14141 default: 14239 default:
14142 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14143 "0361 Unsupported CQ count: " 14241 "0361 Unsupported CQ count: "
14144 "entry cnt %d sz %d pg cnt %d repost %d\n", 14242 "entry cnt %d sz %d pg cnt %d\n",
14145 cq->entry_count, cq->entry_size, 14243 cq->entry_count, cq->entry_size,
14146 cq->page_count, cq->entry_repost); 14244 cq->page_count);
14147 if (cq->entry_count < 256) { 14245 if (cq->entry_count < 256) {
14148 status = -EINVAL; 14246 status = -EINVAL;
14149 goto out; 14247 goto out;
@@ -14196,6 +14294,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14196 cq->assoc_qid = eq->queue_id; 14294 cq->assoc_qid = eq->queue_id;
14197 cq->host_index = 0; 14295 cq->host_index = 0;
14198 cq->hba_index = 0; 14296 cq->hba_index = 0;
14297 cq->entry_repost = LPFC_CQ_REPOST;
14199 14298
14200out: 14299out:
14201 mempool_free(mbox, phba->mbox_mem_pool); 14300 mempool_free(mbox, phba->mbox_mem_pool);
@@ -14387,6 +14486,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14387 cq->assoc_qid = eq->queue_id; 14486 cq->assoc_qid = eq->queue_id;
14388 cq->host_index = 0; 14487 cq->host_index = 0;
14389 cq->hba_index = 0; 14488 cq->hba_index = 0;
14489 cq->entry_repost = LPFC_CQ_REPOST;
14390 14490
14391 rc = 0; 14491 rc = 0;
14392 list_for_each_entry(dmabuf, &cq->page_list, list) { 14492 list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14635,6 +14735,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14635 mq->subtype = subtype; 14735 mq->subtype = subtype;
14636 mq->host_index = 0; 14736 mq->host_index = 0;
14637 mq->hba_index = 0; 14737 mq->hba_index = 0;
14738 mq->entry_repost = LPFC_MQ_REPOST;
14638 14739
14639 /* link the mq onto the parent cq child list */ 14740 /* link the mq onto the parent cq child list */
14640 list_add_tail(&mq->list, &cq->child_list); 14741 list_add_tail(&mq->list, &cq->child_list);
@@ -14860,34 +14961,6 @@ out:
14860} 14961}
14861 14962
14862/** 14963/**
14863 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
14864 * @phba: HBA structure that indicates port to create a queue on.
14865 * @rq: The queue structure to use for the receive queue.
14866 * @qno: The associated HBQ number
14867 *
14868 *
14869 * For SLI4 we need to adjust the RQ repost value based on
14870 * the number of buffers that are initially posted to the RQ.
14871 */
14872void
14873lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
14874{
14875 uint32_t cnt;
14876
14877 /* sanity check on queue memory */
14878 if (!rq)
14879 return;
14880 cnt = lpfc_hbq_defs[qno]->entry_count;
14881
14882 /* Recalc repost for RQs based on buffers initially posted */
14883 cnt = (cnt >> 3);
14884 if (cnt < LPFC_QUEUE_MIN_REPOST)
14885 cnt = LPFC_QUEUE_MIN_REPOST;
14886
14887 rq->entry_repost = cnt;
14888}
14889
14890/**
14891 * lpfc_rq_create - Create a Receive Queue on the HBA 14964 * lpfc_rq_create - Create a Receive Queue on the HBA
14892 * @phba: HBA structure that indicates port to create a queue on. 14965 * @phba: HBA structure that indicates port to create a queue on.
14893 * @hrq: The queue structure to use to create the header receive queue. 14966 * @hrq: The queue structure to use to create the header receive queue.
@@ -15072,6 +15145,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15072 hrq->subtype = subtype; 15145 hrq->subtype = subtype;
15073 hrq->host_index = 0; 15146 hrq->host_index = 0;
15074 hrq->hba_index = 0; 15147 hrq->hba_index = 0;
15148 hrq->entry_repost = LPFC_RQ_REPOST;
15075 15149
15076 /* now create the data queue */ 15150 /* now create the data queue */
15077 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
@@ -15082,7 +15156,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15082 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15156 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15083 bf_set(lpfc_rq_context_rqe_count_1, 15157 bf_set(lpfc_rq_context_rqe_count_1,
15084 &rq_create->u.request.context, hrq->entry_count); 15158 &rq_create->u.request.context, hrq->entry_count);
15085 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 15159 if (subtype == LPFC_NVMET)
15160 rq_create->u.request.context.buffer_size =
15161 LPFC_NVMET_DATA_BUF_SIZE;
15162 else
15163 rq_create->u.request.context.buffer_size =
15164 LPFC_DATA_BUF_SIZE;
15086 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15165 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15087 LPFC_RQE_SIZE_8); 15166 LPFC_RQE_SIZE_8);
15088 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15167 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
@@ -15119,8 +15198,14 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15119 LPFC_RQ_RING_SIZE_4096); 15198 LPFC_RQ_RING_SIZE_4096);
15120 break; 15199 break;
15121 } 15200 }
15122 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15201 if (subtype == LPFC_NVMET)
15123 LPFC_DATA_BUF_SIZE); 15202 bf_set(lpfc_rq_context_buf_size,
15203 &rq_create->u.request.context,
15204 LPFC_NVMET_DATA_BUF_SIZE);
15205 else
15206 bf_set(lpfc_rq_context_buf_size,
15207 &rq_create->u.request.context,
15208 LPFC_DATA_BUF_SIZE);
15124 } 15209 }
15125 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15210 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15126 cq->queue_id); 15211 cq->queue_id);
@@ -15153,6 +15238,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15153 drq->subtype = subtype; 15238 drq->subtype = subtype;
15154 drq->host_index = 0; 15239 drq->host_index = 0;
15155 drq->hba_index = 0; 15240 drq->hba_index = 0;
15241 drq->entry_repost = LPFC_RQ_REPOST;
15156 15242
15157 /* link the header and data RQs onto the parent cq child list */ 15243 /* link the header and data RQs onto the parent cq child list */
15158 list_add_tail(&hrq->list, &cq->child_list); 15244 list_add_tail(&hrq->list, &cq->child_list);
@@ -15265,7 +15351,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15265 cq->queue_id); 15351 cq->queue_id);
15266 bf_set(lpfc_rq_context_data_size, 15352 bf_set(lpfc_rq_context_data_size,
15267 &rq_create->u.request.context, 15353 &rq_create->u.request.context,
15268 LPFC_DATA_BUF_SIZE); 15354 LPFC_NVMET_DATA_BUF_SIZE);
15269 bf_set(lpfc_rq_context_hdr_size, 15355 bf_set(lpfc_rq_context_hdr_size,
15270 &rq_create->u.request.context, 15356 &rq_create->u.request.context,
15271 LPFC_HDR_BUF_SIZE); 15357 LPFC_HDR_BUF_SIZE);
@@ -15310,6 +15396,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15310 hrq->subtype = subtype; 15396 hrq->subtype = subtype;
15311 hrq->host_index = 0; 15397 hrq->host_index = 0;
15312 hrq->hba_index = 0; 15398 hrq->hba_index = 0;
15399 hrq->entry_repost = LPFC_RQ_REPOST;
15313 15400
15314 drq->db_format = LPFC_DB_RING_FORMAT; 15401 drq->db_format = LPFC_DB_RING_FORMAT;
15315 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15402 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -15318,6 +15405,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15318 drq->subtype = subtype; 15405 drq->subtype = subtype;
15319 drq->host_index = 0; 15406 drq->host_index = 0;
15320 drq->hba_index = 0; 15407 drq->hba_index = 0;
15408 drq->entry_repost = LPFC_RQ_REPOST;
15321 15409
15322 list_add_tail(&hrq->list, &cq->child_list); 15410 list_add_tail(&hrq->list, &cq->child_list);
15323 list_add_tail(&drq->list, &cq->child_list); 15411 list_add_tail(&drq->list, &cq->child_list);
@@ -16058,6 +16146,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16058 struct fc_vft_header *fc_vft_hdr; 16146 struct fc_vft_header *fc_vft_hdr;
16059 uint32_t *header = (uint32_t *) fc_hdr; 16147 uint32_t *header = (uint32_t *) fc_hdr;
16060 16148
16149#define FC_RCTL_MDS_DIAGS 0xF4
16150
16061 switch (fc_hdr->fh_r_ctl) { 16151 switch (fc_hdr->fh_r_ctl) {
16062 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16152 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16063 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16153 case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -16085,6 +16175,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16085 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16175 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16086 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16176 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16087 case FC_RCTL_LCR: /* link credit reset */ 16177 case FC_RCTL_LCR: /* link credit reset */
16178 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16088 case FC_RCTL_END: /* end */ 16179 case FC_RCTL_END: /* end */
16089 break; 16180 break;
16090 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16181 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
@@ -16094,12 +16185,16 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16094 default: 16185 default:
16095 goto drop; 16186 goto drop;
16096 } 16187 }
16188
16189#define FC_TYPE_VENDOR_UNIQUE 0xFF
16190
16097 switch (fc_hdr->fh_type) { 16191 switch (fc_hdr->fh_type) {
16098 case FC_TYPE_BLS: 16192 case FC_TYPE_BLS:
16099 case FC_TYPE_ELS: 16193 case FC_TYPE_ELS:
16100 case FC_TYPE_FCP: 16194 case FC_TYPE_FCP:
16101 case FC_TYPE_CT: 16195 case FC_TYPE_CT:
16102 case FC_TYPE_NVME: 16196 case FC_TYPE_NVME:
16197 case FC_TYPE_VENDOR_UNIQUE:
16103 break; 16198 break;
16104 case FC_TYPE_IP: 16199 case FC_TYPE_IP:
16105 case FC_TYPE_ILS: 16200 case FC_TYPE_ILS:
@@ -16110,12 +16205,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16110 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16205 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16111 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 16206 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
16112 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16207 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16208 (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
16113 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 16209 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
16114 lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type, 16210 (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
16115 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16211 "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
16116 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16212 fc_hdr->fh_type, be32_to_cpu(header[0]),
16117 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16213 be32_to_cpu(header[1]), be32_to_cpu(header[2]),
16118 be32_to_cpu(header[6])); 16214 be32_to_cpu(header[3]), be32_to_cpu(header[4]),
16215 be32_to_cpu(header[5]), be32_to_cpu(header[6]));
16119 return 0; 16216 return 0;
16120drop: 16217drop:
16121 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16218 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -16921,6 +17018,96 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
16921 lpfc_sli_release_iocbq(phba, iocbq); 17018 lpfc_sli_release_iocbq(phba, iocbq);
16922} 17019}
16923 17020
17021static void
17022lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17023 struct lpfc_iocbq *rspiocb)
17024{
17025 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17026
17027 if (pcmd && pcmd->virt)
17028 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17029 kfree(pcmd);
17030 lpfc_sli_release_iocbq(phba, cmdiocb);
17031}
17032
17033static void
17034lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17035 struct hbq_dmabuf *dmabuf)
17036{
17037 struct fc_frame_header *fc_hdr;
17038 struct lpfc_hba *phba = vport->phba;
17039 struct lpfc_iocbq *iocbq = NULL;
17040 union lpfc_wqe *wqe;
17041 struct lpfc_dmabuf *pcmd = NULL;
17042 uint32_t frame_len;
17043 int rc;
17044
17045 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17046 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17047
17048 /* Send the received frame back */
17049 iocbq = lpfc_sli_get_iocbq(phba);
17050 if (!iocbq)
17051 goto exit;
17052
17053 /* Allocate buffer for command payload */
17054 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17055 if (pcmd)
17056 pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17057 &pcmd->phys);
17058 if (!pcmd || !pcmd->virt)
17059 goto exit;
17060
17061 INIT_LIST_HEAD(&pcmd->list);
17062
17063 /* copyin the payload */
17064 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17065
17066 /* fill in BDE's for command */
17067 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17068 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17069 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17070 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17071
17072 iocbq->context2 = pcmd;
17073 iocbq->vport = vport;
17074 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17075 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17076
17077 /*
17078 * Setup rest of the iocb as though it were a WQE
17079 * Build the SEND_FRAME WQE
17080 */
17081 wqe = (union lpfc_wqe *)&iocbq->iocb;
17082
17083 wqe->send_frame.frame_len = frame_len;
17084 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17085 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17086 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17087 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17088 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17089 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17090
17091 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17092 iocbq->iocb.ulpLe = 1;
17093 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17094 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17095 if (rc == IOCB_ERROR)
17096 goto exit;
17097
17098 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17099 return;
17100
17101exit:
17102 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17103 "2023 Unable to process MDS loopback frame\n");
17104 if (pcmd && pcmd->virt)
17105 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17106 kfree(pcmd);
17107 lpfc_sli_release_iocbq(phba, iocbq);
17108 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17109}
17110
16924/** 17111/**
16925 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17112 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
16926 * @phba: Pointer to HBA context object. 17113 * @phba: Pointer to HBA context object.
@@ -16959,6 +17146,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
16959 fcfi = bf_get(lpfc_rcqe_fcf_id, 17146 fcfi = bf_get(lpfc_rcqe_fcf_id,
16960 &dmabuf->cq_event.cqe.rcqe_cmpl); 17147 &dmabuf->cq_event.cqe.rcqe_cmpl);
16961 17148
17149 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17150 vport = phba->pport;
17151 /* Handle MDS Loopback frames */
17152 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17153 return;
17154 }
17155
16962 /* d_id this frame is directed to */ 17156 /* d_id this frame is directed to */
16963 did = sli4_did_from_fc_hdr(fc_hdr); 17157 did = sli4_did_from_fc_hdr(fc_hdr);
16964 17158
@@ -17132,6 +17326,14 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17132 "status x%x add_status x%x, mbx status x%x\n", 17326 "status x%x add_status x%x, mbx status x%x\n",
17133 shdr_status, shdr_add_status, rc); 17327 shdr_status, shdr_add_status, rc);
17134 rc = -ENXIO; 17328 rc = -ENXIO;
17329 } else {
17330 /*
17331 * The next_rpi stores the next logical module-64 rpi value used
17332 * to post physical rpis in subsequent rpi postings.
17333 */
17334 spin_lock_irq(&phba->hbalock);
17335 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17336 spin_unlock_irq(&phba->hbalock);
17135 } 17337 }
17136 return rc; 17338 return rc;
17137} 17339}
@@ -18712,7 +18914,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
18712 18914
18713 spin_lock_irqsave(&pring->ring_lock, iflags); 18915 spin_lock_irqsave(&pring->ring_lock, iflags);
18714 ctxp = pwqe->context2; 18916 ctxp = pwqe->context2;
18715 sglq = ctxp->rqb_buffer->sglq; 18917 sglq = ctxp->ctxbuf->sglq;
18716 if (pwqe->sli4_xritag == NO_XRI) { 18918 if (pwqe->sli4_xritag == NO_XRI) {
18717 pwqe->sli4_lxritag = sglq->sli4_lxritag; 18919 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18718 pwqe->sli4_xritag = sglq->sli4_xritag; 18920 pwqe->sli4_xritag = sglq->sli4_xritag;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index da46471337c8..cf863db27700 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -24,7 +24,6 @@
24#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 24#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
25#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 25#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
26#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 26#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
27#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
28#define LPFC_RPI_LOW_WATER_MARK 10 27#define LPFC_RPI_LOW_WATER_MARK 10
29 28
30#define LPFC_UNREG_FCF 1 29#define LPFC_UNREG_FCF 1
@@ -155,7 +154,11 @@ struct lpfc_queue {
155 uint32_t entry_count; /* Number of entries to support on the queue */ 154 uint32_t entry_count; /* Number of entries to support on the queue */
156 uint32_t entry_size; /* Size of each queue entry. */ 155 uint32_t entry_size; /* Size of each queue entry. */
157 uint32_t entry_repost; /* Count of entries before doorbell is rung */ 156 uint32_t entry_repost; /* Count of entries before doorbell is rung */
158#define LPFC_QUEUE_MIN_REPOST 8 157#define LPFC_EQ_REPOST 8
158#define LPFC_MQ_REPOST 8
159#define LPFC_CQ_REPOST 64
160#define LPFC_RQ_REPOST 64
161#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
159 uint32_t queue_id; /* Queue ID assigned by the hardware */ 162 uint32_t queue_id; /* Queue ID assigned by the hardware */
160 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ 163 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
161 uint32_t page_count; /* Number of pages allocated for this queue */ 164 uint32_t page_count; /* Number of pages allocated for this queue */
@@ -195,7 +198,7 @@ struct lpfc_queue {
195/* defines for RQ stats */ 198/* defines for RQ stats */
196#define RQ_no_posted_buf q_cnt_1 199#define RQ_no_posted_buf q_cnt_1
197#define RQ_no_buf_found q_cnt_2 200#define RQ_no_buf_found q_cnt_2
198#define RQ_buf_trunc q_cnt_3 201#define RQ_buf_posted q_cnt_3
199#define RQ_rcv_buf q_cnt_4 202#define RQ_rcv_buf q_cnt_4
200 203
201 uint64_t isr_timestamp; 204 uint64_t isr_timestamp;
@@ -617,12 +620,17 @@ struct lpfc_sli4_hba {
617 uint16_t scsi_xri_start; 620 uint16_t scsi_xri_start;
618 uint16_t els_xri_cnt; 621 uint16_t els_xri_cnt;
619 uint16_t nvmet_xri_cnt; 622 uint16_t nvmet_xri_cnt;
623 uint16_t nvmet_ctx_cnt;
624 uint16_t nvmet_io_wait_cnt;
625 uint16_t nvmet_io_wait_total;
620 struct list_head lpfc_els_sgl_list; 626 struct list_head lpfc_els_sgl_list;
621 struct list_head lpfc_abts_els_sgl_list; 627 struct list_head lpfc_abts_els_sgl_list;
622 struct list_head lpfc_nvmet_sgl_list; 628 struct list_head lpfc_nvmet_sgl_list;
623 struct list_head lpfc_abts_nvmet_ctx_list; 629 struct list_head lpfc_abts_nvmet_ctx_list;
624 struct list_head lpfc_abts_scsi_buf_list; 630 struct list_head lpfc_abts_scsi_buf_list;
625 struct list_head lpfc_abts_nvme_buf_list; 631 struct list_head lpfc_abts_nvme_buf_list;
632 struct list_head lpfc_nvmet_ctx_list;
633 struct list_head lpfc_nvmet_io_wait_list;
626 struct lpfc_sglq **lpfc_sglq_active_list; 634 struct lpfc_sglq **lpfc_sglq_active_list;
627 struct list_head lpfc_rpi_hdr_list; 635 struct list_head lpfc_rpi_hdr_list;
628 unsigned long *rpi_bmask; 636 unsigned long *rpi_bmask;
@@ -654,6 +662,7 @@ struct lpfc_sli4_hba {
654 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 662 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
655 spinlock_t sgl_list_lock; /* list of aborted els IOs */ 663 spinlock_t sgl_list_lock; /* list of aborted els IOs */
656 spinlock_t nvmet_io_lock; 664 spinlock_t nvmet_io_lock;
665 spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
657 uint32_t physical_port; 666 uint32_t physical_port;
658 667
659 /* CPU to vector mapping information */ 668 /* CPU to vector mapping information */
@@ -661,8 +670,6 @@ struct lpfc_sli4_hba {
661 uint16_t num_online_cpu; 670 uint16_t num_online_cpu;
662 uint16_t num_present_cpu; 671 uint16_t num_present_cpu;
663 uint16_t curr_disp_cpu; 672 uint16_t curr_disp_cpu;
664
665 uint16_t nvmet_mrq_post_idx;
666}; 673};
667 674
668enum lpfc_sge_type { 675enum lpfc_sge_type {
@@ -698,6 +705,7 @@ struct lpfc_rpi_hdr {
698 struct lpfc_dmabuf *dmabuf; 705 struct lpfc_dmabuf *dmabuf;
699 uint32_t page_count; 706 uint32_t page_count;
700 uint32_t start_rpi; 707 uint32_t start_rpi;
708 uint16_t next_rpi;
701}; 709};
702 710
703struct lpfc_rsrc_blks { 711struct lpfc_rsrc_blks {
@@ -762,7 +770,6 @@ int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
762int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 770int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
763 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 771 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
764 uint32_t subtype); 772 uint32_t subtype);
765void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
766int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); 773int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
767int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); 774int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
768int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); 775int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 1c26dc67151b..c2653244221c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "11.2.0.12" 23#define LPFC_DRIVER_VERSION "11.2.0.14"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index a4aadf5f4dc6..1cc814f1505a 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3770,9 +3770,6 @@ static long pmcraid_ioctl_passthrough(
3770 pmcraid_err("couldn't build passthrough ioadls\n"); 3770 pmcraid_err("couldn't build passthrough ioadls\n");
3771 goto out_free_cmd; 3771 goto out_free_cmd;
3772 } 3772 }
3773 } else if (request_size < 0) {
3774 rc = -EINVAL;
3775 goto out_free_cmd;
3776 } 3773 }
3777 3774
3778 /* If data is being written into the device, copy the data from user 3775 /* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 40aeb6bb96a2..07ee88200e91 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -259,7 +259,7 @@ struct qedf_io_log {
259 uint16_t task_id; 259 uint16_t task_id;
260 uint32_t port_id; /* Remote port fabric ID */ 260 uint32_t port_id; /* Remote port fabric ID */
261 int lun; 261 int lun;
262 char op; /* SCSI CDB */ 262 unsigned char op; /* SCSI CDB */
263 uint8_t lba[4]; 263 uint8_t lba[4];
264 unsigned int bufflen; /* SCSI buffer length */ 264 unsigned int bufflen; /* SCSI buffer length */
265 unsigned int sg_count; /* Number of SG elements */ 265 unsigned int sg_count; /* Number of SG elements */
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index c505d41f6dc8..90627033bde6 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -109,7 +109,7 @@ retry_els:
109 did = fcport->rdata->ids.port_id; 109 did = fcport->rdata->ids.port_id;
110 sid = fcport->sid; 110 sid = fcport->sid;
111 111
112 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did, 112 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 113 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
114 FC_FC_SEQ_INIT, 0); 114 FC_FC_SEQ_INIT, 0);
115 115
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index cceddd995a4b..a5c97342fd5d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2895,7 +2895,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; 2895 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; 2896 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; 2897 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
2898 memcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); 2898 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); 2899 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
2900 if (rc) { 2900 if (rc) {
2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); 2901 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7bfbcfa7af40..61cdd99ae41e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -763,6 +763,8 @@ struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
763 struct scsi_device *sdev; 763 struct scsi_device *sdev;
764 764
765 list_for_each_entry(sdev, &shost->__devices, siblings) { 765 list_for_each_entry(sdev, &shost->__devices, siblings) {
766 if (sdev->sdev_state == SDEV_DEL)
767 continue;
766 if (sdev->channel == channel && sdev->id == id && 768 if (sdev->channel == channel && sdev->id == id &&
767 sdev->lun ==lun) 769 sdev->lun ==lun)
768 return sdev; 770 return sdev;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 814a4bd8405d..99e16ac479e3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -30,6 +30,7 @@
30#include <scsi/scsi_driver.h> 30#include <scsi/scsi_driver.h>
31#include <scsi/scsi_eh.h> 31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
33#include <scsi/scsi_dh.h> 34#include <scsi/scsi_dh.h>
34 35
35#include <trace/events/scsi.h> 36#include <trace/events/scsi.h>
@@ -1850,7 +1851,7 @@ static int scsi_mq_prep_fn(struct request *req)
1850 1851
1851 /* zero out the cmd, except for the embedded scsi_request */ 1852 /* zero out the cmd, except for the embedded scsi_request */
1852 memset((char *)cmd + sizeof(cmd->req), 0, 1853 memset((char *)cmd + sizeof(cmd->req), 0,
1853 sizeof(*cmd) - sizeof(cmd->req)); 1854 sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
1854 1855
1855 req->special = cmd; 1856 req->special = cmd;
1856 1857
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f9d1432d7cc5..b6bb4e0ce0e3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -827,21 +827,32 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
827 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 827 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
828 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); 828 u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
829 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); 829 u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
830 int ret;
830 831
831 if (!(rq->cmd_flags & REQ_NOUNMAP)) { 832 if (!(rq->cmd_flags & REQ_NOUNMAP)) {
832 switch (sdkp->zeroing_mode) { 833 switch (sdkp->zeroing_mode) {
833 case SD_ZERO_WS16_UNMAP: 834 case SD_ZERO_WS16_UNMAP:
834 return sd_setup_write_same16_cmnd(cmd, true); 835 ret = sd_setup_write_same16_cmnd(cmd, true);
836 goto out;
835 case SD_ZERO_WS10_UNMAP: 837 case SD_ZERO_WS10_UNMAP:
836 return sd_setup_write_same10_cmnd(cmd, true); 838 ret = sd_setup_write_same10_cmnd(cmd, true);
839 goto out;
837 } 840 }
838 } 841 }
839 842
840 if (sdp->no_write_same) 843 if (sdp->no_write_same)
841 return BLKPREP_INVALID; 844 return BLKPREP_INVALID;
845
842 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) 846 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
843 return sd_setup_write_same16_cmnd(cmd, false); 847 ret = sd_setup_write_same16_cmnd(cmd, false);
844 return sd_setup_write_same10_cmnd(cmd, false); 848 else
849 ret = sd_setup_write_same10_cmnd(cmd, false);
850
851out:
852 if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
853 return sd_zbc_write_lock_zone(cmd);
854
855 return ret;
845} 856}
846 857
847static void sd_config_write_same(struct scsi_disk *sdkp) 858static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -948,6 +959,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
948 rq->__data_len = sdp->sector_size; 959 rq->__data_len = sdp->sector_size;
949 ret = scsi_init_io(cmd); 960 ret = scsi_init_io(cmd);
950 rq->__data_len = nr_bytes; 961 rq->__data_len = nr_bytes;
962
963 if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
964 sd_zbc_write_unlock_zone(cmd);
965
951 return ret; 966 return ret;
952} 967}
953 968
@@ -1567,17 +1582,21 @@ out:
1567 return retval; 1582 return retval;
1568} 1583}
1569 1584
1570static int sd_sync_cache(struct scsi_disk *sdkp) 1585static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1571{ 1586{
1572 int retries, res; 1587 int retries, res;
1573 struct scsi_device *sdp = sdkp->device; 1588 struct scsi_device *sdp = sdkp->device;
1574 const int timeout = sdp->request_queue->rq_timeout 1589 const int timeout = sdp->request_queue->rq_timeout
1575 * SD_FLUSH_TIMEOUT_MULTIPLIER; 1590 * SD_FLUSH_TIMEOUT_MULTIPLIER;
1576 struct scsi_sense_hdr sshdr; 1591 struct scsi_sense_hdr my_sshdr;
1577 1592
1578 if (!scsi_device_online(sdp)) 1593 if (!scsi_device_online(sdp))
1579 return -ENODEV; 1594 return -ENODEV;
1580 1595
1596 /* caller might not be interested in sense, but we need it */
1597 if (!sshdr)
1598 sshdr = &my_sshdr;
1599
1581 for (retries = 3; retries > 0; --retries) { 1600 for (retries = 3; retries > 0; --retries) {
1582 unsigned char cmd[10] = { 0 }; 1601 unsigned char cmd[10] = { 0 };
1583 1602
@@ -1586,7 +1605,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1586 * Leave the rest of the command zero to indicate 1605 * Leave the rest of the command zero to indicate
1587 * flush everything. 1606 * flush everything.
1588 */ 1607 */
1589 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 1608 res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1590 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL); 1609 timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
1591 if (res == 0) 1610 if (res == 0)
1592 break; 1611 break;
@@ -1596,11 +1615,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1596 sd_print_result(sdkp, "Synchronize Cache(10) failed", res); 1615 sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1597 1616
1598 if (driver_byte(res) & DRIVER_SENSE) 1617 if (driver_byte(res) & DRIVER_SENSE)
1599 sd_print_sense_hdr(sdkp, &sshdr); 1618 sd_print_sense_hdr(sdkp, sshdr);
1619
1600 /* we need to evaluate the error return */ 1620 /* we need to evaluate the error return */
1601 if (scsi_sense_valid(&sshdr) && 1621 if (scsi_sense_valid(sshdr) &&
1602 (sshdr.asc == 0x3a || /* medium not present */ 1622 (sshdr->asc == 0x3a || /* medium not present */
1603 sshdr.asc == 0x20)) /* invalid command */ 1623 sshdr->asc == 0x20)) /* invalid command */
1604 /* this is no error here */ 1624 /* this is no error here */
1605 return 0; 1625 return 0;
1606 1626
@@ -3444,7 +3464,7 @@ static void sd_shutdown(struct device *dev)
3444 3464
3445 if (sdkp->WCE && sdkp->media_present) { 3465 if (sdkp->WCE && sdkp->media_present) {
3446 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3466 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3447 sd_sync_cache(sdkp); 3467 sd_sync_cache(sdkp, NULL);
3448 } 3468 }
3449 3469
3450 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) { 3470 if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
@@ -3456,6 +3476,7 @@ static void sd_shutdown(struct device *dev)
3456static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) 3476static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3457{ 3477{
3458 struct scsi_disk *sdkp = dev_get_drvdata(dev); 3478 struct scsi_disk *sdkp = dev_get_drvdata(dev);
3479 struct scsi_sense_hdr sshdr;
3459 int ret = 0; 3480 int ret = 0;
3460 3481
3461 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ 3482 if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
@@ -3463,12 +3484,23 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3463 3484
3464 if (sdkp->WCE && sdkp->media_present) { 3485 if (sdkp->WCE && sdkp->media_present) {
3465 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); 3486 sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
3466 ret = sd_sync_cache(sdkp); 3487 ret = sd_sync_cache(sdkp, &sshdr);
3488
3467 if (ret) { 3489 if (ret) {
3468 /* ignore OFFLINE device */ 3490 /* ignore OFFLINE device */
3469 if (ret == -ENODEV) 3491 if (ret == -ENODEV)
3470 ret = 0; 3492 return 0;
3471 goto done; 3493
3494 if (!scsi_sense_valid(&sshdr) ||
3495 sshdr.sense_key != ILLEGAL_REQUEST)
3496 return ret;
3497
3498 /*
3499 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
3500 * doesn't support sync. There's not much to do and
3501 * suspend shouldn't fail.
3502 */
3503 ret = 0;
3472 } 3504 }
3473 } 3505 }
3474 3506
@@ -3480,7 +3512,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
3480 ret = 0; 3512 ret = 0;
3481 } 3513 }
3482 3514
3483done:
3484 return ret; 3515 return ret;
3485} 3516}
3486 3517
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0a38ba01b7b4..82c33a6edbea 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2074,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2074 if ((1 == resp->done) && (!resp->sg_io_owned) && 2074 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2075 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2075 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2076 resp->done = 2; /* guard against other readers */ 2076 resp->done = 2; /* guard against other readers */
2077 break; 2077 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2078 return resp;
2078 } 2079 }
2079 } 2080 }
2080 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2081 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2081 return resp; 2082 return NULL;
2082} 2083}
2083 2084
2084/* always adds to end of list */ 2085/* always adds to end of list */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index abc7e87937cc..ffe8d8608818 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7698,6 +7698,12 @@ static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7698 ufshcd_add_spm_lvl_sysfs_nodes(hba); 7698 ufshcd_add_spm_lvl_sysfs_nodes(hba);
7699} 7699}
7700 7700
7701static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
7702{
7703 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
7704 device_remove_file(hba->dev, &hba->spm_lvl_attr);
7705}
7706
7701/** 7707/**
7702 * ufshcd_shutdown - shutdown routine 7708 * ufshcd_shutdown - shutdown routine
7703 * @hba: per adapter instance 7709 * @hba: per adapter instance
@@ -7735,6 +7741,7 @@ EXPORT_SYMBOL(ufshcd_shutdown);
7735 */ 7741 */
7736void ufshcd_remove(struct ufs_hba *hba) 7742void ufshcd_remove(struct ufs_hba *hba)
7737{ 7743{
7744 ufshcd_remove_sysfs_nodes(hba);
7738 scsi_remove_host(hba->host); 7745 scsi_remove_host(hba->host);
7739 /* disable interrupts */ 7746 /* disable interrupts */
7740 ufshcd_disable_intr(hba, hba->intr_mask); 7747 ufshcd_disable_intr(hba, hba->intr_mask);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index b6195fdf0d00..22e98a90468c 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -49,7 +49,7 @@ static const struct of_device_id sun_top_ctrl_match[] = {
49 { .compatible = "brcm,bcm7420-sun-top-ctrl", }, 49 { .compatible = "brcm,bcm7420-sun-top-ctrl", },
50 { .compatible = "brcm,bcm7425-sun-top-ctrl", }, 50 { .compatible = "brcm,bcm7425-sun-top-ctrl", },
51 { .compatible = "brcm,bcm7429-sun-top-ctrl", }, 51 { .compatible = "brcm,bcm7429-sun-top-ctrl", },
52 { .compatible = "brcm,bcm7425-sun-top-ctrl", }, 52 { .compatible = "brcm,bcm7435-sun-top-ctrl", },
53 { .compatible = "brcm,brcmstb-sun-top-ctrl", }, 53 { .compatible = "brcm,brcmstb-sun-top-ctrl", },
54 { } 54 { }
55}; 55};
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 357a5d8f8da0..a5b86a28f343 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -2,8 +2,9 @@ menu "i.MX SoC drivers"
2 2
3config IMX7_PM_DOMAINS 3config IMX7_PM_DOMAINS
4 bool "i.MX7 PM domains" 4 bool "i.MX7 PM domains"
5 select PM_GENERIC_DOMAINS
6 depends on SOC_IMX7D || (COMPILE_TEST && OF) 5 depends on SOC_IMX7D || (COMPILE_TEST && OF)
6 depends on PM
7 select PM_GENERIC_DOMAINS
7 default y if SOC_IMX7D 8 default y if SOC_IMX7D
8 9
9endmenu 10endmenu
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index ecebe2eecc3a..026182d3b27c 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -413,7 +413,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
413 * @name: slave channel name 413 * @name: slave channel name
414 * @config: dma configuration parameters 414 * @config: dma configuration parameters
415 * 415 *
416 * Returns pointer to appropriate DMA channel on success or NULL. 416 * Returns pointer to appropriate DMA channel on success or error.
417 */ 417 */
418void *knav_dma_open_channel(struct device *dev, const char *name, 418void *knav_dma_open_channel(struct device *dev, const char *name,
419 struct knav_dma_cfg *config) 419 struct knav_dma_cfg *config)
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
deleted file mode 100644
index 168715271f06..000000000000
--- a/drivers/staging/android/ion/devicetree.txt
+++ /dev/null
@@ -1,51 +0,0 @@
1Ion Memory Manager
2
3Ion is a memory manager that allows for sharing of buffers via dma-buf.
4Ion allows for different types of allocation via an abstraction called
5a 'heap'. A heap represents a specific type of memory. Each heap has
6a different type. There can be multiple instances of the same heap
7type.
8
9Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
10in the devicetree.
11
12Required properties for Ion
13
14- compatible: "linux,ion" PLUS a compatible property for the device
15
16All child nodes of a linux,ion node are interpreted as heaps
17
18required properties for heaps
19
20- compatible: compatible string for a heap type PLUS a compatible property
21for the specific instance of the heap. Current heap types
22-- linux,ion-heap-system
23-- linux,ion-heap-system-contig
24-- linux,ion-heap-carveout
25-- linux,ion-heap-chunk
26-- linux,ion-heap-dma
27-- linux,ion-heap-custom
28
29Optional properties
30- memory-region: A phandle to a memory region. Required for DMA heap type
31(see reserved-memory.txt for details on the reservation)
32
33Example:
34
35 ion {
36 compatbile = "hisilicon,ion", "linux,ion";
37
38 ion-system-heap {
39 compatbile = "hisilicon,system-heap", "linux,ion-heap-system"
40 };
41
42 ion-camera-region {
43 compatible = "hisilicon,camera-heap", "linux,ion-heap-dma"
44 memory-region = <&camera_region>;
45 };
46
47 ion-fb-region {
48 compatbile = "hisilicon,fb-heap", "linux,ion-heap-dma"
49 memory-region = <&fb_region>;
50 };
51 }
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 522bd62c102e..8611adf3bb2e 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -376,7 +376,6 @@ int send_request(
376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); 376 rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
377 if (rc != 0) { 377 if (rc != 0) {
378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); 378 SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc);
379 spin_unlock_bh(&req_mgr_h->hw_lock);
380 return rc; 379 return rc;
381 } 380 }
382#endif 381#endif
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index 2e325cb747ae..730fd6d4db33 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -12,6 +12,7 @@ config FSL_DPAA2
12config FSL_DPAA2_ETH 12config FSL_DPAA2_ETH
13 tristate "Freescale DPAA2 Ethernet" 13 tristate "Freescale DPAA2 Ethernet"
14 depends on FSL_DPAA2 && FSL_MC_DPIO 14 depends on FSL_DPAA2 && FSL_MC_DPIO
15 depends on NETDEVICES && ETHERNET
15 ---help--- 16 ---help---
16 Ethernet driver for Freescale DPAA2 SoCs, using the 17 Ethernet driver for Freescale DPAA2 SoCs, using the
17 Freescale MC bus driver 18 Freescale MC bus driver
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 4723a0bd5067..1c6ed5b2a6f9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
97 97
98 switch (variable) { 98 switch (variable) {
99 case HW_VAR_BSSID: 99 case HW_VAR_BSSID:
100 rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); 100 /* BSSIDR 2 byte alignment */
101 rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); 101 rtl92e_writew(dev, BSSIDR, *(u16 *)val);
102 rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
102 break; 103 break;
103 104
104 case HW_VAR_MEDIA_STATUS: 105 case HW_VAR_MEDIA_STATUS:
@@ -624,7 +625,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
624 struct r8192_priv *priv = rtllib_priv(dev); 625 struct r8192_priv *priv = rtllib_priv(dev);
625 626
626 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); 627 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
627 curCR = rtl92e_readl(dev, EPROM_CMD); 628 curCR = rtl92e_readw(dev, EPROM_CMD);
628 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, 629 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
629 curCR); 630 curCR);
630 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : 631 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -961,8 +962,8 @@ static void _rtl92e_net_update(struct net_device *dev)
961 rtl92e_config_rate(dev, &rate_config); 962 rtl92e_config_rate(dev, &rate_config);
962 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; 963 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
963 priv->basic_rate = rate_config &= 0x15f; 964 priv->basic_rate = rate_config &= 0x15f;
964 rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); 965 rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
965 rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); 966 rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
966 967
967 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { 968 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
968 rtl92e_writew(dev, ATIMWND, 2); 969 rtl92e_writew(dev, ATIMWND, 2);
@@ -1182,8 +1183,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1182 struct cb_desc *cb_desc, struct sk_buff *skb) 1183 struct cb_desc *cb_desc, struct sk_buff *skb)
1183{ 1184{
1184 struct r8192_priv *priv = rtllib_priv(dev); 1185 struct r8192_priv *priv = rtllib_priv(dev);
1185 dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, 1186 dma_addr_t mapping;
1186 PCI_DMA_TODEVICE);
1187 struct tx_fwinfo_8190pci *pTxFwInfo; 1187 struct tx_fwinfo_8190pci *pTxFwInfo;
1188 1188
1189 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; 1189 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1194,8 +1194,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, 1194 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
1195 pTxFwInfo->TxRate, cb_desc); 1195 pTxFwInfo->TxRate, cb_desc);
1196 1196
1197 if (pci_dma_mapping_error(priv->pdev, mapping))
1198 netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
1199 if (cb_desc->bAMPDUEnable) { 1197 if (cb_desc->bAMPDUEnable) {
1200 pTxFwInfo->AllowAggregation = 1; 1198 pTxFwInfo->AllowAggregation = 1;
1201 pTxFwInfo->RxMF = cb_desc->ampdu_factor; 1199 pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1230,6 +1228,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
1230 } 1228 }
1231 1229
1232 memset((u8 *)pdesc, 0, 12); 1230 memset((u8 *)pdesc, 0, 12);
1231
1232 mapping = pci_map_single(priv->pdev, skb->data, skb->len,
1233 PCI_DMA_TODEVICE);
1234 if (pci_dma_mapping_error(priv->pdev, mapping)) {
1235 netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
1236 return;
1237 }
1238
1233 pdesc->LINIP = 0; 1239 pdesc->LINIP = 0;
1234 pdesc->CmdInit = 1; 1240 pdesc->CmdInit = 1;
1235 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; 1241 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 48bbd9e8a52f..dcc4eb691889 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
306 pTsCommonInfo->TClasNum = TCLAS_Num; 306 pTsCommonInfo->TClasNum = TCLAS_Num;
307} 307}
308 308
309static bool IsACValid(unsigned int tid)
310{
311 return tid < 7;
312}
313
314bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, 309bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
315 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) 310 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
316{ 311{
@@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
328 if (ieee->current_network.qos_data.supported == 0) { 323 if (ieee->current_network.qos_data.supported == 0) {
329 UP = 0; 324 UP = 0;
330 } else { 325 } else {
331 if (!IsACValid(TID)) {
332 netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
333 __func__, TID);
334 return false;
335 }
336
337 switch (TID) { 326 switch (TID) {
338 case 0: 327 case 0:
339 case 3: 328 case 3:
@@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
351 case 7: 340 case 7:
352 UP = 7; 341 UP = 7;
353 break; 342 break;
343 default:
344 netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
345 __func__, TID);
346 return false;
354 } 347 }
355 } 348 }
356 349
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 5e7a61f24f8d..36c3189fc4b7 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -3531,7 +3531,6 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
3531 pwdev_priv->power_mgmt = true; 3531 pwdev_priv->power_mgmt = true;
3532 else 3532 else
3533 pwdev_priv->power_mgmt = false; 3533 pwdev_priv->power_mgmt = false;
3534 kfree((u8 *)wdev);
3535 3534
3536 return ret; 3535 return ret;
3537 3536
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c
index 2cee9a952c9b..4a356e509fe4 100644
--- a/drivers/staging/typec/fusb302/fusb302.c
+++ b/drivers/staging/typec/fusb302/fusb302.c
@@ -264,22 +264,36 @@ static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { }
264 264
265#define FUSB302_RESUME_RETRY 10 265#define FUSB302_RESUME_RETRY 10
266#define FUSB302_RESUME_RETRY_SLEEP 50 266#define FUSB302_RESUME_RETRY_SLEEP 50
267static int fusb302_i2c_write(struct fusb302_chip *chip, 267
268 u8 address, u8 data) 268static bool fusb302_is_suspended(struct fusb302_chip *chip)
269{ 269{
270 int retry_cnt; 270 int retry_cnt;
271 int ret = 0;
272 271
273 atomic_set(&chip->i2c_busy, 1);
274 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 272 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
275 if (atomic_read(&chip->pm_suspend)) { 273 if (atomic_read(&chip->pm_suspend)) {
276 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 274 dev_err(chip->dev, "i2c: pm suspend, retry %d/%d\n",
277 retry_cnt + 1, FUSB302_RESUME_RETRY); 275 retry_cnt + 1, FUSB302_RESUME_RETRY);
278 msleep(FUSB302_RESUME_RETRY_SLEEP); 276 msleep(FUSB302_RESUME_RETRY_SLEEP);
279 } else { 277 } else {
280 break; 278 return false;
281 } 279 }
282 } 280 }
281
282 return true;
283}
284
285static int fusb302_i2c_write(struct fusb302_chip *chip,
286 u8 address, u8 data)
287{
288 int ret = 0;
289
290 atomic_set(&chip->i2c_busy, 1);
291
292 if (fusb302_is_suspended(chip)) {
293 atomic_set(&chip->i2c_busy, 0);
294 return -ETIMEDOUT;
295 }
296
283 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data); 297 ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data);
284 if (ret < 0) 298 if (ret < 0)
285 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d", 299 fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d",
@@ -292,21 +306,17 @@ static int fusb302_i2c_write(struct fusb302_chip *chip,
292static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address, 306static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
293 u8 length, const u8 *data) 307 u8 length, const u8 *data)
294{ 308{
295 int retry_cnt;
296 int ret = 0; 309 int ret = 0;
297 310
298 if (length <= 0) 311 if (length <= 0)
299 return ret; 312 return ret;
300 atomic_set(&chip->i2c_busy, 1); 313 atomic_set(&chip->i2c_busy, 1);
301 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 314
302 if (atomic_read(&chip->pm_suspend)) { 315 if (fusb302_is_suspended(chip)) {
303 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 316 atomic_set(&chip->i2c_busy, 0);
304 retry_cnt + 1, FUSB302_RESUME_RETRY); 317 return -ETIMEDOUT;
305 msleep(FUSB302_RESUME_RETRY_SLEEP);
306 } else {
307 break;
308 }
309 } 318 }
319
310 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address, 320 ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address,
311 length, data); 321 length, data);
312 if (ret < 0) 322 if (ret < 0)
@@ -320,19 +330,15 @@ static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
320static int fusb302_i2c_read(struct fusb302_chip *chip, 330static int fusb302_i2c_read(struct fusb302_chip *chip,
321 u8 address, u8 *data) 331 u8 address, u8 *data)
322{ 332{
323 int retry_cnt;
324 int ret = 0; 333 int ret = 0;
325 334
326 atomic_set(&chip->i2c_busy, 1); 335 atomic_set(&chip->i2c_busy, 1);
327 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 336
328 if (atomic_read(&chip->pm_suspend)) { 337 if (fusb302_is_suspended(chip)) {
329 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 338 atomic_set(&chip->i2c_busy, 0);
330 retry_cnt + 1, FUSB302_RESUME_RETRY); 339 return -ETIMEDOUT;
331 msleep(FUSB302_RESUME_RETRY_SLEEP);
332 } else {
333 break;
334 }
335 } 340 }
341
336 ret = i2c_smbus_read_byte_data(chip->i2c_client, address); 342 ret = i2c_smbus_read_byte_data(chip->i2c_client, address);
337 *data = (u8)ret; 343 *data = (u8)ret;
338 if (ret < 0) 344 if (ret < 0)
@@ -345,33 +351,31 @@ static int fusb302_i2c_read(struct fusb302_chip *chip,
345static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address, 351static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
346 u8 length, u8 *data) 352 u8 length, u8 *data)
347{ 353{
348 int retry_cnt;
349 int ret = 0; 354 int ret = 0;
350 355
351 if (length <= 0) 356 if (length <= 0)
352 return ret; 357 return ret;
353 atomic_set(&chip->i2c_busy, 1); 358 atomic_set(&chip->i2c_busy, 1);
354 for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) { 359
355 if (atomic_read(&chip->pm_suspend)) { 360 if (fusb302_is_suspended(chip)) {
356 pr_err("fusb302_i2c: pm suspend, retry %d/%d\n", 361 atomic_set(&chip->i2c_busy, 0);
357 retry_cnt + 1, FUSB302_RESUME_RETRY); 362 return -ETIMEDOUT;
358 msleep(FUSB302_RESUME_RETRY_SLEEP);
359 } else {
360 break;
361 }
362 } 363 }
364
363 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address, 365 ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address,
364 length, data); 366 length, data);
365 if (ret < 0) { 367 if (ret < 0) {
366 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d", 368 fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d",
367 address, length, ret); 369 address, length, ret);
368 return ret; 370 goto done;
369 } 371 }
370 if (ret != length) { 372 if (ret != length) {
371 fusb302_log(chip, "only read %d/%d bytes from 0x%02x", 373 fusb302_log(chip, "only read %d/%d bytes from 0x%02x",
372 ret, length, address); 374 ret, length, address);
373 return -EIO; 375 ret = -EIO;
374 } 376 }
377
378done:
375 atomic_set(&chip->i2c_busy, 0); 379 atomic_set(&chip->i2c_busy, 0);
376 380
377 return ret; 381 return ret;
@@ -489,7 +493,7 @@ static int tcpm_init(struct tcpc_dev *dev)
489 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data); 493 ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data);
490 if (ret < 0) 494 if (ret < 0)
491 return ret; 495 return ret;
492 chip->vbus_present = !!(FUSB_REG_STATUS0 & FUSB_REG_STATUS0_VBUSOK); 496 chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK);
493 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data); 497 ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data);
494 if (ret < 0) 498 if (ret < 0)
495 return ret; 499 return ret;
@@ -1025,7 +1029,7 @@ static int fusb302_pd_send_message(struct fusb302_chip *chip,
1025 buf[pos++] = FUSB302_TKN_SYNC1; 1029 buf[pos++] = FUSB302_TKN_SYNC1;
1026 buf[pos++] = FUSB302_TKN_SYNC2; 1030 buf[pos++] = FUSB302_TKN_SYNC2;
1027 1031
1028 len = pd_header_cnt(msg->header) * 4; 1032 len = pd_header_cnt_le(msg->header) * 4;
1029 /* plug 2 for header */ 1033 /* plug 2 for header */
1030 len += 2; 1034 len += 2;
1031 if (len > 0x1F) { 1035 if (len > 0x1F) {
@@ -1481,7 +1485,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
1481 (u8 *)&msg->header); 1485 (u8 *)&msg->header);
1482 if (ret < 0) 1486 if (ret < 0)
1483 return ret; 1487 return ret;
1484 len = pd_header_cnt(msg->header) * 4; 1488 len = pd_header_cnt_le(msg->header) * 4;
1485 /* add 4 to length to include the CRC */ 1489 /* add 4 to length to include the CRC */
1486 if (len > PD_MAX_PAYLOAD * 4) { 1490 if (len > PD_MAX_PAYLOAD * 4) {
1487 fusb302_log(chip, "PD message too long %d", len); 1491 fusb302_log(chip, "PD message too long %d", len);
@@ -1663,14 +1667,12 @@ static int init_gpio(struct fusb302_chip *chip)
1663 if (ret < 0) { 1667 if (ret < 0) {
1664 fusb302_log(chip, 1668 fusb302_log(chip,
1665 "cannot set GPIO Int_N to input, ret=%d", ret); 1669 "cannot set GPIO Int_N to input, ret=%d", ret);
1666 gpio_free(chip->gpio_int_n);
1667 return ret; 1670 return ret;
1668 } 1671 }
1669 ret = gpio_to_irq(chip->gpio_int_n); 1672 ret = gpio_to_irq(chip->gpio_int_n);
1670 if (ret < 0) { 1673 if (ret < 0) {
1671 fusb302_log(chip, 1674 fusb302_log(chip,
1672 "cannot request IRQ for GPIO Int_N, ret=%d", ret); 1675 "cannot request IRQ for GPIO Int_N, ret=%d", ret);
1673 gpio_free(chip->gpio_int_n);
1674 return ret; 1676 return ret;
1675 } 1677 }
1676 chip->gpio_int_n_irq = ret; 1678 chip->gpio_int_n_irq = ret;
@@ -1787,11 +1789,13 @@ static const struct of_device_id fusb302_dt_match[] = {
1787 {.compatible = "fcs,fusb302"}, 1789 {.compatible = "fcs,fusb302"},
1788 {}, 1790 {},
1789}; 1791};
1792MODULE_DEVICE_TABLE(of, fusb302_dt_match);
1790 1793
1791static const struct i2c_device_id fusb302_i2c_device_id[] = { 1794static const struct i2c_device_id fusb302_i2c_device_id[] = {
1792 {"typec_fusb302", 0}, 1795 {"typec_fusb302", 0},
1793 {}, 1796 {},
1794}; 1797};
1798MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id);
1795 1799
1796static const struct dev_pm_ops fusb302_pm_ops = { 1800static const struct dev_pm_ops fusb302_pm_ops = {
1797 .suspend = fusb302_pm_suspend, 1801 .suspend = fusb302_pm_suspend,
diff --git a/drivers/staging/typec/pd.h b/drivers/staging/typec/pd.h
index 8d97bdb95f23..510ef7279900 100644
--- a/drivers/staging/typec/pd.h
+++ b/drivers/staging/typec/pd.h
@@ -92,6 +92,16 @@ static inline unsigned int pd_header_type_le(__le16 header)
92 return pd_header_type(le16_to_cpu(header)); 92 return pd_header_type(le16_to_cpu(header));
93} 93}
94 94
95static inline unsigned int pd_header_msgid(u16 header)
96{
97 return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK;
98}
99
100static inline unsigned int pd_header_msgid_le(__le16 header)
101{
102 return pd_header_msgid(le16_to_cpu(header));
103}
104
95#define PD_MAX_PAYLOAD 7 105#define PD_MAX_PAYLOAD 7
96 106
97struct pd_message { 107struct pd_message {
diff --git a/drivers/staging/typec/pd_vdo.h b/drivers/staging/typec/pd_vdo.h
index dba172e0e0d1..d92259f8de0a 100644
--- a/drivers/staging/typec/pd_vdo.h
+++ b/drivers/staging/typec/pd_vdo.h
@@ -22,6 +22,9 @@
22 * VDM object is minimum of VDM header + 6 additional data objects. 22 * VDM object is minimum of VDM header + 6 additional data objects.
23 */ 23 */
24 24
25#define VDO_MAX_OBJECTS 6
26#define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1)
27
25/* 28/*
26 * VDM header 29 * VDM header
27 * ---------- 30 * ----------
@@ -34,7 +37,6 @@
34 * <5> :: reserved (SVDM), command type (UVDM) 37 * <5> :: reserved (SVDM), command type (UVDM)
35 * <4:0> :: command 38 * <4:0> :: command
36 */ 39 */
37#define VDO_MAX_SIZE 7
38#define VDO(vid, type, custom) \ 40#define VDO(vid, type, custom) \
39 (((vid) << 16) | \ 41 (((vid) << 16) | \
40 ((type) << 15) | \ 42 ((type) << 15) | \
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
index 5e5be74c7850..df72d8b01e73 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/staging/typec/tcpci.c
@@ -425,7 +425,7 @@ static const struct regmap_config tcpci_regmap_config = {
425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */ 425 .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
426}; 426};
427 427
428const struct tcpc_config tcpci_tcpc_config = { 428static const struct tcpc_config tcpci_tcpc_config = {
429 .type = TYPEC_PORT_DFP, 429 .type = TYPEC_PORT_DFP,
430 .default_role = TYPEC_SINK, 430 .default_role = TYPEC_SINK,
431}; 431};
diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c
index abba655ba00a..20eb4ebcf8c3 100644
--- a/drivers/staging/typec/tcpm.c
+++ b/drivers/staging/typec/tcpm.c
@@ -238,6 +238,7 @@ struct tcpm_port {
238 unsigned int hard_reset_count; 238 unsigned int hard_reset_count;
239 bool pd_capable; 239 bool pd_capable;
240 bool explicit_contract; 240 bool explicit_contract;
241 unsigned int rx_msgid;
241 242
242 /* Partner capabilities/requests */ 243 /* Partner capabilities/requests */
243 u32 sink_request; 244 u32 sink_request;
@@ -251,6 +252,8 @@ struct tcpm_port {
251 unsigned int nr_src_pdo; 252 unsigned int nr_src_pdo;
252 u32 snk_pdo[PDO_MAX_OBJECTS]; 253 u32 snk_pdo[PDO_MAX_OBJECTS];
253 unsigned int nr_snk_pdo; 254 unsigned int nr_snk_pdo;
255 u32 snk_vdo[VDO_MAX_OBJECTS];
256 unsigned int nr_snk_vdo;
254 257
255 unsigned int max_snk_mv; 258 unsigned int max_snk_mv;
256 unsigned int max_snk_ma; 259 unsigned int max_snk_ma;
@@ -997,6 +1000,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
997 struct pd_mode_data *modep; 1000 struct pd_mode_data *modep;
998 int rlen = 0; 1001 int rlen = 0;
999 u16 svid; 1002 u16 svid;
1003 int i;
1000 1004
1001 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", 1005 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1002 p0, cmd_type, cmd, cnt); 1006 p0, cmd_type, cmd, cnt);
@@ -1007,6 +1011,14 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
1007 case CMDT_INIT: 1011 case CMDT_INIT:
1008 switch (cmd) { 1012 switch (cmd) {
1009 case CMD_DISCOVER_IDENT: 1013 case CMD_DISCOVER_IDENT:
1014 /* 6.4.4.3.1: Only respond as UFP (device) */
1015 if (port->data_role == TYPEC_DEVICE &&
1016 port->nr_snk_vdo) {
1017 for (i = 0; i < port->nr_snk_vdo; i++)
1018 response[i + 1]
1019 = cpu_to_le32(port->snk_vdo[i]);
1020 rlen = port->nr_snk_vdo + 1;
1021 }
1010 break; 1022 break;
1011 case CMD_DISCOVER_SVID: 1023 case CMD_DISCOVER_SVID:
1012 break; 1024 break;
@@ -1415,6 +1427,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1415 break; 1427 break;
1416 case SOFT_RESET_SEND: 1428 case SOFT_RESET_SEND:
1417 port->message_id = 0; 1429 port->message_id = 0;
1430 port->rx_msgid = -1;
1418 if (port->pwr_role == TYPEC_SOURCE) 1431 if (port->pwr_role == TYPEC_SOURCE)
1419 next_state = SRC_SEND_CAPABILITIES; 1432 next_state = SRC_SEND_CAPABILITIES;
1420 else 1433 else
@@ -1503,6 +1516,22 @@ static void tcpm_pd_rx_handler(struct work_struct *work)
1503 port->attached); 1516 port->attached);
1504 1517
1505 if (port->attached) { 1518 if (port->attached) {
1519 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1520 unsigned int msgid = pd_header_msgid_le(msg->header);
1521
1522 /*
1523 * USB PD standard, 6.6.1.2:
1524 * "... if MessageID value in a received Message is the
1525 * same as the stored value, the receiver shall return a
1526 * GoodCRC Message with that MessageID value and drop
1527 * the Message (this is a retry of an already received
1528 * Message). Note: this shall not apply to the Soft_Reset
1529 * Message which always has a MessageID value of zero."
1530 */
1531 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1532 goto done;
1533 port->rx_msgid = msgid;
1534
1506 /* 1535 /*
1507 * If both ends believe to be DFP/host, we have a data role 1536 * If both ends believe to be DFP/host, we have a data role
1508 * mismatch. 1537 * mismatch.
@@ -1520,6 +1549,7 @@ static void tcpm_pd_rx_handler(struct work_struct *work)
1520 } 1549 }
1521 } 1550 }
1522 1551
1552done:
1523 mutex_unlock(&port->lock); 1553 mutex_unlock(&port->lock);
1524 kfree(event); 1554 kfree(event);
1525} 1555}
@@ -1719,8 +1749,7 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1719 } 1749 }
1720 ma = min(ma, port->max_snk_ma); 1750 ma = min(ma, port->max_snk_ma);
1721 1751
1722 /* XXX: Any other flags need to be set? */ 1752 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1723 flags = 0;
1724 1753
1725 /* Set mismatch bit if offered power is less than operating power */ 1754 /* Set mismatch bit if offered power is less than operating power */
1726 mw = ma * mv / 1000; 1755 mw = ma * mv / 1000;
@@ -1957,6 +1986,12 @@ static void tcpm_reset_port(struct tcpm_port *port)
1957 port->attached = false; 1986 port->attached = false;
1958 port->pd_capable = false; 1987 port->pd_capable = false;
1959 1988
1989 /*
1990 * First Rx ID should be 0; set this to a sentinel of -1 so that
1991 * we can check tcpm_pd_rx_handler() if we had seen it before.
1992 */
1993 port->rx_msgid = -1;
1994
1960 port->tcpc->set_pd_rx(port->tcpc, false); 1995 port->tcpc->set_pd_rx(port->tcpc, false);
1961 tcpm_init_vbus(port); /* also disables charging */ 1996 tcpm_init_vbus(port); /* also disables charging */
1962 tcpm_init_vconn(port); 1997 tcpm_init_vconn(port);
@@ -2170,6 +2205,7 @@ static void run_state_machine(struct tcpm_port *port)
2170 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2205 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2171 port->caps_count = 0; 2206 port->caps_count = 0;
2172 port->message_id = 0; 2207 port->message_id = 0;
2208 port->rx_msgid = -1;
2173 port->explicit_contract = false; 2209 port->explicit_contract = false;
2174 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2210 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2175 break; 2211 break;
@@ -2329,6 +2365,7 @@ static void run_state_machine(struct tcpm_port *port)
2329 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB); 2365 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
2330 port->pwr_opmode = TYPEC_PWR_MODE_USB; 2366 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2331 port->message_id = 0; 2367 port->message_id = 0;
2368 port->rx_msgid = -1;
2332 port->explicit_contract = false; 2369 port->explicit_contract = false;
2333 tcpm_set_state(port, SNK_DISCOVERY, 0); 2370 tcpm_set_state(port, SNK_DISCOVERY, 0);
2334 break; 2371 break;
@@ -2496,6 +2533,7 @@ static void run_state_machine(struct tcpm_port *port)
2496 /* Soft_Reset states */ 2533 /* Soft_Reset states */
2497 case SOFT_RESET: 2534 case SOFT_RESET:
2498 port->message_id = 0; 2535 port->message_id = 0;
2536 port->rx_msgid = -1;
2499 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 2537 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2500 if (port->pwr_role == TYPEC_SOURCE) 2538 if (port->pwr_role == TYPEC_SOURCE)
2501 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 2539 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
@@ -2504,6 +2542,7 @@ static void run_state_machine(struct tcpm_port *port)
2504 break; 2542 break;
2505 case SOFT_RESET_SEND: 2543 case SOFT_RESET_SEND:
2506 port->message_id = 0; 2544 port->message_id = 0;
2545 port->rx_msgid = -1;
2507 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) 2546 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2508 tcpm_set_state_cond(port, hard_reset_state(port), 0); 2547 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2509 else 2548 else
@@ -2568,6 +2607,14 @@ static void run_state_machine(struct tcpm_port *port)
2568 break; 2607 break;
2569 case PR_SWAP_SRC_SNK_SOURCE_OFF: 2608 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2570 tcpm_set_cc(port, TYPEC_CC_RD); 2609 tcpm_set_cc(port, TYPEC_CC_RD);
2610 /*
2611 * USB-PD standard, 6.2.1.4, Port Power Role:
2612 * "During the Power Role Swap Sequence, for the initial Source
2613 * Port, the Port Power Role field shall be set to Sink in the
2614 * PS_RDY Message indicating that the initial Source’s power
2615 * supply is turned off"
2616 */
2617 tcpm_set_pwr_role(port, TYPEC_SINK);
2571 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 2618 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2572 tcpm_set_state(port, ERROR_RECOVERY, 0); 2619 tcpm_set_state(port, ERROR_RECOVERY, 0);
2573 break; 2620 break;
@@ -2575,7 +2622,6 @@ static void run_state_machine(struct tcpm_port *port)
2575 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON); 2622 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2576 break; 2623 break;
2577 case PR_SWAP_SRC_SNK_SINK_ON: 2624 case PR_SWAP_SRC_SNK_SINK_ON:
2578 tcpm_set_pwr_role(port, TYPEC_SINK);
2579 tcpm_swap_complete(port, 0); 2625 tcpm_swap_complete(port, 0);
2580 tcpm_set_state(port, SNK_STARTUP, 0); 2626 tcpm_set_state(port, SNK_STARTUP, 0);
2581 break; 2627 break;
@@ -2587,8 +2633,15 @@ static void run_state_machine(struct tcpm_port *port)
2587 case PR_SWAP_SNK_SRC_SOURCE_ON: 2633 case PR_SWAP_SNK_SRC_SOURCE_ON:
2588 tcpm_set_cc(port, tcpm_rp_cc(port)); 2634 tcpm_set_cc(port, tcpm_rp_cc(port));
2589 tcpm_set_vbus(port, true); 2635 tcpm_set_vbus(port, true);
2590 tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 2636 /*
2637 * USB PD standard, 6.2.1.4:
2638 * "Subsequent Messages initiated by the Policy Engine,
2639 * such as the PS_RDY Message sent to indicate that Vbus
2640 * is ready, will have the Port Power Role field set to
2641 * Source."
2642 */
2591 tcpm_set_pwr_role(port, TYPEC_SOURCE); 2643 tcpm_set_pwr_role(port, TYPEC_SOURCE);
2644 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2592 tcpm_swap_complete(port, 0); 2645 tcpm_swap_complete(port, 0);
2593 tcpm_set_state(port, SRC_STARTUP, 0); 2646 tcpm_set_state(port, SRC_STARTUP, 0);
2594 break; 2647 break;
@@ -3292,6 +3345,20 @@ static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3292 return nr_pdo; 3345 return nr_pdo;
3293} 3346}
3294 3347
3348static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3349 unsigned int nr_vdo)
3350{
3351 unsigned int i;
3352
3353 if (nr_vdo > VDO_MAX_OBJECTS)
3354 nr_vdo = VDO_MAX_OBJECTS;
3355
3356 for (i = 0; i < nr_vdo; i++)
3357 dest_vdo[i] = src_vdo[i];
3358
3359 return nr_vdo;
3360}
3361
3295void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, 3362void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3296 unsigned int nr_pdo) 3363 unsigned int nr_pdo)
3297{ 3364{
@@ -3382,6 +3449,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3382 tcpc->config->nr_src_pdo); 3449 tcpc->config->nr_src_pdo);
3383 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3450 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3384 tcpc->config->nr_snk_pdo); 3451 tcpc->config->nr_snk_pdo);
3452 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3453 tcpc->config->nr_snk_vdo);
3385 3454
3386 port->max_snk_mv = tcpc->config->max_snk_mv; 3455 port->max_snk_mv = tcpc->config->max_snk_mv;
3387 port->max_snk_ma = tcpc->config->max_snk_ma; 3456 port->max_snk_ma = tcpc->config->max_snk_ma;
diff --git a/drivers/staging/typec/tcpm.h b/drivers/staging/typec/tcpm.h
index 969b365e6549..19c307d31a5a 100644
--- a/drivers/staging/typec/tcpm.h
+++ b/drivers/staging/typec/tcpm.h
@@ -60,6 +60,9 @@ struct tcpc_config {
60 const u32 *snk_pdo; 60 const u32 *snk_pdo;
61 unsigned int nr_snk_pdo; 61 unsigned int nr_snk_pdo;
62 62
63 const u32 *snk_vdo;
64 unsigned int nr_snk_vdo;
65
63 unsigned int max_snk_mv; 66 unsigned int max_snk_mv;
64 unsigned int max_snk_ma; 67 unsigned int max_snk_ma;
65 unsigned int max_snk_mw; 68 unsigned int max_snk_mw;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 988ee61fb4a7..d04db3f55519 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -502,8 +502,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
502 */ 502 */
503 sg_init_table(scatterlist, num_pages); 503 sg_init_table(scatterlist, num_pages);
504 /* Now set the pages for each scatterlist */ 504 /* Now set the pages for each scatterlist */
505 for (i = 0; i < num_pages; i++) 505 for (i = 0; i < num_pages; i++) {
506 sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); 506 unsigned int len = PAGE_SIZE - offset;
507
508 if (len > count)
509 len = count;
510 sg_set_page(scatterlist + i, pages[i], len, offset);
511 offset = 0;
512 count -= len;
513 }
507 514
508 dma_buffers = dma_map_sg(g_dev, 515 dma_buffers = dma_map_sg(g_dev,
509 scatterlist, 516 scatterlist,
@@ -524,20 +531,20 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
524 u32 addr = sg_dma_address(sg); 531 u32 addr = sg_dma_address(sg);
525 532
526 /* Note: addrs is the address + page_count - 1 533 /* Note: addrs is the address + page_count - 1
527 * The firmware expects the block to be page 534 * The firmware expects blocks after the first to be page-
528 * aligned and a multiple of the page size 535 * aligned and a multiple of the page size
529 */ 536 */
530 WARN_ON(len == 0); 537 WARN_ON(len == 0);
531 WARN_ON(len & ~PAGE_MASK); 538 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
532 WARN_ON(addr & ~PAGE_MASK); 539 WARN_ON(i && (addr & ~PAGE_MASK));
533 if (k > 0 && 540 if (k > 0 &&
534 ((addrs[k - 1] & PAGE_MASK) | 541 ((addrs[k - 1] & PAGE_MASK) +
535 ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) 542 (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
536 == addr) { 543 == (addr & PAGE_MASK))
537 addrs[k - 1] += (len >> PAGE_SHIFT); 544 addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
538 } else { 545 else
539 addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); 546 addrs[k++] = (addr & PAGE_MASK) |
540 } 547 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
541 } 548 }
542 549
543 /* Partial cache lines (fragments) require special measures */ 550 /* Partial cache lines (fragments) require special measures */
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 2330a4eb4e8b..a6df12d88f90 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -1,6 +1,7 @@
1# Generic Trusted Execution Environment Configuration 1# Generic Trusted Execution Environment Configuration
2config TEE 2config TEE
3 tristate "Trusted Execution Environment support" 3 tristate "Trusted Execution Environment support"
4 depends on HAVE_ARM_SMCCC || COMPILE_TEST
4 select DMA_SHARED_BUFFER 5 select DMA_SHARED_BUFFER
5 select GENERIC_ALLOCATOR 6 select GENERIC_ALLOCATOR
6 help 7 help
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index ab08af4654ef..42c098e86f84 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -9,8 +9,9 @@ config BCM2835_THERMAL
9config BCM_NS_THERMAL 9config BCM_NS_THERMAL
10 tristate "Northstar thermal driver" 10 tristate "Northstar thermal driver"
11 depends on ARCH_BCM_IPROC || COMPILE_TEST 11 depends on ARCH_BCM_IPROC || COMPILE_TEST
12 default y if ARCH_BCM_IPROC
12 help 13 help
13 Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081, 14 Support for the Northstar and Northstar Plus family of SoCs (e.g.
14 BCM4709 and BCM47094. It contains DMU (Device Management Unit) block 15 BCM4708, BCM4709, BCM5301x, BCM95852X, etc). It contains DMU (Device
15 with a thermal sensor that allows checking CPU temperature. This 16 Management Unit) block with a thermal sensor that allows checking CPU
16 driver provides support for it. 17 temperature.
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 644ba526d9ea..4362a69ac88d 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = {
195static int qoriq_tmu_probe(struct platform_device *pdev) 195static int qoriq_tmu_probe(struct platform_device *pdev)
196{ 196{
197 int ret; 197 int ret;
198 const struct thermal_trip *trip;
199 struct qoriq_tmu_data *data; 198 struct qoriq_tmu_data *data;
200 struct device_node *np = pdev->dev.of_node; 199 struct device_node *np = pdev->dev.of_node;
201 u32 site = 0; 200 u32 site = 0;
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
243 goto err_tmu; 242 goto err_tmu;
244 } 243 }
245 244
246 trip = of_thermal_get_trip_points(data->tz);
247
248 /* Enable monitoring */ 245 /* Enable monitoring */
249 site |= 0x1 << (15 - data->sensor_id); 246 site |= 0x1 << (15 - data->sensor_id);
250 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); 247 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index b21b9cc2c8d6..5a51c740e372 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work,
359 * This may be called from any critical situation to trigger a system shutdown 359 * This may be called from any critical situation to trigger a system shutdown
360 * after a known period of time. By default this is not scheduled. 360 * after a known period of time. By default this is not scheduled.
361 */ 361 */
362void thermal_emergency_poweroff(void) 362static void thermal_emergency_poweroff(void)
363{ 363{
364 int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; 364 int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
365 /* 365 /*
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ba9c302454fb..696ab3046b87 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
1010} 1010}
1011 1011
1012/** 1012/**
1013 * ti_bandgap_set_continous_mode() - One time enabling of continuous mode 1013 * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode
1014 * @bgp: pointer to struct ti_bandgap 1014 * @bgp: pointer to struct ti_bandgap
1015 * 1015 *
1016 * Call this function only if HAS(MODE_CONFIG) is set. As this driver may 1016 * Call this function only if HAS(MODE_CONFIG) is set. As this driver may
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
1214 } 1214 }
1215 1215
1216 bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); 1216 bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
1217 if (!bgp) { 1217 if (!bgp)
1218 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
1219 return ERR_PTR(-ENOMEM); 1218 return ERR_PTR(-ENOMEM);
1220 }
1221 1219
1222 of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); 1220 of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
1223 if (of_id) 1221 if (of_id)
1224 bgp->conf = of_id->data; 1222 bgp->conf = of_id->data;
1225 1223
1226 /* register shadow for context save and restore */ 1224 /* register shadow for context save and restore */
1227 bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * 1225 bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
1228 bgp->conf->sensor_count, GFP_KERNEL); 1226 sizeof(*bgp->regval), GFP_KERNEL);
1229 if (!bgp->regval) { 1227 if (!bgp->regval)
1230 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
1231 return ERR_PTR(-ENOMEM); 1228 return ERR_PTR(-ENOMEM);
1232 }
1233 1229
1234 i = 0; 1230 i = 0;
1235 do { 1231 do {
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 7ac9bcdf1e61..61fe8d6fd24e 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void)
764 ehv_bc_driver = alloc_tty_driver(count); 764 ehv_bc_driver = alloc_tty_driver(count);
765 if (!ehv_bc_driver) { 765 if (!ehv_bc_driver) {
766 ret = -ENOMEM; 766 ret = -ENOMEM;
767 goto error; 767 goto err_free_bcs;
768 } 768 }
769 769
770 ehv_bc_driver->driver_name = "ehv-bc"; 770 ehv_bc_driver->driver_name = "ehv-bc";
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void)
778 ret = tty_register_driver(ehv_bc_driver); 778 ret = tty_register_driver(ehv_bc_driver);
779 if (ret) { 779 if (ret) {
780 pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); 780 pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret);
781 goto error; 781 goto err_put_tty_driver;
782 } 782 }
783 783
784 ret = platform_driver_register(&ehv_bc_tty_driver); 784 ret = platform_driver_register(&ehv_bc_tty_driver);
785 if (ret) { 785 if (ret) {
786 pr_err("ehv-bc: could not register platform driver (ret=%i)\n", 786 pr_err("ehv-bc: could not register platform driver (ret=%i)\n",
787 ret); 787 ret);
788 goto error; 788 goto err_deregister_tty_driver;
789 } 789 }
790 790
791 return 0; 791 return 0;
792 792
793error: 793err_deregister_tty_driver:
794 if (ehv_bc_driver) { 794 tty_unregister_driver(ehv_bc_driver);
795 tty_unregister_driver(ehv_bc_driver); 795err_put_tty_driver:
796 put_tty_driver(ehv_bc_driver); 796 put_tty_driver(ehv_bc_driver);
797 } 797err_free_bcs:
798
799 kfree(bcs); 798 kfree(bcs);
800 799
801 return ret; 800 return ret;
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 433de5ea9b02..f71b47334149 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev)
122} 122}
123EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); 123EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
124 124
125int serdev_device_write_buf(struct serdev_device *serdev,
126 const unsigned char *buf, size_t count)
127{
128 struct serdev_controller *ctrl = serdev->ctrl;
129
130 if (!ctrl || !ctrl->ops->write_buf)
131 return -EINVAL;
132
133 return ctrl->ops->write_buf(ctrl, buf, count);
134}
135EXPORT_SYMBOL_GPL(serdev_device_write_buf);
136
125int serdev_device_write(struct serdev_device *serdev, 137int serdev_device_write(struct serdev_device *serdev,
126 const unsigned char *buf, size_t count, 138 const unsigned char *buf, size_t count,
127 unsigned long timeout) 139 unsigned long timeout)
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 487c88f6aa0e..d0a021c93986 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl)
102 return PTR_ERR(tty); 102 return PTR_ERR(tty);
103 serport->tty = tty; 103 serport->tty = tty;
104 104
105 serport->port->client_ops = &client_ops;
106 serport->port->client_data = ctrl;
107
108 if (tty->ops->open) 105 if (tty->ops->open)
109 tty->ops->open(serport->tty, NULL); 106 tty->ops->open(serport->tty, NULL);
110 else 107 else
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
215 struct device *parent, 212 struct device *parent,
216 struct tty_driver *drv, int idx) 213 struct tty_driver *drv, int idx)
217{ 214{
215 const struct tty_port_client_operations *old_ops;
218 struct serdev_controller *ctrl; 216 struct serdev_controller *ctrl;
219 struct serport *serport; 217 struct serport *serport;
220 int ret; 218 int ret;
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port,
233 231
234 ctrl->ops = &ctrl_ops; 232 ctrl->ops = &ctrl_ops;
235 233
234 old_ops = port->client_ops;
235 port->client_ops = &client_ops;
236 port->client_data = ctrl;
237
236 ret = serdev_controller_add(ctrl); 238 ret = serdev_controller_add(ctrl);
237 if (ret) 239 if (ret)
238 goto err_controller_put; 240 goto err_reset_data;
239 241
240 dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); 242 dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx);
241 return &ctrl->dev; 243 return &ctrl->dev;
242 244
243err_controller_put: 245err_reset_data:
246 port->client_data = NULL;
247 port->client_ops = old_ops;
244 serdev_controller_put(ctrl); 248 serdev_controller_put(ctrl);
249
245 return ERR_PTR(ret); 250 return ERR_PTR(ret);
246} 251}
247 252
248void serdev_tty_port_unregister(struct tty_port *port) 253int serdev_tty_port_unregister(struct tty_port *port)
249{ 254{
250 struct serdev_controller *ctrl = port->client_data; 255 struct serdev_controller *ctrl = port->client_data;
251 struct serport *serport = serdev_controller_get_drvdata(ctrl); 256 struct serport *serport = serdev_controller_get_drvdata(ctrl);
252 257
253 if (!serport) 258 if (!serport)
254 return; 259 return -ENODEV;
255 260
256 serdev_controller_remove(ctrl); 261 serdev_controller_remove(ctrl);
257 port->client_ops = NULL; 262 port->client_ops = NULL;
258 port->client_data = NULL; 263 port->client_data = NULL;
259 serdev_controller_put(ctrl); 264 serdev_controller_put(ctrl);
265
266 return 0;
260} 267}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 09a65a3ec7f7..68fd045a7025 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -47,6 +47,7 @@
47/* 47/*
48 * These are definitions for the Exar XR17V35X and XR17(C|D)15X 48 * These are definitions for the Exar XR17V35X and XR17(C|D)15X
49 */ 49 */
50#define UART_EXAR_INT0 0x80
50#define UART_EXAR_SLEEP 0x8b /* Sleep mode */ 51#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
51#define UART_EXAR_DVID 0x8d /* Device identification */ 52#define UART_EXAR_DVID 0x8d /* Device identification */
52 53
@@ -1337,7 +1338,7 @@ out_lock:
1337 /* 1338 /*
1338 * Check if the device is a Fintek F81216A 1339 * Check if the device is a Fintek F81216A
1339 */ 1340 */
1340 if (port->type == PORT_16550A) 1341 if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
1341 fintek_8250_probe(up); 1342 fintek_8250_probe(up);
1342 1343
1343 if (up->capabilities != old_capabilities) { 1344 if (up->capabilities != old_capabilities) {
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port)
1869static int exar_handle_irq(struct uart_port *port) 1870static int exar_handle_irq(struct uart_port *port)
1870{ 1871{
1871 unsigned int iir = serial_port_in(port, UART_IIR); 1872 unsigned int iir = serial_port_in(port, UART_IIR);
1872 int ret; 1873 int ret = 0;
1873 1874
1874 ret = serial8250_handle_irq(port, iir); 1875 if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
1876 serial_port_in(port, UART_EXAR_INT0) != 0)
1877 ret = 1;
1875 1878
1876 if ((port->type == PORT_XR17V35X) || 1879 ret |= serial8250_handle_irq(port, iir);
1877 (port->type == PORT_XR17D15X)) {
1878 serial_port_in(port, 0x80);
1879 serial_port_in(port, 0x81);
1880 serial_port_in(port, 0x82);
1881 serial_port_in(port, 0x83);
1882 }
1883 1880
1884 return ret; 1881 return ret;
1885} 1882}
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port)
2177 serial_port_in(port, UART_RX); 2174 serial_port_in(port, UART_RX);
2178 serial_port_in(port, UART_IIR); 2175 serial_port_in(port, UART_IIR);
2179 serial_port_in(port, UART_MSR); 2176 serial_port_in(port, UART_MSR);
2177 if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
2178 serial_port_in(port, UART_EXAR_INT0);
2180 2179
2181 /* 2180 /*
2182 * At this point, there's no way the LSR could still be 0xff; 2181 * At this point, there's no way the LSR could still be 0xff;
@@ -2335,6 +2334,8 @@ dont_test_tx_en:
2335 serial_port_in(port, UART_RX); 2334 serial_port_in(port, UART_RX);
2336 serial_port_in(port, UART_IIR); 2335 serial_port_in(port, UART_IIR);
2337 serial_port_in(port, UART_MSR); 2336 serial_port_in(port, UART_MSR);
2337 if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X))
2338 serial_port_in(port, UART_EXAR_INT0);
2338 up->lsr_saved_flags = 0; 2339 up->lsr_saved_flags = 0;
2339 up->msr_saved_flags = 0; 2340 up->msr_saved_flags = 0;
2340 2341
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 18e3f8342b85..0475f5d261ce 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
478 478
479 port = &altera_jtaguart_ports[i].port; 479 port = &altera_jtaguart_ports[i].port;
480 uart_remove_one_port(&altera_jtaguart_driver, port); 480 uart_remove_one_port(&altera_jtaguart_driver, port);
481 iounmap(port->membase);
481 482
482 return 0; 483 return 0;
483} 484}
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 46d3438a0d27..3e4b717670d7 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev)
615 if (port) { 615 if (port) {
616 uart_remove_one_port(&altera_uart_driver, port); 616 uart_remove_one_port(&altera_uart_driver, port);
617 port->mapbase = 0; 617 port->mapbase = 0;
618 iounmap(port->membase);
618 } 619 }
619 620
620 return 0; 621 return 0;
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index ebd8569f9ad5..9fff25be87f9 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
27#define UARTn_FRAME 0x04 27#define UARTn_FRAME 0x04
28#define UARTn_FRAME_DATABITS__MASK 0x000f 28#define UARTn_FRAME_DATABITS__MASK 0x000f
29#define UARTn_FRAME_DATABITS(n) ((n) - 3) 29#define UARTn_FRAME_DATABITS(n) ((n) - 3)
30#define UARTn_FRAME_PARITY__MASK 0x0300
30#define UARTn_FRAME_PARITY_NONE 0x0000 31#define UARTn_FRAME_PARITY_NONE 0x0000
31#define UARTn_FRAME_PARITY_EVEN 0x0200 32#define UARTn_FRAME_PARITY_EVEN 0x0200
32#define UARTn_FRAME_PARITY_ODD 0x0300 33#define UARTn_FRAME_PARITY_ODD 0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
572 16 * (4 + (clkdiv >> 6))); 573 16 * (4 + (clkdiv >> 6)));
573 574
574 frame = efm32_uart_read32(efm_port, UARTn_FRAME); 575 frame = efm32_uart_read32(efm_port, UARTn_FRAME);
575 if (frame & UARTn_FRAME_PARITY_ODD) 576 switch (frame & UARTn_FRAME_PARITY__MASK) {
577 case UARTn_FRAME_PARITY_ODD:
576 *parity = 'o'; 578 *parity = 'o';
577 else if (frame & UARTn_FRAME_PARITY_EVEN) 579 break;
580 case UARTn_FRAME_PARITY_EVEN:
578 *parity = 'e'; 581 *parity = 'e';
579 else 582 break;
583 default:
580 *parity = 'n'; 584 *parity = 'n';
585 }
581 586
582 *bits = (frame & UARTn_FRAME_DATABITS__MASK) - 587 *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
583 UARTn_FRAME_DATABITS(4) + 4; 588 UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 157883653256..f190a84a0246 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = {
1382static void __exit ifx_spi_exit(void) 1382static void __exit ifx_spi_exit(void)
1383{ 1383{
1384 /* unregister */ 1384 /* unregister */
1385 spi_unregister_driver(&ifx_spi_driver);
1385 tty_unregister_driver(tty_drv); 1386 tty_unregister_driver(tty_drv);
1386 put_tty_driver(tty_drv); 1387 put_tty_driver(tty_drv);
1387 spi_unregister_driver(&ifx_spi_driver);
1388 unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); 1388 unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
1389} 1389}
1390 1390
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 33509b4beaec..bbefddd92bfe 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev)
2184 * and DCD (when they are outputs) or enables the respective 2184 * and DCD (when they are outputs) or enables the respective
2185 * irqs. So set this bit early, i.e. before requesting irqs. 2185 * irqs. So set this bit early, i.e. before requesting irqs.
2186 */ 2186 */
2187 writel(UFCR_DCEDTE, sport->port.membase + UFCR); 2187 reg = readl(sport->port.membase + UFCR);
2188 if (!(reg & UFCR_DCEDTE))
2189 writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR);
2188 2190
2189 /* 2191 /*
2190 * Disable UCR3_RI and UCR3_DCD irqs. They are also not 2192 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev)
2195 sport->port.membase + UCR3); 2197 sport->port.membase + UCR3);
2196 2198
2197 } else { 2199 } else {
2198 writel(0, sport->port.membase + UFCR); 2200 unsigned long ucr3 = UCR3_DSR;
2201
2202 reg = readl(sport->port.membase + UFCR);
2203 if (reg & UFCR_DCEDTE)
2204 writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR);
2205
2206 if (!is_imx1_uart(sport))
2207 ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
2208 writel(ucr3, sport->port.membase + UCR3);
2199 } 2209 }
2200 2210
2201 clk_disable_unprepare(sport->clk_ipg); 2211 clk_disable_unprepare(sport->clk_ipg);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0f45b7884a2c..13bfd5dcffce 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
2083 mutex_lock(&port->mutex); 2083 mutex_lock(&port->mutex);
2084 2084
2085 tty_dev = device_find_child(uport->dev, &match, serial_match_port); 2085 tty_dev = device_find_child(uport->dev, &match, serial_match_port);
2086 if (device_may_wakeup(tty_dev)) { 2086 if (tty_dev && device_may_wakeup(tty_dev)) {
2087 if (!enable_irq_wake(uport->irq)) 2087 if (!enable_irq_wake(uport->irq))
2088 uport->irq_wake = 1; 2088 uport->irq_wake = 1;
2089 put_device(tty_dev); 2089 put_device(tty_dev);
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
2782 * Register the port whether it's detected or not. This allows 2782 * Register the port whether it's detected or not. This allows
2783 * setserial to be used to alter this port's parameters. 2783 * setserial to be used to alter this port's parameters.
2784 */ 2784 */
2785 tty_dev = tty_port_register_device_attr(port, drv->tty_driver, 2785 tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
2786 uport->line, uport->dev, port, uport->tty_groups); 2786 uport->line, uport->dev, port, uport->tty_groups);
2787 if (likely(!IS_ERR(tty_dev))) { 2787 if (likely(!IS_ERR(tty_dev))) {
2788 device_set_wakeup_capable(tty_dev, 1); 2788 device_set_wakeup_capable(tty_dev, 1);
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
2845 /* 2845 /*
2846 * Remove the devices from the tty layer 2846 * Remove the devices from the tty layer
2847 */ 2847 */
2848 tty_unregister_device(drv->tty_driver, uport->line); 2848 tty_port_unregister_device(port, drv->tty_driver, uport->line);
2849 2849
2850 tty = tty_port_tty_get(port); 2850 tty = tty_port_tty_get(port);
2851 if (tty) { 2851 if (tty) {
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 1d21a9c1d33e..4fb3165384c4 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -34,7 +34,9 @@ static int tty_port_default_receive_buf(struct tty_port *port,
34 if (!disc) 34 if (!disc)
35 return 0; 35 return 0;
36 36
37 mutex_lock(&tty->atomic_write_lock);
37 ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); 38 ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
39 mutex_unlock(&tty->atomic_write_lock);
38 40
39 tty_ldisc_deref(disc); 41 tty_ldisc_deref(disc);
40 42
@@ -129,19 +131,85 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
129 struct device *device, void *drvdata, 131 struct device *device, void *drvdata,
130 const struct attribute_group **attr_grp) 132 const struct attribute_group **attr_grp)
131{ 133{
134 tty_port_link_device(port, driver, index);
135 return tty_register_device_attr(driver, index, device, drvdata,
136 attr_grp);
137}
138EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
139
140/**
141 * tty_port_register_device_attr_serdev - register tty or serdev device
142 * @port: tty_port of the device
143 * @driver: tty_driver for this device
144 * @index: index of the tty
145 * @device: parent if exists, otherwise NULL
146 * @drvdata: driver data for the device
147 * @attr_grp: attribute group for the device
148 *
149 * Register a serdev or tty device depending on if the parent device has any
150 * defined serdev clients or not.
151 */
152struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
153 struct tty_driver *driver, unsigned index,
154 struct device *device, void *drvdata,
155 const struct attribute_group **attr_grp)
156{
132 struct device *dev; 157 struct device *dev;
133 158
134 tty_port_link_device(port, driver, index); 159 tty_port_link_device(port, driver, index);
135 160
136 dev = serdev_tty_port_register(port, device, driver, index); 161 dev = serdev_tty_port_register(port, device, driver, index);
137 if (PTR_ERR(dev) != -ENODEV) 162 if (PTR_ERR(dev) != -ENODEV) {
138 /* Skip creating cdev if we registered a serdev device */ 163 /* Skip creating cdev if we registered a serdev device */
139 return dev; 164 return dev;
165 }
140 166
141 return tty_register_device_attr(driver, index, device, drvdata, 167 return tty_register_device_attr(driver, index, device, drvdata,
142 attr_grp); 168 attr_grp);
143} 169}
144EXPORT_SYMBOL_GPL(tty_port_register_device_attr); 170EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
171
172/**
173 * tty_port_register_device_serdev - register tty or serdev device
174 * @port: tty_port of the device
175 * @driver: tty_driver for this device
176 * @index: index of the tty
177 * @device: parent if exists, otherwise NULL
178 *
179 * Register a serdev or tty device depending on if the parent device has any
180 * defined serdev clients or not.
181 */
182struct device *tty_port_register_device_serdev(struct tty_port *port,
183 struct tty_driver *driver, unsigned index,
184 struct device *device)
185{
186 return tty_port_register_device_attr_serdev(port, driver, index,
187 device, NULL, NULL);
188}
189EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
190
191/**
192 * tty_port_unregister_device - deregister a tty or serdev device
193 * @port: tty_port of the device
194 * @driver: tty_driver for this device
195 * @index: index of the tty
196 *
197 * If a tty or serdev device is registered with a call to
198 * tty_port_register_device_serdev() then this function must be called when
199 * the device is gone.
200 */
201void tty_port_unregister_device(struct tty_port *port,
202 struct tty_driver *driver, unsigned index)
203{
204 int ret;
205
206 ret = serdev_tty_port_unregister(port);
207 if (ret == 0)
208 return;
209
210 tty_unregister_device(driver, index);
211}
212EXPORT_SYMBOL_GPL(tty_port_unregister_device);
145 213
146int tty_port_alloc_xmit_buf(struct tty_port *port) 214int tty_port_alloc_xmit_buf(struct tty_port *port)
147{ 215{
@@ -189,9 +257,6 @@ static void tty_port_destructor(struct kref *kref)
189 /* check if last port ref was dropped before tty release */ 257 /* check if last port ref was dropped before tty release */
190 if (WARN_ON(port->itty)) 258 if (WARN_ON(port->itty))
191 return; 259 return;
192
193 serdev_tty_port_unregister(port);
194
195 if (port->xmit_buf) 260 if (port->xmit_buf)
196 free_page((unsigned long)port->xmit_buf); 261 free_page((unsigned long)port->xmit_buf);
197 tty_port_destroy(port); 262 tty_port_destroy(port);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 1c196f87e9d9..ff04b7f8549f 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -279,7 +279,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
279 map = kzalloc(sizeof(*map), GFP_KERNEL); 279 map = kzalloc(sizeof(*map), GFP_KERNEL);
280 if (!map) { 280 if (!map) {
281 ret = -ENOMEM; 281 ret = -ENOMEM;
282 goto err_map_kobj; 282 goto err_map;
283 } 283 }
284 kobject_init(&map->kobj, &map_attr_type); 284 kobject_init(&map->kobj, &map_attr_type);
285 map->mem = mem; 285 map->mem = mem;
@@ -289,7 +289,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
289 goto err_map_kobj; 289 goto err_map_kobj;
290 ret = kobject_uevent(&map->kobj, KOBJ_ADD); 290 ret = kobject_uevent(&map->kobj, KOBJ_ADD);
291 if (ret) 291 if (ret)
292 goto err_map; 292 goto err_map_kobj;
293 } 293 }
294 294
295 for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { 295 for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
@@ -308,7 +308,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
308 portio = kzalloc(sizeof(*portio), GFP_KERNEL); 308 portio = kzalloc(sizeof(*portio), GFP_KERNEL);
309 if (!portio) { 309 if (!portio) {
310 ret = -ENOMEM; 310 ret = -ENOMEM;
311 goto err_portio_kobj; 311 goto err_portio;
312 } 312 }
313 kobject_init(&portio->kobj, &portio_attr_type); 313 kobject_init(&portio->kobj, &portio_attr_type);
314 portio->port = port; 314 portio->port = port;
@@ -319,7 +319,7 @@ static int uio_dev_add_attributes(struct uio_device *idev)
319 goto err_portio_kobj; 319 goto err_portio_kobj;
320 ret = kobject_uevent(&portio->kobj, KOBJ_ADD); 320 ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
321 if (ret) 321 if (ret)
322 goto err_portio; 322 goto err_portio_kobj;
323 } 323 }
324 324
325 return 0; 325 return 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index cfc3cff6e8d5..8e6ef671be9b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -475,11 +475,11 @@ static void snoop_urb(struct usb_device *udev,
475 475
476 if (userurb) { /* Async */ 476 if (userurb) { /* Async */
477 if (when == SUBMIT) 477 if (when == SUBMIT)
478 dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " 478 dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
479 "length %u\n", 479 "length %u\n",
480 userurb, ep, t, d, length); 480 userurb, ep, t, d, length);
481 else 481 else
482 dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " 482 dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
483 "actual_length %u status %d\n", 483 "actual_length %u status %d\n",
484 userurb, ep, t, d, length, 484 userurb, ep, t, d, length,
485 timeout_or_status); 485 timeout_or_status);
@@ -1895,7 +1895,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
1895 if (as) { 1895 if (as) {
1896 int retval; 1896 int retval;
1897 1897
1898 snoop(&ps->dev->dev, "reap %p\n", as->userurb); 1898 snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
1899 retval = processcompl(as, (void __user * __user *)arg); 1899 retval = processcompl(as, (void __user * __user *)arg);
1900 free_async(as); 1900 free_async(as);
1901 return retval; 1901 return retval;
@@ -1912,7 +1912,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
1912 1912
1913 as = async_getcompleted(ps); 1913 as = async_getcompleted(ps);
1914 if (as) { 1914 if (as) {
1915 snoop(&ps->dev->dev, "reap %p\n", as->userurb); 1915 snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
1916 retval = processcompl(as, (void __user * __user *)arg); 1916 retval = processcompl(as, (void __user * __user *)arg);
1917 free_async(as); 1917 free_async(as);
1918 } else { 1918 } else {
@@ -2043,7 +2043,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
2043 if (as) { 2043 if (as) {
2044 int retval; 2044 int retval;
2045 2045
2046 snoop(&ps->dev->dev, "reap %p\n", as->userurb); 2046 snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
2047 retval = processcompl_compat(as, (void __user * __user *)arg); 2047 retval = processcompl_compat(as, (void __user * __user *)arg);
2048 free_async(as); 2048 free_async(as);
2049 return retval; 2049 return retval;
@@ -2060,7 +2060,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
2060 2060
2061 as = async_getcompleted(ps); 2061 as = async_getcompleted(ps);
2062 if (as) { 2062 if (as) {
2063 snoop(&ps->dev->dev, "reap %p\n", as->userurb); 2063 snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
2064 retval = processcompl_compat(as, (void __user * __user *)arg); 2064 retval = processcompl_compat(as, (void __user * __user *)arg);
2065 free_async(as); 2065 free_async(as);
2066 } else { 2066 } else {
@@ -2489,7 +2489,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
2489#endif 2489#endif
2490 2490
2491 case USBDEVFS_DISCARDURB: 2491 case USBDEVFS_DISCARDURB:
2492 snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p); 2492 snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
2493 ret = proc_unlinkurb(ps, p); 2493 ret = proc_unlinkurb(ps, p);
2494 break; 2494 break;
2495 2495
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 49550790a3cb..5dea98358c05 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1723,7 +1723,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
1723 if (retval == 0) 1723 if (retval == 0)
1724 retval = -EINPROGRESS; 1724 retval = -EINPROGRESS;
1725 else if (retval != -EIDRM && retval != -EBUSY) 1725 else if (retval != -EIDRM && retval != -EBUSY)
1726 dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", 1726 dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
1727 urb, retval); 1727 urb, retval);
1728 usb_put_dev(udev); 1728 usb_put_dev(udev);
1729 } 1729 }
@@ -1890,7 +1890,7 @@ rescan:
1890 /* kick hcd */ 1890 /* kick hcd */
1891 unlink1(hcd, urb, -ESHUTDOWN); 1891 unlink1(hcd, urb, -ESHUTDOWN);
1892 dev_dbg (hcd->self.controller, 1892 dev_dbg (hcd->self.controller,
1893 "shutdown urb %p ep%d%s%s\n", 1893 "shutdown urb %pK ep%d%s%s\n",
1894 urb, usb_endpoint_num(&ep->desc), 1894 urb, usb_endpoint_num(&ep->desc),
1895 is_in ? "in" : "out", 1895 is_in ? "in" : "out",
1896 ({ char *s; 1896 ({ char *s;
@@ -2520,6 +2520,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
2520 hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), 2520 hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
2521 GFP_KERNEL); 2521 GFP_KERNEL);
2522 if (!hcd->bandwidth_mutex) { 2522 if (!hcd->bandwidth_mutex) {
2523 kfree(hcd->address0_mutex);
2523 kfree(hcd); 2524 kfree(hcd);
2524 dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); 2525 dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
2525 return NULL; 2526 return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 9dca59ef18b3..b8bb20d7acdb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -362,7 +362,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
362} 362}
363 363
364/* USB 2.0 spec Section 11.24.4.5 */ 364/* USB 2.0 spec Section 11.24.4.5 */
365static int get_hub_descriptor(struct usb_device *hdev, void *data) 365static int get_hub_descriptor(struct usb_device *hdev,
366 struct usb_hub_descriptor *desc)
366{ 367{
367 int i, ret, size; 368 int i, ret, size;
368 unsigned dtype; 369 unsigned dtype;
@@ -378,10 +379,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data)
378 for (i = 0; i < 3; i++) { 379 for (i = 0; i < 3; i++) {
379 ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 380 ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
380 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, 381 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
381 dtype << 8, 0, data, size, 382 dtype << 8, 0, desc, size,
382 USB_CTRL_GET_TIMEOUT); 383 USB_CTRL_GET_TIMEOUT);
383 if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) 384 if (hub_is_superspeed(hdev)) {
385 if (ret == size)
386 return ret;
387 } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
388 /* Make sure we have the DeviceRemovable field. */
389 size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
390 if (ret < size)
391 return -EMSGSIZE;
384 return ret; 392 return ret;
393 }
385 } 394 }
386 return -EINVAL; 395 return -EINVAL;
387} 396}
@@ -1313,7 +1322,7 @@ static int hub_configure(struct usb_hub *hub,
1313 } 1322 }
1314 mutex_init(&hub->status_mutex); 1323 mutex_init(&hub->status_mutex);
1315 1324
1316 hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); 1325 hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
1317 if (!hub->descriptor) { 1326 if (!hub->descriptor) {
1318 ret = -ENOMEM; 1327 ret = -ENOMEM;
1319 goto fail; 1328 goto fail;
@@ -1321,13 +1330,19 @@ static int hub_configure(struct usb_hub *hub,
1321 1330
1322 /* Request the entire hub descriptor. 1331 /* Request the entire hub descriptor.
1323 * hub->descriptor can handle USB_MAXCHILDREN ports, 1332 * hub->descriptor can handle USB_MAXCHILDREN ports,
1324 * but the hub can/will return fewer bytes here. 1333 * but a (non-SS) hub can/will return fewer bytes here.
1325 */ 1334 */
1326 ret = get_hub_descriptor(hdev, hub->descriptor); 1335 ret = get_hub_descriptor(hdev, hub->descriptor);
1327 if (ret < 0) { 1336 if (ret < 0) {
1328 message = "can't read hub descriptor"; 1337 message = "can't read hub descriptor";
1329 goto fail; 1338 goto fail;
1330 } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { 1339 }
1340
1341 maxchild = USB_MAXCHILDREN;
1342 if (hub_is_superspeed(hdev))
1343 maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
1344
1345 if (hub->descriptor->bNbrPorts > maxchild) {
1331 message = "hub has too many ports!"; 1346 message = "hub has too many ports!";
1332 ret = -ENODEV; 1347 ret = -ENODEV;
1333 goto fail; 1348 goto fail;
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index d787f195a9a6..d563cbcf76cf 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL_GPL(usb_of_get_child_node);
53 * 53 *
54 * Find the companion device from platform bus. 54 * Find the companion device from platform bus.
55 * 55 *
56 * Takes a reference to the returned struct device which needs to be dropped
57 * after use.
58 *
56 * Return: On success, a pointer to the companion device, %NULL on failure. 59 * Return: On success, a pointer to the companion device, %NULL on failure.
57 */ 60 */
58struct device *usb_of_get_companion_dev(struct device *dev) 61struct device *usb_of_get_companion_dev(struct device *dev)
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index d75cb8c0f7df..47903d510955 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -338,7 +338,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
338 if (!urb || !urb->complete) 338 if (!urb || !urb->complete)
339 return -EINVAL; 339 return -EINVAL;
340 if (urb->hcpriv) { 340 if (urb->hcpriv) {
341 WARN_ONCE(1, "URB %p submitted while active\n", urb); 341 WARN_ONCE(1, "URB %pK submitted while active\n", urb);
342 return -EBUSY; 342 return -EBUSY;
343 } 343 }
344 344
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 72664700b8a2..12ee23f53cdd 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev)
107 return PTR_ERR(kdwc->usbss); 107 return PTR_ERR(kdwc->usbss);
108 108
109 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb");
110 if (IS_ERR(kdwc->clk)) {
111 dev_err(kdwc->dev, "unable to get usb clock\n");
112 return PTR_ERR(kdwc->clk);
113 }
110 114
111 error = clk_prepare_enable(kdwc->clk); 115 error = clk_prepare_enable(kdwc->clk);
112 if (error < 0) { 116 if (error < 0) {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index a15ec71d0423..84a2cebfc712 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,8 @@
39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
41#define PCI_DEVICE_ID_INTEL_GLK 0x31aa 41#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
42#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
43#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
42 44
43#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 45#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
44#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 46#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -270,6 +272,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
270 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
271 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, 273 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
272 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, 274 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
275 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
276 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
273 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 277 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
274 { } /* Terminating Entry */ 278 { } /* Terminating Entry */
275}; 279};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f6f0b3be3ad..aea9a5b948b4 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1261,14 +1261,24 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf); 1261 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST; 1262 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1263 } 1263 }
1264 return 0;
1264 } 1265 }
1265 return 0; 1266
1267 if ((dep->flags & DWC3_EP_BUSY) &&
1268 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1269 WARN_ON_ONCE(!dep->resource_index);
1270 ret = __dwc3_gadget_kick_transfer(dep,
1271 dep->resource_index);
1272 }
1273
1274 goto out;
1266 } 1275 }
1267 1276
1268 if (!dwc3_calc_trbs_left(dep)) 1277 if (!dwc3_calc_trbs_left(dep))
1269 return 0; 1278 return 0;
1270 1279
1271 ret = __dwc3_gadget_kick_transfer(dep, 0); 1280 ret = __dwc3_gadget_kick_transfer(dep, 0);
1281out:
1272 if (ret == -EBUSY) 1282 if (ret == -EBUSY)
1273 ret = 0; 1283 ret = 0;
1274 1284
@@ -3026,6 +3036,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
3026 return IRQ_HANDLED; 3036 return IRQ_HANDLED;
3027 } 3037 }
3028 3038
3039 /*
3040 * With PCIe legacy interrupt, test shows that top-half irq handler can
3041 * be called again after HW interrupt deassertion. Check if bottom-half
3042 * irq event handler completes before caching new event to prevent
3043 * losing events.
3044 */
3045 if (evt->flags & DWC3_EVENT_PENDING)
3046 return IRQ_HANDLED;
3047
3029 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); 3048 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3030 count &= DWC3_GEVNTCOUNT_MASK; 3049 count &= DWC3_GEVNTCOUNT_MASK;
3031 if (!count) 3050 if (!count)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 71dd27c0d7f2..47dda3450abd 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1858,12 +1858,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1858 ep->ep->driver_data = ep; 1858 ep->ep->driver_data = ep;
1859 ep->ep->desc = ds; 1859 ep->ep->desc = ds;
1860 1860
1861 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + 1861 if (needs_comp_desc) {
1862 USB_DT_ENDPOINT_SIZE); 1862 comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
1863 ep->ep->maxburst = comp_desc->bMaxBurst + 1; 1863 USB_DT_ENDPOINT_SIZE);
1864 1864 ep->ep->maxburst = comp_desc->bMaxBurst + 1;
1865 if (needs_comp_desc)
1866 ep->ep->comp_desc = comp_desc; 1865 ep->ep->comp_desc = comp_desc;
1866 }
1867 1867
1868 ret = usb_ep_enable(ep->ep); 1868 ret = usb_ep_enable(ep->ep);
1869 if (likely(!ret)) { 1869 if (likely(!ret)) {
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 000677c991b0..9b0805f55ad7 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1256,7 +1256,7 @@ static void gserial_console_exit(void)
1256 struct gscons_info *info = &gscons_info; 1256 struct gscons_info *info = &gscons_info;
1257 1257
1258 unregister_console(&gserial_cons); 1258 unregister_console(&gserial_cons);
1259 if (info->console_thread != NULL) 1259 if (!IS_ERR_OR_NULL(info->console_thread))
1260 kthread_stop(info->console_thread); 1260 kthread_stop(info->console_thread);
1261 gs_buf_free(&info->con_buf); 1261 gs_buf_free(&info->con_buf);
1262} 1262}
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index c79081952ea0..ccabb51cb98d 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2008,7 +2008,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
2008 HUB_CHAR_COMMON_OCPM); 2008 HUB_CHAR_COMMON_OCPM);
2009 desc->bNbrPorts = 1; 2009 desc->bNbrPorts = 1;
2010 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ 2010 desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
2011 desc->u.ss.DeviceRemovable = 0xffff; 2011 desc->u.ss.DeviceRemovable = 0;
2012} 2012}
2013 2013
2014static inline void hub_descriptor(struct usb_hub_descriptor *desc) 2014static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2020,8 +2020,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
2020 HUB_CHAR_INDV_PORT_LPSM | 2020 HUB_CHAR_INDV_PORT_LPSM |
2021 HUB_CHAR_COMMON_OCPM); 2021 HUB_CHAR_COMMON_OCPM);
2022 desc->bNbrPorts = 1; 2022 desc->bNbrPorts = 1;
2023 desc->u.hs.DeviceRemovable[0] = 0xff; 2023 desc->u.hs.DeviceRemovable[0] = 0;
2024 desc->u.hs.DeviceRemovable[1] = 0xff; 2024 desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
2025} 2025}
2026 2026
2027static int dummy_hub_control( 2027static int dummy_hub_control(
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index bc7b9be12f54..f1908ea9fbd8 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -384,8 +384,10 @@ static int ehci_platform_resume(struct device *dev)
384 } 384 }
385 385
386 companion_dev = usb_of_get_companion_dev(hcd->self.controller); 386 companion_dev = usb_of_get_companion_dev(hcd->self.controller);
387 if (companion_dev) 387 if (companion_dev) {
388 device_pm_wait_for_dev(hcd->self.controller, companion_dev); 388 device_pm_wait_for_dev(hcd->self.controller, companion_dev);
389 put_device(companion_dev);
390 }
389 391
390 ehci_resume(hcd, priv->reset_on_resume); 392 ehci_resume(hcd, priv->reset_on_resume);
391 return 0; 393 return 0;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bfa7fa3d2eea..7bf78be1fd32 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1269 time = 30; 1269 time = 30;
1270 break; 1270 break;
1271 default: 1271 default:
1272 time = 300; 1272 time = 50;
1273 break; 1273 break;
1274 } 1274 }
1275 1275
@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1785 pipe = td->pipe; 1785 pipe = td->pipe;
1786 pipe_stop(r8a66597, pipe); 1786 pipe_stop(r8a66597, pipe);
1787 1787
1788 /* Select a different address or endpoint */
1788 new_td = td; 1789 new_td = td;
1789 do { 1790 do {
1790 list_move_tail(&new_td->queue, 1791 list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1794 new_td = td; 1795 new_td = td;
1795 break; 1796 break;
1796 } 1797 }
1797 } while (td != new_td && td->address == new_td->address); 1798 } while (td != new_td && td->address == new_td->address &&
1799 td->pipe->info.epnum == new_td->pipe->info.epnum);
1798 1800
1799 start_transfer(r8a66597, new_td); 1801 start_transfer(r8a66597, new_td);
1800 1802
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5e3e9d4c6956..0dde49c35dd2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -419,7 +419,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
419 wait_for_completion(cmd->completion); 419 wait_for_completion(cmd->completion);
420 420
421 if (cmd->status == COMP_COMMAND_ABORTED || 421 if (cmd->status == COMP_COMMAND_ABORTED ||
422 cmd->status == COMP_STOPPED) { 422 cmd->status == COMP_COMMAND_RING_STOPPED) {
423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 423 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
424 ret = -ETIME; 424 ret = -ETIME;
425 } 425 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bbe22bcc550a..1f1687e888d6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
56 } 56 }
57 57
58 if (max_packet) { 58 if (max_packet) {
59 seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); 59 seg->bounce_buf = kzalloc(max_packet, flags);
60 if (!seg->bounce_buf) { 60 if (!seg->bounce_buf) {
61 dma_pool_free(xhci->segment_pool, seg->trbs, dma); 61 dma_pool_free(xhci->segment_pool, seg->trbs, dma);
62 kfree(seg); 62 kfree(seg);
@@ -1724,7 +1724,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1724 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1725 for (i = 0; i < num_sp; i++) { 1725 for (i = 0; i < num_sp; i++) {
1726 dma_addr_t dma; 1726 dma_addr_t dma;
1727 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, 1727 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
1728 flags); 1728 flags);
1729 if (!buf) 1729 if (!buf)
1730 goto fail_sp4; 1730 goto fail_sp4;
@@ -2307,10 +2307,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2307 /* Place limits on the number of roothub ports so that the hub 2307 /* Place limits on the number of roothub ports so that the hub
2308 * descriptors aren't longer than the USB core will allocate. 2308 * descriptors aren't longer than the USB core will allocate.
2309 */ 2309 */
2310 if (xhci->num_usb3_ports > 15) { 2310 if (xhci->num_usb3_ports > USB_SS_MAXPORTS) {
2311 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2311 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2312 "Limiting USB 3.0 roothub ports to 15."); 2312 "Limiting USB 3.0 roothub ports to %u.",
2313 xhci->num_usb3_ports = 15; 2313 USB_SS_MAXPORTS);
2314 xhci->num_usb3_ports = USB_SS_MAXPORTS;
2314 } 2315 }
2315 if (xhci->num_usb2_ports > USB_MAXCHILDREN) { 2316 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2316 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2317 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7b86508ac8cf..fcf1f3f63e7a 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
52#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 52#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
53#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 53#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
55 56
56static const char hcd_name[] = "xhci_hcd"; 57static const char hcd_name[] = "xhci_hcd";
57 58
@@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
166 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 167 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
167 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || 168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || 169 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
169 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { 170 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
171 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
170 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 172 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
171 } 173 }
172 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 174 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
175 } 177 }
176 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 178 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
177 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 179 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
178 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) 180 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
181 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
179 xhci->quirks |= XHCI_MISSING_CAS; 182 xhci->quirks |= XHCI_MISSING_CAS;
180 183
181 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 184 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 7c2a9e7c8e0f..c04144b25a67 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -177,7 +177,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
177 177
178 irq = platform_get_irq(pdev, 0); 178 irq = platform_get_irq(pdev, 0);
179 if (irq < 0) 179 if (irq < 0)
180 return -ENODEV; 180 return irq;
181 181
182 /* 182 /*
183 * sysdev must point to a device that is known to the system firmware 183 * sysdev must point to a device that is known to the system firmware
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 74bf5c60a260..03f63f50afb6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -323,7 +323,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
323 if (i_cmd->status != COMP_COMMAND_ABORTED) 323 if (i_cmd->status != COMP_COMMAND_ABORTED)
324 continue; 324 continue;
325 325
326 i_cmd->status = COMP_STOPPED; 326 i_cmd->status = COMP_COMMAND_RING_STOPPED;
327 327
328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n", 328 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
329 i_cmd->command_trb); 329 i_cmd->command_trb);
@@ -641,8 +641,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
641 xhci_urb_free_priv(urb_priv); 641 xhci_urb_free_priv(urb_priv);
642 usb_hcd_unlink_urb_from_ep(hcd, urb); 642 usb_hcd_unlink_urb_from_ep(hcd, urb);
643 spin_unlock(&xhci->lock); 643 spin_unlock(&xhci->lock);
644 usb_hcd_giveback_urb(hcd, urb, status);
645 trace_xhci_urb_giveback(urb); 644 trace_xhci_urb_giveback(urb);
645 usb_hcd_giveback_urb(hcd, urb, status);
646 spin_lock(&xhci->lock); 646 spin_lock(&xhci->lock);
647} 647}
648 648
@@ -1380,7 +1380,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); 1380 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1381 1381
1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */ 1382 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1383 if (cmd_comp_code == COMP_STOPPED) { 1383 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1384 complete_all(&xhci->cmd_ring_stop_completion); 1384 complete_all(&xhci->cmd_ring_stop_completion);
1385 return; 1385 return;
1386 } 1386 }
@@ -1436,8 +1436,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1436 break; 1436 break;
1437 case TRB_CMD_NOOP: 1437 case TRB_CMD_NOOP:
1438 /* Is this an aborted command turned to NO-OP? */ 1438 /* Is this an aborted command turned to NO-OP? */
1439 if (cmd->status == COMP_STOPPED) 1439 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1440 cmd_comp_code = COMP_STOPPED; 1440 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1441 break; 1441 break;
1442 case TRB_RESET_EP: 1442 case TRB_RESET_EP:
1443 WARN_ON(slot_id != TRB_TO_SLOT_ID( 1443 WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -2677,11 +2677,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2678 union xhci_trb *event_ring_deq; 2678 union xhci_trb *event_ring_deq;
2679 irqreturn_t ret = IRQ_NONE; 2679 irqreturn_t ret = IRQ_NONE;
2680 unsigned long flags;
2680 dma_addr_t deq; 2681 dma_addr_t deq;
2681 u64 temp_64; 2682 u64 temp_64;
2682 u32 status; 2683 u32 status;
2683 2684
2684 spin_lock(&xhci->lock); 2685 spin_lock_irqsave(&xhci->lock, flags);
2685 /* Check if the xHC generated the interrupt, or the irq is shared */ 2686 /* Check if the xHC generated the interrupt, or the irq is shared */
2686 status = readl(&xhci->op_regs->status); 2687 status = readl(&xhci->op_regs->status);
2687 if (status == ~(u32)0) { 2688 if (status == ~(u32)0) {
@@ -2707,12 +2708,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2707 */ 2708 */
2708 status |= STS_EINT; 2709 status |= STS_EINT;
2709 writel(status, &xhci->op_regs->status); 2710 writel(status, &xhci->op_regs->status);
2710 /* FIXME when MSI-X is supported and there are multiple vectors */
2711 /* Clear the MSI-X event interrupt status */
2712 2711
2713 if (hcd->irq) { 2712 if (!hcd->msi_enabled) {
2714 u32 irq_pending; 2713 u32 irq_pending;
2715 /* Acknowledge the PCI interrupt */
2716 irq_pending = readl(&xhci->ir_set->irq_pending); 2714 irq_pending = readl(&xhci->ir_set->irq_pending);
2717 irq_pending |= IMAN_IP; 2715 irq_pending |= IMAN_IP;
2718 writel(irq_pending, &xhci->ir_set->irq_pending); 2716 writel(irq_pending, &xhci->ir_set->irq_pending);
@@ -2757,7 +2755,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
2757 ret = IRQ_HANDLED; 2755 ret = IRQ_HANDLED;
2758 2756
2759out: 2757out:
2760 spin_unlock(&xhci->lock); 2758 spin_unlock_irqrestore(&xhci->lock, flags);
2761 2759
2762 return ret; 2760 return ret;
2763} 2761}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2d1310220832..30f47d92a610 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -359,9 +359,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
359 /* fall back to msi*/ 359 /* fall back to msi*/
360 ret = xhci_setup_msi(xhci); 360 ret = xhci_setup_msi(xhci);
361 361
362 if (!ret) 362 if (!ret) {
363 /* hcd->irq is 0, we have MSI */ 363 hcd->msi_enabled = 1;
364 return 0; 364 return 0;
365 }
365 366
366 if (!pdev->irq) { 367 if (!pdev->irq) {
367 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); 368 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
@@ -1763,7 +1764,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1763 1764
1764 switch (*cmd_status) { 1765 switch (*cmd_status) {
1765 case COMP_COMMAND_ABORTED: 1766 case COMP_COMMAND_ABORTED:
1766 case COMP_STOPPED: 1767 case COMP_COMMAND_RING_STOPPED:
1767 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1768 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1768 ret = -ETIME; 1769 ret = -ETIME;
1769 break; 1770 break;
@@ -1813,7 +1814,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1813 1814
1814 switch (*cmd_status) { 1815 switch (*cmd_status) {
1815 case COMP_COMMAND_ABORTED: 1816 case COMP_COMMAND_ABORTED:
1816 case COMP_STOPPED: 1817 case COMP_COMMAND_RING_STOPPED:
1817 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1818 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1818 ret = -ETIME; 1819 ret = -ETIME;
1819 break; 1820 break;
@@ -3432,7 +3433,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3432 ret = reset_device_cmd->status; 3433 ret = reset_device_cmd->status;
3433 switch (ret) { 3434 switch (ret) {
3434 case COMP_COMMAND_ABORTED: 3435 case COMP_COMMAND_ABORTED:
3435 case COMP_STOPPED: 3436 case COMP_COMMAND_RING_STOPPED:
3436 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3437 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3437 ret = -ETIME; 3438 ret = -ETIME;
3438 goto command_cleanup; 3439 goto command_cleanup;
@@ -3817,7 +3818,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3817 */ 3818 */
3818 switch (command->status) { 3819 switch (command->status) {
3819 case COMP_COMMAND_ABORTED: 3820 case COMP_COMMAND_ABORTED:
3820 case COMP_STOPPED: 3821 case COMP_COMMAND_RING_STOPPED:
3821 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3822 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
3822 ret = -ETIME; 3823 ret = -ETIME;
3823 break; 3824 break;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index e9cae4d82af2..15d4e64d3b65 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -192,7 +192,7 @@ static int chaoskey_probe(struct usb_interface *interface,
192 192
193 dev->in_ep = in_ep; 193 dev->in_ep = in_ep;
194 194
195 if (udev->descriptor.idVendor != ALEA_VENDOR_ID) 195 if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
196 dev->reads_started = 1; 196 dev->reads_started = 1;
197 197
198 dev->size = size; 198 dev->size = size;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 77569531b78a..83b05a287b0c 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); 554 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
555 555
556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ 556 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
557 info.speed = le16_to_cpu(dev->udev->speed); 557 info.speed = dev->udev->speed;
558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; 558 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
559 info.report_size = dev->report_size; 559 info.report_size = dev->report_size;
560 560
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index aa3c280fdf8d..0782ac6f5edf 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -926,6 +926,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
926 USB_MAJOR, dev->minor); 926 USB_MAJOR, dev->minor);
927 927
928exit: 928exit:
929 kfree(get_version_reply);
929 return retval; 930 return retval;
930 931
931error: 932error:
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 3c6948af726a..f019d80ca9e4 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -973,7 +973,7 @@ sisusbcon_set_origin(struct vc_data *c)
973 973
974 mutex_unlock(&sisusb->lock); 974 mutex_unlock(&sisusb->lock);
975 975
976 return 1; 976 return true;
977} 977}
978 978
979/* Interface routine */ 979/* Interface routine */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ac3a4952abb4..dbe617a735d8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget)
2780 int ret; 2780 int ret;
2781 struct usb_hcd *hcd = musb->hcd; 2781 struct usb_hcd *hcd = musb->hcd;
2782 2782
2783 MUSB_HST_MODE(musb); 2783 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2784 musb->xceiv->otg->default_a = 1; 2784 MUSB_HST_MODE(musb);
2785 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 2785 musb->xceiv->otg->default_a = 1;
2786 2786 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2787 }
2787 otg_set_host(musb->xceiv->otg, &hcd->self); 2788 otg_set_host(musb->xceiv->otg, &hcd->self);
2788 hcd->self.otg_port = 1; 2789 hcd->self.otg_port = 1;
2789 musb->xceiv->otg->host = &hcd->self; 2790 musb->xceiv->otg->host = &hcd->self;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 8b43c4b99f04..7870b37e0ea5 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -219,6 +219,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
219 u32 dma_remaining; 219 u32 dma_remaining;
220 int src_burst, dst_burst; 220 int src_burst, dst_burst;
221 u16 csr; 221 u16 csr;
222 u32 psize;
222 int ch; 223 int ch;
223 s8 dmareq; 224 s8 dmareq;
224 s8 sync_dev; 225 s8 sync_dev;
@@ -390,15 +391,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
390 391
391 if (chdat->tx) { 392 if (chdat->tx) {
392 /* Send transfer_packet_sz packets at a time */ 393 /* Send transfer_packet_sz packets at a time */
393 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 394 psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
394 chdat->transfer_packet_sz); 395 psize &= ~0x7ff;
396 psize |= chdat->transfer_packet_sz;
397 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
395 398
396 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 399 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
397 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 400 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
398 } else { 401 } else {
399 /* Receive transfer_packet_sz packets at a time */ 402 /* Receive transfer_packet_sz packets at a time */
400 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, 403 psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
401 chdat->transfer_packet_sz << 16); 404 psize &= ~(0x7ff << 16);
405 psize |= (chdat->transfer_packet_sz << 16);
406 musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
402 407
403 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 408 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
404 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); 409 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d38780fa8788..aba74f817dc6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = {
809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, 809 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), 810 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 811 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
812 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 812 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
813 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 813 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
814 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 814 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
815 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 815 { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), 816 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 817 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), 818 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -1527,9 +1527,9 @@ static int set_serial_info(struct tty_struct *tty,
1527 (new_serial.flags & ASYNC_FLAGS)); 1527 (new_serial.flags & ASYNC_FLAGS));
1528 priv->custom_divisor = new_serial.custom_divisor; 1528 priv->custom_divisor = new_serial.custom_divisor;
1529 1529
1530check_and_exit:
1530 write_latency_timer(port); 1531 write_latency_timer(port);
1531 1532
1532check_and_exit:
1533 if ((old_priv.flags & ASYNC_SPD_MASK) != 1533 if ((old_priv.flags & ASYNC_SPD_MASK) !=
1534 (priv->flags & ASYNC_SPD_MASK)) { 1534 (priv->flags & ASYNC_SPD_MASK)) {
1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) 1535 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 71fb9e59db71..4fcf1cecb6d7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -882,6 +882,8 @@
882/* Olimex */ 882/* Olimex */
883#define OLIMEX_VID 0x15BA 883#define OLIMEX_VID 0x15BA
884#define OLIMEX_ARM_USB_OCD_PID 0x0003 884#define OLIMEX_ARM_USB_OCD_PID 0x0003
885#define OLIMEX_ARM_USB_TINY_PID 0x0004
886#define OLIMEX_ARM_USB_TINY_H_PID 0x002a
885#define OLIMEX_ARM_USB_OCD_H_PID 0x002b 887#define OLIMEX_ARM_USB_OCD_H_PID 0x002b
886 888
887/* 889/*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 87798e625d6c..6cefb9cb133d 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2336,8 +2336,11 @@ static void change_port_settings(struct tty_struct *tty,
2336 if (!baud) { 2336 if (!baud) {
2337 /* pick a default, any default... */ 2337 /* pick a default, any default... */
2338 baud = 9600; 2338 baud = 9600;
2339 } else 2339 } else {
2340 /* Avoid a zero divisor. */
2341 baud = min(baud, 461550);
2340 tty_encode_baud_rate(tty, baud, baud); 2342 tty_encode_baud_rate(tty, baud, baud);
2343 }
2341 2344
2342 edge_port->baud_rate = baud; 2345 edge_port->baud_rate = baud;
2343 config->wBaudRate = (__u16)((461550L + baud/2) / baud); 2346 config->wBaudRate = (__u16)((461550L + baud/2) / baud);
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 73956d48a0c5..f9734a96d516 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -197,6 +197,7 @@ static u8 ir_xbof_change(u8 xbof)
197static int ir_startup(struct usb_serial *serial) 197static int ir_startup(struct usb_serial *serial)
198{ 198{
199 struct usb_irda_cs_descriptor *irda_desc; 199 struct usb_irda_cs_descriptor *irda_desc;
200 int rates;
200 201
201 irda_desc = irda_usb_find_class_desc(serial, 0); 202 irda_desc = irda_usb_find_class_desc(serial, 0);
202 if (!irda_desc) { 203 if (!irda_desc) {
@@ -205,18 +206,20 @@ static int ir_startup(struct usb_serial *serial)
205 return -ENODEV; 206 return -ENODEV;
206 } 207 }
207 208
209 rates = le16_to_cpu(irda_desc->wBaudRate);
210
208 dev_dbg(&serial->dev->dev, 211 dev_dbg(&serial->dev->dev,
209 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n", 212 "%s - Baud rates supported:%s%s%s%s%s%s%s%s%s\n",
210 __func__, 213 __func__,
211 (irda_desc->wBaudRate & USB_IRDA_BR_2400) ? " 2400" : "", 214 (rates & USB_IRDA_BR_2400) ? " 2400" : "",
212 (irda_desc->wBaudRate & USB_IRDA_BR_9600) ? " 9600" : "", 215 (rates & USB_IRDA_BR_9600) ? " 9600" : "",
213 (irda_desc->wBaudRate & USB_IRDA_BR_19200) ? " 19200" : "", 216 (rates & USB_IRDA_BR_19200) ? " 19200" : "",
214 (irda_desc->wBaudRate & USB_IRDA_BR_38400) ? " 38400" : "", 217 (rates & USB_IRDA_BR_38400) ? " 38400" : "",
215 (irda_desc->wBaudRate & USB_IRDA_BR_57600) ? " 57600" : "", 218 (rates & USB_IRDA_BR_57600) ? " 57600" : "",
216 (irda_desc->wBaudRate & USB_IRDA_BR_115200) ? " 115200" : "", 219 (rates & USB_IRDA_BR_115200) ? " 115200" : "",
217 (irda_desc->wBaudRate & USB_IRDA_BR_576000) ? " 576000" : "", 220 (rates & USB_IRDA_BR_576000) ? " 576000" : "",
218 (irda_desc->wBaudRate & USB_IRDA_BR_1152000) ? " 1152000" : "", 221 (rates & USB_IRDA_BR_1152000) ? " 1152000" : "",
219 (irda_desc->wBaudRate & USB_IRDA_BR_4000000) ? " 4000000" : ""); 222 (rates & USB_IRDA_BR_4000000) ? " 4000000" : "");
220 223
221 switch (irda_desc->bmAdditionalBOFs) { 224 switch (irda_desc->bmAdditionalBOFs) {
222 case USB_IRDA_AB_48: 225 case USB_IRDA_AB_48:
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index edbc81f205c2..70f346f1aa86 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
189 return -ENOMEM; 189 return -ENOMEM;
190 190
191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed); 191 divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
192 put_unaligned_le32(cpu_to_le32(divisor), buf); 192 put_unaligned_le32(divisor, buf);
193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 193 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
194 MCT_U232_SET_BAUD_RATE_REQUEST, 194 MCT_U232_SET_BAUD_RATE_REQUEST,
195 MCT_U232_SET_REQUEST_TYPE, 195 MCT_U232_SET_REQUEST_TYPE,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af67a0de6b5d..3bf61acfc26b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb);
281#define TELIT_PRODUCT_LE922_USBCFG0 0x1042 281#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
282#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 282#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
283#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 283#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
284#define TELIT_PRODUCT_ME910 0x1100
284#define TELIT_PRODUCT_LE920 0x1200 285#define TELIT_PRODUCT_LE920 0x1200
285#define TELIT_PRODUCT_LE910 0x1201 286#define TELIT_PRODUCT_LE910 0x1201
286#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 287#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = {
640 .reserved = BIT(5) | BIT(6), 641 .reserved = BIT(5) | BIT(6),
641}; 642};
642 643
644static const struct option_blacklist_info telit_me910_blacklist = {
645 .sendsetup = BIT(0),
646 .reserved = BIT(1) | BIT(3),
647};
648
643static const struct option_blacklist_info telit_le910_blacklist = { 649static const struct option_blacklist_info telit_le910_blacklist = {
644 .sendsetup = BIT(0), 650 .sendsetup = BIT(0),
645 .reserved = BIT(1) | BIT(2), 651 .reserved = BIT(1) | BIT(2),
@@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = {
1235 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1241 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1236 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), 1242 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1237 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1243 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1244 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1245 .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
1238 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1246 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1239 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1247 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1240 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), 1248 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 38b3f0d8cd58..fd509ed6cf70 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ 162 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ 163 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ 164 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
165 {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
166 {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
165 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 369f3c24815a..44af719194b2 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@ struct ms_lib_ctrl {
446#define SD_BLOCK_LEN 9 446#define SD_BLOCK_LEN 9
447 447
448struct ene_ub6250_info { 448struct ene_ub6250_info {
449
450 /* I/O bounce buffer */
451 u8 *bbuf;
452
449 /* for 6250 code */ 453 /* for 6250 code */
450 struct SD_STATUS SD_Status; 454 struct SD_STATUS SD_Status;
451 struct MS_STATUS MS_Status; 455 struct MS_STATUS MS_Status;
@@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag);
493 497
494static void ene_ub6250_info_destructor(void *extra) 498static void ene_ub6250_info_destructor(void *extra)
495{ 499{
500 struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
501
496 if (!extra) 502 if (!extra)
497 return; 503 return;
504 kfree(info->bbuf);
498} 505}
499 506
500static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) 507static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
860 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) 867 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
861{ 868{
862 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 869 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
870 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
871 u8 *bbuf = info->bbuf;
863 int result; 872 int result;
864 u8 ExtBuf[4];
865 u32 bn = PhyBlockAddr * 0x20 + PageNum; 873 u32 bn = PhyBlockAddr * 0x20 + PageNum;
866 874
867 result = ene_load_bincode(us, MS_RW_PATTERN); 875 result = ene_load_bincode(us, MS_RW_PATTERN);
@@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
901 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); 909 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
902 bcb->CDB[6] = 0x01; 910 bcb->CDB[6] = 0x01;
903 911
904 result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 912 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
905 if (result != USB_STOR_XFER_GOOD) 913 if (result != USB_STOR_XFER_GOOD)
906 return USB_STOR_TRANSPORT_ERROR; 914 return USB_STOR_TRANSPORT_ERROR;
907 915
@@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
910 ExtraDat->status0 = 0x10; /* Not yet,fireware support */ 918 ExtraDat->status0 = 0x10; /* Not yet,fireware support */
911 919
912 ExtraDat->status1 = 0x00; /* Not yet,fireware support */ 920 ExtraDat->status1 = 0x00; /* Not yet,fireware support */
913 ExtraDat->ovrflg = ExtBuf[0]; 921 ExtraDat->ovrflg = bbuf[0];
914 ExtraDat->mngflg = ExtBuf[1]; 922 ExtraDat->mngflg = bbuf[1];
915 ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 923 ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
916 924
917 return USB_STOR_TRANSPORT_GOOD; 925 return USB_STOR_TRANSPORT_GOOD;
918} 926}
@@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1332 u8 PageNum, struct ms_lib_type_extdat *ExtraDat) 1340 u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
1333{ 1341{
1334 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 1342 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
1343 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
1344 u8 *bbuf = info->bbuf;
1335 int result; 1345 int result;
1336 u8 ExtBuf[4];
1337 1346
1338 memset(bcb, 0, sizeof(struct bulk_cb_wrap)); 1347 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
1339 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 1348 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1347 bcb->CDB[2] = (unsigned char)(PhyBlock>>16); 1356 bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
1348 bcb->CDB[6] = 0x01; 1357 bcb->CDB[6] = 0x01;
1349 1358
1350 result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); 1359 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
1351 if (result != USB_STOR_XFER_GOOD) 1360 if (result != USB_STOR_XFER_GOOD)
1352 return USB_STOR_TRANSPORT_ERROR; 1361 return USB_STOR_TRANSPORT_ERROR;
1353 1362
@@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
1355 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ 1364 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */
1356 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ 1365 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */
1357 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ 1366 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */
1358 ExtraDat->ovrflg = ExtBuf[0]; 1367 ExtraDat->ovrflg = bbuf[0];
1359 ExtraDat->mngflg = ExtBuf[1]; 1368 ExtraDat->mngflg = bbuf[1];
1360 ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); 1369 ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
1361 1370
1362 return USB_STOR_TRANSPORT_GOOD; 1371 return USB_STOR_TRANSPORT_GOOD;
1363} 1372}
@@ -1556,9 +1565,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
1556 u16 PhyBlock, newblk, i; 1565 u16 PhyBlock, newblk, i;
1557 u16 LogStart, LogEnde; 1566 u16 LogStart, LogEnde;
1558 struct ms_lib_type_extdat extdat; 1567 struct ms_lib_type_extdat extdat;
1559 u8 buf[0x200];
1560 u32 count = 0, index = 0; 1568 u32 count = 0, index = 0;
1561 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 1569 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
1570 u8 *bbuf = info->bbuf;
1562 1571
1563 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { 1572 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
1564 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); 1573 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1572,14 +1581,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
1572 } 1581 }
1573 1582
1574 if (count == PhyBlock) { 1583 if (count == PhyBlock) {
1575 ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); 1584 ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
1585 bbuf);
1576 count += 0x80; 1586 count += 0x80;
1577 } 1587 }
1578 index = (PhyBlock % 0x80) * 4; 1588 index = (PhyBlock % 0x80) * 4;
1579 1589
1580 extdat.ovrflg = buf[index]; 1590 extdat.ovrflg = bbuf[index];
1581 extdat.mngflg = buf[index+1]; 1591 extdat.mngflg = bbuf[index+1];
1582 extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); 1592 extdat.logadr = memstick_logaddr(bbuf[index+2],
1593 bbuf[index+3]);
1583 1594
1584 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { 1595 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
1585 ms_lib_setacquired_errorblock(us, PhyBlock); 1596 ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2062,9 +2073,9 @@ static int ene_ms_init(struct us_data *us)
2062{ 2073{
2063 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2074 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
2064 int result; 2075 int result;
2065 u8 buf[0x200];
2066 u16 MSP_BlockSize, MSP_UserAreaBlocks; 2076 u16 MSP_BlockSize, MSP_UserAreaBlocks;
2067 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2077 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
2078 u8 *bbuf = info->bbuf;
2068 2079
2069 printk(KERN_INFO "transport --- ENE_MSInit\n"); 2080 printk(KERN_INFO "transport --- ENE_MSInit\n");
2070 2081
@@ -2083,13 +2094,13 @@ static int ene_ms_init(struct us_data *us)
2083 bcb->CDB[0] = 0xF1; 2094 bcb->CDB[0] = 0xF1;
2084 bcb->CDB[1] = 0x01; 2095 bcb->CDB[1] = 0x01;
2085 2096
2086 result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2097 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
2087 if (result != USB_STOR_XFER_GOOD) { 2098 if (result != USB_STOR_XFER_GOOD) {
2088 printk(KERN_ERR "Execution MS Init Code Fail !!\n"); 2099 printk(KERN_ERR "Execution MS Init Code Fail !!\n");
2089 return USB_STOR_TRANSPORT_ERROR; 2100 return USB_STOR_TRANSPORT_ERROR;
2090 } 2101 }
2091 /* the same part to test ENE */ 2102 /* the same part to test ENE */
2092 info->MS_Status = *(struct MS_STATUS *)&buf[0]; 2103 info->MS_Status = *(struct MS_STATUS *) bbuf;
2093 2104
2094 if (info->MS_Status.Insert && info->MS_Status.Ready) { 2105 if (info->MS_Status.Insert && info->MS_Status.Ready) {
2095 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); 2106 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
@@ -2098,15 +2109,15 @@ static int ene_ms_init(struct us_data *us)
2098 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); 2109 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
2099 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); 2110 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
2100 if (info->MS_Status.IsMSPro) { 2111 if (info->MS_Status.IsMSPro) {
2101 MSP_BlockSize = (buf[6] << 8) | buf[7]; 2112 MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
2102 MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; 2113 MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
2103 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; 2114 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
2104 } else { 2115 } else {
2105 ms_card_init(us); /* Card is MS (to ms.c)*/ 2116 ms_card_init(us); /* Card is MS (to ms.c)*/
2106 } 2117 }
2107 usb_stor_dbg(us, "MS Init Code OK !!\n"); 2118 usb_stor_dbg(us, "MS Init Code OK !!\n");
2108 } else { 2119 } else {
2109 usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); 2120 usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
2110 return USB_STOR_TRANSPORT_ERROR; 2121 return USB_STOR_TRANSPORT_ERROR;
2111 } 2122 }
2112 2123
@@ -2116,9 +2127,9 @@ static int ene_ms_init(struct us_data *us)
2116static int ene_sd_init(struct us_data *us) 2127static int ene_sd_init(struct us_data *us)
2117{ 2128{
2118 int result; 2129 int result;
2119 u8 buf[0x200];
2120 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 2130 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
2121 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; 2131 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
2132 u8 *bbuf = info->bbuf;
2122 2133
2123 usb_stor_dbg(us, "transport --- ENE_SDInit\n"); 2134 usb_stor_dbg(us, "transport --- ENE_SDInit\n");
2124 /* SD Init Part-1 */ 2135 /* SD Init Part-1 */
@@ -2152,17 +2163,17 @@ static int ene_sd_init(struct us_data *us)
2152 bcb->Flags = US_BULK_FLAG_IN; 2163 bcb->Flags = US_BULK_FLAG_IN;
2153 bcb->CDB[0] = 0xF1; 2164 bcb->CDB[0] = 0xF1;
2154 2165
2155 result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); 2166 result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
2156 if (result != USB_STOR_XFER_GOOD) { 2167 if (result != USB_STOR_XFER_GOOD) {
2157 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); 2168 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
2158 return USB_STOR_TRANSPORT_ERROR; 2169 return USB_STOR_TRANSPORT_ERROR;
2159 } 2170 }
2160 2171
2161 info->SD_Status = *(struct SD_STATUS *)&buf[0]; 2172 info->SD_Status = *(struct SD_STATUS *) bbuf;
2162 if (info->SD_Status.Insert && info->SD_Status.Ready) { 2173 if (info->SD_Status.Insert && info->SD_Status.Ready) {
2163 struct SD_STATUS *s = &info->SD_Status; 2174 struct SD_STATUS *s = &info->SD_Status;
2164 2175
2165 ene_get_card_status(us, (unsigned char *)&buf); 2176 ene_get_card_status(us, bbuf);
2166 usb_stor_dbg(us, "Insert = %x\n", s->Insert); 2177 usb_stor_dbg(us, "Insert = %x\n", s->Insert);
2167 usb_stor_dbg(us, "Ready = %x\n", s->Ready); 2178 usb_stor_dbg(us, "Ready = %x\n", s->Ready);
2168 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); 2179 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
@@ -2170,7 +2181,7 @@ static int ene_sd_init(struct us_data *us)
2170 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); 2181 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
2171 usb_stor_dbg(us, "WtP = %x\n", s->WtP); 2182 usb_stor_dbg(us, "WtP = %x\n", s->WtP);
2172 } else { 2183 } else {
2173 usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); 2184 usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
2174 return USB_STOR_TRANSPORT_ERROR; 2185 return USB_STOR_TRANSPORT_ERROR;
2175 } 2186 }
2176 return USB_STOR_TRANSPORT_GOOD; 2187 return USB_STOR_TRANSPORT_GOOD;
@@ -2180,13 +2191,15 @@ static int ene_sd_init(struct us_data *us)
2180static int ene_init(struct us_data *us) 2191static int ene_init(struct us_data *us)
2181{ 2192{
2182 int result; 2193 int result;
2183 u8 misc_reg03 = 0; 2194 u8 misc_reg03;
2184 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); 2195 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
2196 u8 *bbuf = info->bbuf;
2185 2197
2186 result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2198 result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
2187 if (result != USB_STOR_XFER_GOOD) 2199 if (result != USB_STOR_XFER_GOOD)
2188 return USB_STOR_TRANSPORT_ERROR; 2200 return USB_STOR_TRANSPORT_ERROR;
2189 2201
2202 misc_reg03 = bbuf[0];
2190 if (misc_reg03 & 0x01) { 2203 if (misc_reg03 & 0x01) {
2191 if (!info->SD_Status.Ready) { 2204 if (!info->SD_Status.Ready) {
2192 result = ene_sd_init(us); 2205 result = ene_sd_init(us);
@@ -2303,8 +2316,9 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2303 const struct usb_device_id *id) 2316 const struct usb_device_id *id)
2304{ 2317{
2305 int result; 2318 int result;
2306 u8 misc_reg03 = 0; 2319 u8 misc_reg03;
2307 struct us_data *us; 2320 struct us_data *us;
2321 struct ene_ub6250_info *info;
2308 2322
2309 result = usb_stor_probe1(&us, intf, id, 2323 result = usb_stor_probe1(&us, intf, id,
2310 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, 2324 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2313,11 +2327,16 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2313 return result; 2327 return result;
2314 2328
2315 /* FIXME: where should the code alloc extra buf ? */ 2329 /* FIXME: where should the code alloc extra buf ? */
2316 if (!us->extra) { 2330 us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
2317 us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); 2331 if (!us->extra)
2318 if (!us->extra) 2332 return -ENOMEM;
2319 return -ENOMEM; 2333 us->extra_destructor = ene_ub6250_info_destructor;
2320 us->extra_destructor = ene_ub6250_info_destructor; 2334
2335 info = (struct ene_ub6250_info *)(us->extra);
2336 info->bbuf = kmalloc(512, GFP_KERNEL);
2337 if (!info->bbuf) {
2338 kfree(us->extra);
2339 return -ENOMEM;
2321 } 2340 }
2322 2341
2323 us->transport_name = "ene_ub6250"; 2342 us->transport_name = "ene_ub6250";
@@ -2329,12 +2348,13 @@ static int ene_ub6250_probe(struct usb_interface *intf,
2329 return result; 2348 return result;
2330 2349
2331 /* probe card type */ 2350 /* probe card type */
2332 result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); 2351 result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
2333 if (result != USB_STOR_XFER_GOOD) { 2352 if (result != USB_STOR_XFER_GOOD) {
2334 usb_stor_disconnect(intf); 2353 usb_stor_disconnect(intf);
2335 return USB_STOR_TRANSPORT_ERROR; 2354 return USB_STOR_TRANSPORT_ERROR;
2336 } 2355 }
2337 2356
2357 misc_reg03 = info->bbuf[0];
2338 if (!(misc_reg03 & 0x01)) { 2358 if (!(misc_reg03 & 0x01)) {
2339 pr_info("ums_eneub6250: This driver only supports SD/MS cards. " 2359 pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
2340 "It does not support SM cards.\n"); 2360 "It does not support SM cards.\n");
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 5d8b2c261940..0585078638db 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -235,14 +235,19 @@ done:
235 235
236static inline void hub_descriptor(struct usb_hub_descriptor *desc) 236static inline void hub_descriptor(struct usb_hub_descriptor *desc)
237{ 237{
238 int width;
239
238 memset(desc, 0, sizeof(*desc)); 240 memset(desc, 0, sizeof(*desc));
239 desc->bDescriptorType = USB_DT_HUB; 241 desc->bDescriptorType = USB_DT_HUB;
240 desc->bDescLength = 9;
241 desc->wHubCharacteristics = cpu_to_le16( 242 desc->wHubCharacteristics = cpu_to_le16(
242 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); 243 HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
244
243 desc->bNbrPorts = VHCI_HC_PORTS; 245 desc->bNbrPorts = VHCI_HC_PORTS;
244 desc->u.hs.DeviceRemovable[0] = 0xff; 246 BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN);
245 desc->u.hs.DeviceRemovable[1] = 0xff; 247 width = desc->bNbrPorts / 8 + 1;
248 desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
249 memset(&desc->u.hs.DeviceRemovable[0], 0, width);
250 memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
246} 251}
247 252
248static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, 253static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 6345e85822a4..a50cf45e530f 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@ error_submit_ep1:
341static 341static
342int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) 342int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
343{ 343{
344 struct usb_device *udev = interface_to_usbdev(iface);
344 struct i1480_usb *i1480_usb; 345 struct i1480_usb *i1480_usb;
345 struct i1480 *i1480; 346 struct i1480 *i1480;
346 struct device *dev = &iface->dev; 347 struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
352 iface->cur_altsetting->desc.bInterfaceNumber); 353 iface->cur_altsetting->desc.bInterfaceNumber);
353 goto error; 354 goto error;
354 } 355 }
355 if (iface->num_altsetting > 1 356 if (iface->num_altsetting > 1 &&
356 && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { 357 le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
357 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ 358 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
358 result = usb_set_interface(interface_to_usbdev(iface), 0, 1); 359 result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
359 if (result < 0) 360 if (result < 0)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 52a70ee6014f..8b9049dac094 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -452,7 +452,7 @@ config DAVINCI_WATCHDOG
452 452
453config ORION_WATCHDOG 453config ORION_WATCHDOG
454 tristate "Orion watchdog" 454 tristate "Orion watchdog"
455 depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || COMPILE_TEST 455 depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
456 depends on ARM 456 depends on ARM
457 select WATCHDOG_CORE 457 select WATCHDOG_CORE
458 help 458 help
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 6fce17d5b9f1..a5775dfd8d5f 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
304 if (!wdt) 304 if (!wdt)
305 return -ENOMEM; 305 return -ENOMEM;
306 306
307 spin_lock_init(&wdt->lock);
308
307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 309 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
308 wdt->base = devm_ioremap_resource(dev, res); 310 wdt->base = devm_ioremap_resource(dev, res);
309 if (IS_ERR(wdt->base)) 311 if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
316 return ret; 318 return ret;
317 } 319 }
318 320
319 spin_lock_init(&wdt->lock);
320 platform_set_drvdata(pdev, wdt); 321 platform_set_drvdata(pdev, wdt);
321 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); 322 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
322 bcm_kona_wdt_wdd.parent = &pdev->dev; 323 bcm_kona_wdt_wdd.parent = &pdev->dev;
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 8d61e8bfe60b..86e0b5d2e761 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -49,7 +49,7 @@
49/* Counter maximum value */ 49/* Counter maximum value */
50#define CDNS_WDT_COUNTER_MAX 0xFFF 50#define CDNS_WDT_COUNTER_MAX 0xFFF
51 51
52static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT; 52static int wdt_timeout;
53static int nowayout = WATCHDOG_NOWAYOUT; 53static int nowayout = WATCHDOG_NOWAYOUT;
54 54
55module_param(wdt_timeout, int, 0); 55module_param(wdt_timeout, int, 0);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 347f0389b089..c4f65873bfa4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -306,16 +306,15 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
306 306
307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout); 307 iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout);
308 308
309 /* Reset the timeout status bit so that the timer
310 * needs to count down twice again before rebooting */
311 outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */
312
309 /* Reload the timer by writing to the TCO Timer Counter register */ 313 /* Reload the timer by writing to the TCO Timer Counter register */
310 if (p->iTCO_version >= 2) { 314 if (p->iTCO_version >= 2)
311 outw(0x01, TCO_RLD(p)); 315 outw(0x01, TCO_RLD(p));
312 } else if (p->iTCO_version == 1) { 316 else if (p->iTCO_version == 1)
313 /* Reset the timeout status bit so that the timer
314 * needs to count down twice again before rebooting */
315 outw(0x0008, TCO1_STS(p)); /* write 1 to clear bit */
316
317 outb(0x01, TCO_RLD(p)); 317 outb(0x01, TCO_RLD(p));
318 }
319 318
320 spin_unlock(&p->io_lock); 319 spin_unlock(&p->io_lock);
321 return 0; 320 return 0;
@@ -328,11 +327,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
328 unsigned char val8; 327 unsigned char val8;
329 unsigned int tmrval; 328 unsigned int tmrval;
330 329
331 tmrval = seconds_to_ticks(p, t); 330 /* The timer counts down twice before rebooting */
332 331 tmrval = seconds_to_ticks(p, t) / 2;
333 /* For TCO v1 the timer counts down twice before rebooting */
334 if (p->iTCO_version == 1)
335 tmrval /= 2;
336 332
337 /* from the specs: */ 333 /* from the specs: */
338 /* "Values of 0h-3h are ignored and should not be attempted" */ 334 /* "Values of 0h-3h are ignored and should not be attempted" */
@@ -385,6 +381,8 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
385 spin_lock(&p->io_lock); 381 spin_lock(&p->io_lock);
386 val16 = inw(TCO_RLD(p)); 382 val16 = inw(TCO_RLD(p));
387 val16 &= 0x3ff; 383 val16 &= 0x3ff;
384 if (!(inw(TCO1_STS(p)) & 0x0008))
385 val16 += (inw(TCOv2_TMR(p)) & 0x3ff);
388 spin_unlock(&p->io_lock); 386 spin_unlock(&p->io_lock);
389 387
390 time_left = ticks_to_seconds(p, val16); 388 time_left = ticks_to_seconds(p, val16);
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 99ebf6ea3de6..5615f4013924 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface,
630 return -ENODEV; 630 return -ENODEV;
631 } 631 }
632 632
633 if (iface_desc->desc.bNumEndpoints < 1)
634 return -ENODEV;
635
633 /* check out the endpoint: it has to be Interrupt & IN */ 636 /* check out the endpoint: it has to be Interrupt & IN */
634 endpoint = &iface_desc->endpoint[0].desc; 637 endpoint = &iface_desc->endpoint[0].desc;
635 638
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index f709962018ac..362fd229786d 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -6,6 +6,7 @@
6 * Licensed under GPLv2. 6 * Licensed under GPLv2.
7 */ 7 */
8 8
9#include <linux/delay.h>
9#include <linux/interrupt.h> 10#include <linux/interrupt.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
@@ -29,6 +30,7 @@ struct sama5d4_wdt {
29 struct watchdog_device wdd; 30 struct watchdog_device wdd;
30 void __iomem *reg_base; 31 void __iomem *reg_base;
31 u32 mr; 32 u32 mr;
33 unsigned long last_ping;
32}; 34};
33 35
34static int wdt_timeout = WDT_DEFAULT_TIMEOUT; 36static int wdt_timeout = WDT_DEFAULT_TIMEOUT;
@@ -44,11 +46,34 @@ MODULE_PARM_DESC(nowayout,
44 "Watchdog cannot be stopped once started (default=" 46 "Watchdog cannot be stopped once started (default="
45 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 47 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
46 48
49#define wdt_enabled (!(wdt->mr & AT91_WDT_WDDIS))
50
47#define wdt_read(wdt, field) \ 51#define wdt_read(wdt, field) \
48 readl_relaxed((wdt)->reg_base + (field)) 52 readl_relaxed((wdt)->reg_base + (field))
49 53
50#define wdt_write(wtd, field, val) \ 54/* 4 slow clock periods is 4/32768 = 122.07µs*/
51 writel_relaxed((val), (wdt)->reg_base + (field)) 55#define WDT_DELAY usecs_to_jiffies(123)
56
57static void wdt_write(struct sama5d4_wdt *wdt, u32 field, u32 val)
58{
59 /*
60 * WDT_CR and WDT_MR must not be modified within three slow clock
61 * periods following a restart of the watchdog performed by a write
62 * access in WDT_CR.
63 */
64 while (time_before(jiffies, wdt->last_ping + WDT_DELAY))
65 usleep_range(30, 125);
66 writel_relaxed(val, wdt->reg_base + field);
67 wdt->last_ping = jiffies;
68}
69
70static void wdt_write_nosleep(struct sama5d4_wdt *wdt, u32 field, u32 val)
71{
72 if (time_before(jiffies, wdt->last_ping + WDT_DELAY))
73 udelay(123);
74 writel_relaxed(val, wdt->reg_base + field);
75 wdt->last_ping = jiffies;
76}
52 77
53static int sama5d4_wdt_start(struct watchdog_device *wdd) 78static int sama5d4_wdt_start(struct watchdog_device *wdd)
54{ 79{
@@ -89,7 +114,16 @@ static int sama5d4_wdt_set_timeout(struct watchdog_device *wdd,
89 wdt->mr &= ~AT91_WDT_WDD; 114 wdt->mr &= ~AT91_WDT_WDD;
90 wdt->mr |= AT91_WDT_SET_WDV(value); 115 wdt->mr |= AT91_WDT_SET_WDV(value);
91 wdt->mr |= AT91_WDT_SET_WDD(value); 116 wdt->mr |= AT91_WDT_SET_WDD(value);
92 wdt_write(wdt, AT91_WDT_MR, wdt->mr); 117
118 /*
119 * WDDIS has to be 0 when updating WDD/WDV. The datasheet states: When
120 * setting the WDDIS bit, and while it is set, the fields WDV and WDD
121 * must not be modified.
122 * If the watchdog is enabled, then the timeout can be updated. Else,
123 * wait that the user enables it.
124 */
125 if (wdt_enabled)
126 wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS);
93 127
94 wdd->timeout = timeout; 128 wdd->timeout = timeout;
95 129
@@ -145,23 +179,21 @@ static int of_sama5d4_wdt_init(struct device_node *np, struct sama5d4_wdt *wdt)
145 179
146static int sama5d4_wdt_init(struct sama5d4_wdt *wdt) 180static int sama5d4_wdt_init(struct sama5d4_wdt *wdt)
147{ 181{
148 struct watchdog_device *wdd = &wdt->wdd;
149 u32 value = WDT_SEC2TICKS(wdd->timeout);
150 u32 reg; 182 u32 reg;
151
152 /* 183 /*
153 * Because the fields WDV and WDD must not be modified when the WDDIS 184 * When booting and resuming, the bootloader may have changed the
154 * bit is set, so clear the WDDIS bit before writing the WDT_MR. 185 * watchdog configuration.
186 * If the watchdog is already running, we can safely update it.
187 * Else, we have to disable it properly.
155 */ 188 */
156 reg = wdt_read(wdt, AT91_WDT_MR); 189 if (wdt_enabled) {
157 reg &= ~AT91_WDT_WDDIS; 190 wdt_write_nosleep(wdt, AT91_WDT_MR, wdt->mr);
158 wdt_write(wdt, AT91_WDT_MR, reg); 191 } else {
159 192 reg = wdt_read(wdt, AT91_WDT_MR);
160 wdt->mr |= AT91_WDT_SET_WDD(value); 193 if (!(reg & AT91_WDT_WDDIS))
161 wdt->mr |= AT91_WDT_SET_WDV(value); 194 wdt_write_nosleep(wdt, AT91_WDT_MR,
162 195 reg | AT91_WDT_WDDIS);
163 wdt_write(wdt, AT91_WDT_MR, wdt->mr); 196 }
164
165 return 0; 197 return 0;
166} 198}
167 199
@@ -172,6 +204,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
172 struct resource *res; 204 struct resource *res;
173 void __iomem *regs; 205 void __iomem *regs;
174 u32 irq = 0; 206 u32 irq = 0;
207 u32 timeout;
175 int ret; 208 int ret;
176 209
177 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); 210 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -184,6 +217,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
184 wdd->ops = &sama5d4_wdt_ops; 217 wdd->ops = &sama5d4_wdt_ops;
185 wdd->min_timeout = MIN_WDT_TIMEOUT; 218 wdd->min_timeout = MIN_WDT_TIMEOUT;
186 wdd->max_timeout = MAX_WDT_TIMEOUT; 219 wdd->max_timeout = MAX_WDT_TIMEOUT;
220 wdt->last_ping = jiffies;
187 221
188 watchdog_set_drvdata(wdd, wdt); 222 watchdog_set_drvdata(wdd, wdt);
189 223
@@ -221,6 +255,11 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
221 return ret; 255 return ret;
222 } 256 }
223 257
258 timeout = WDT_SEC2TICKS(wdd->timeout);
259
260 wdt->mr |= AT91_WDT_SET_WDD(timeout);
261 wdt->mr |= AT91_WDT_SET_WDV(timeout);
262
224 ret = sama5d4_wdt_init(wdt); 263 ret = sama5d4_wdt_init(wdt);
225 if (ret) 264 if (ret)
226 return ret; 265 return ret;
@@ -263,9 +302,7 @@ static int sama5d4_wdt_resume(struct device *dev)
263{ 302{
264 struct sama5d4_wdt *wdt = dev_get_drvdata(dev); 303 struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
265 304
266 wdt_write(wdt, AT91_WDT_MR, wdt->mr & ~AT91_WDT_WDDIS); 305 sama5d4_wdt_init(wdt);
267 if (wdt->mr & AT91_WDT_WDDIS)
268 wdt_write(wdt, AT91_WDT_MR, wdt->mr);
269 306
270 return 0; 307 return 0;
271} 308}
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 48b2c058b009..bc7addc2dc06 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -332,7 +332,7 @@ static irqreturn_t wdtpci_interrupt(int irq, void *dev_id)
332 pr_crit("Would Reboot\n"); 332 pr_crit("Would Reboot\n");
333#else 333#else
334 pr_crit("Initiating system reboot\n"); 334 pr_crit("Initiating system reboot\n");
335 emergency_restart(NULL); 335 emergency_restart();
336#endif 336#endif
337#else 337#else
338 pr_crit("Reset in 5ms\n"); 338 pr_crit("Reset in 5ms\n");
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
index e290d5a13a6d..c98252733c30 100644
--- a/drivers/watchdog/zx2967_wdt.c
+++ b/drivers/watchdog/zx2967_wdt.c
@@ -211,10 +211,8 @@ static int zx2967_wdt_probe(struct platform_device *pdev)
211 211
212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 212 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
213 wdt->reg_base = devm_ioremap_resource(dev, base); 213 wdt->reg_base = devm_ioremap_resource(dev, base);
214 if (IS_ERR(wdt->reg_base)) { 214 if (IS_ERR(wdt->reg_base))
215 dev_err(dev, "ioremap failed\n");
216 return PTR_ERR(wdt->reg_base); 215 return PTR_ERR(wdt->reg_base);
217 }
218 216
219 zx2967_wdt_reset_sysctrl(dev); 217 zx2967_wdt_reset_sysctrl(dev);
220 218