aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_processor.c52
-rw-r--r--drivers/acpi/acpica/dsmethod.c3
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/nfit.c5
-rw-r--r--drivers/ata/Kconfig8
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci_platform.c3
-rw-r--r--drivers/ata/ahci_seattle.c210
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/base/power/opp/core.c3
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/property.c2
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/rbd.c58
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/bus/uniphier-system-bus.c2
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c1
-rw-r--r--drivers/clk/imx/clk-imx6q.c2
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/mmp/reset.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c70
-rw-r--r--drivers/clk/qcom/reset.c2
-rw-r--r--drivers/clk/qcom/reset.h2
-rw-r--r--drivers/clk/rockchip/softrst.c2
-rw-r--r--drivers/clk/sirf/clk-atlas7.c2
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c2
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/sunxi/clk-usb.c2
-rw-r--r--drivers/clk/tegra/clk.c2
-rw-r--r--drivers/clocksource/tango_xtal.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq.c29
-rw-r--r--drivers/cpufreq/cpufreq_governor.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c241
-rw-r--r--drivers/cpufreq/sti-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle-arm.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/talitos.c87
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/edma.c63
-rw-r--r--drivers/dma/hsu/hsu.c13
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/omap-dma.c26
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/sb_edac.c32
-rw-r--r--drivers/extcon/extcon-palmas.c3
-rw-r--r--drivers/firmware/efi/arm-init.c18
-rw-r--r--drivers/firmware/efi/vars.c37
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/firmware/qemu_fw_cfg.c24
-rw-r--r--drivers/gpio/gpio-menz127.c9
-rw-r--r--drivers/gpio/gpio-pca953x.c3
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpio-rcar.c65
-rw-r--r--drivers/gpio/gpio-xgene.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib.c133
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c16
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c14
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c27
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c31
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c36
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c31
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h9
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c42
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c6
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c13
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c123
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c7
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c79
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c79
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-lenovo.c16
-rw-r--r--drivers/hid/hid-microsoft.c6
-rw-r--r--drivers/hid/hid-multitouch.c1
-rw-r--r--drivers/hid/hid-wiimote-modules.c14
-rw-r--r--drivers/hid/usbhid/hid-core.c73
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c102
-rw-r--r--drivers/hid/wacom_wac.c17
-rw-r--r--drivers/hid/wacom_wac.h8
-rw-r--r--drivers/hv/ring_buffer.c26
-rw-r--r--drivers/hwmon/max1111.c6
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c7
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c1
-rw-r--r--drivers/i2c/i2c-core.c10
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c39
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/palm_bk3710.c2
-rw-r--r--drivers/idle/intel_idle.c97
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c7
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c2
-rw-r--r--drivers/iio/adc/max1363.c12
-rw-r--r--drivers/iio/gyro/bmg160_core.c9
-rw-r--r--drivers/iio/health/max30100.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c30
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c3
-rw-r--r--drivers/iio/industrialio-buffer.c1
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/magnetometer/ak8975.c6
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c24
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c14
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c39
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c55
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h2
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/twl4030-vibra.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c24
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c28
-rw-r--r--drivers/input/touchscreen/zforce_ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu.c22
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/iommu/rockchip-iommu.c8
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/hisax/isac.c15
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/lguest/interrupts_and_traps.c6
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/mailbox/mailbox-test.c16
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/bitmap.c21
-rw-r--r--drivers/md/dm-cache-metadata.c64
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/media-device.c8
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c13
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c12
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c52
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c63
-rw-r--r--drivers/media/usb/au0828/au0828.h9
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c20
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c20
-rw-r--r--drivers/misc/cxl/context.c7
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lkdtm.c11
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c5
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/card/block.c18
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c22
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sdhci.c39
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/nand/nand_base.c10
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/dsa/mv88e6xxx.c121
-rw-r--r--drivers/net/dsa/mv88e6xxx.h8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c63
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c22
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/cadence/macb.c103
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c165
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c46
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h11
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c157
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c91
-rw-r--r--drivers/net/ethernet/ti/cpsw.c71
-rw-r--r--drivers/net/ethernet/ti/cpsw.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/macsec.c65
-rw-r--r--drivers/net/phy/at803x.c40
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/spi_ks8995.c2
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c12
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/lan78xx.c44
-rw-r--r--drivers/net/usb/pegasus.c10
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c177
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c5
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/core.c41
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c69
-rw-r--r--drivers/nvme/host/pci.c31
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/pci/access.c42
-rw-r--r--drivers/pci/host/pci-imx6.c20
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/perf/arm_pmu.c15
-rw-r--r--drivers/phy/phy-rockchip-dp.c7
-rw-r--r--drivers/phy/phy-rockchip-emmc.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c35
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c14
-rw-r--r--drivers/pinctrl/sh-pfc/core.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c17
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c48
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c121
-rw-r--r--drivers/remoteproc/st_remoteproc.c4
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/s390/block/dasd_alias.c226
-rw-r--r--drivers/s390/block/dasd_eckd.c38
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c1
-rw-r--r--drivers/scsi/cxlflash/main.c138
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c33
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/sd.c49
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c11
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-rockchip.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c16
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c20
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c10
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h4
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h2
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c12
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c22
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c30
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c16
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c44
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c54
-rw-r--r--drivers/staging/olpc_dcon/Kconfig35
-rw-r--r--drivers/staging/olpc_dcon/Makefile6
-rw-r--r--drivers/staging/olpc_dcon/TODO9
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c813
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h111
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c205
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c161
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig1
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c91
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c40
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h3
-rw-r--r--drivers/staging/rdma/hfi1/qp.c2
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c33
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c28
-rw-r--r--drivers/target/target_core_fabric_configfs.c24
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/mtk_thermal.c3
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/tty/pty.c79
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/uartlite.c8
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/config.c16
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/port.c6
-rw-r--r--drivers/usb/core/usb.c8
-rw-r--r--drivers/usb/dwc2/gadget.c23
-rw-r--r--drivers/usb/dwc3/core.c71
-rw-r--r--drivers/usb/dwc3/debugfs.c13
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c17
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/f_midi.c17
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c14
-rw-r--r--drivers/usb/gadget/udc/udc-core.c6
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c13
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c6
-rw-r--r--drivers/usb/serial/cp210x.c5
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/video/fbdev/amba-clcd.c15
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/virtio/virtio_pci_modern.c11
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/xen/balloon.c16
-rw-r--r--drivers/xen/events/events_base.c28
-rw-r--r--drivers/xen/evtchn.c20
615 files changed, 7715 insertions, 3742 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b5e54f2da53d..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
491} 491}
492#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 492#endif /* CONFIG_ACPI_HOTPLUG_CPU */
493 493
494#ifdef CONFIG_X86
495static bool acpi_hwp_native_thermal_lvt_set;
496static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
497 u32 lvl,
498 void *context,
499 void **rv)
500{
501 u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
502 u32 capbuf[2];
503 struct acpi_osc_context osc_context = {
504 .uuid_str = sb_uuid_str,
505 .rev = 1,
506 .cap.length = 8,
507 .cap.pointer = capbuf,
508 };
509
510 if (acpi_hwp_native_thermal_lvt_set)
511 return AE_CTRL_TERMINATE;
512
513 capbuf[0] = 0x0000;
514 capbuf[1] = 0x1000; /* set bit 12 */
515
516 if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
517 if (osc_context.ret.pointer && osc_context.ret.length > 1) {
518 u32 *capbuf_ret = osc_context.ret.pointer;
519
520 if (capbuf_ret[1] & 0x1000) {
521 acpi_handle_info(handle,
522 "_OSC native thermal LVT Acked\n");
523 acpi_hwp_native_thermal_lvt_set = true;
524 }
525 }
526 kfree(osc_context.ret.pointer);
527 }
528
529 return AE_OK;
530}
531
532void __init acpi_early_processor_osc(void)
533{
534 if (boot_cpu_has(X86_FEATURE_HWP)) {
535 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
536 ACPI_UINT32_MAX,
537 acpi_hwp_native_thermal_lvt_osc,
538 NULL, NULL, NULL);
539 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
540 acpi_hwp_native_thermal_lvt_osc,
541 NULL, NULL);
542 }
543}
544#endif
545
494/* 546/*
495 * The following ACPI IDs are known to be suitable for representing as 547 * The following ACPI IDs are known to be suitable for representing as
496 * processor devices. 548 * processor devices.
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 1982310e6d83..da198b864107 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
428 obj_desc->method.mutex->mutex. 428 obj_desc->method.mutex->mutex.
429 original_sync_level = 429 original_sync_level =
430 obj_desc->method.mutex->mutex.sync_level; 430 obj_desc->method.mutex->mutex.sync_level;
431
432 obj_desc->method.mutex->mutex.thread_id =
433 acpi_os_get_thread_id();
431 } 434 }
432 } 435 }
433 436
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0e8567846f1a..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1019,6 +1019,9 @@ static int __init acpi_bus_init(void)
1019 goto error1; 1019 goto error1;
1020 } 1020 }
1021 1021
1022 /* Set capability bits for _OSC under processor scope */
1023 acpi_early_processor_osc();
1024
1022 /* 1025 /*
1023 * _OSC method may exist in module level code, 1026 * _OSC method may exist in module level code,
1024 * so it must be run after ACPI_FULL_INITIALIZATION 1027 * so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a37508ef66c1..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -145,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
145static inline void acpi_early_processor_set_pdc(void) {} 145static inline void acpi_early_processor_set_pdc(void) {}
146#endif 146#endif
147 147
148#ifdef CONFIG_X86
149void acpi_early_processor_osc(void);
150#else
151static inline void acpi_early_processor_osc(void) {}
152#endif
153
148/* -------------------------------------------------------------------------- 154/* --------------------------------------------------------------------------
149 Embedded Controller 155 Embedded Controller
150 -------------------------------------------------------------------------- */ 156 -------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index d0f35e63640b..63cc9dbe4f3b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
287 offset); 287 offset);
288 rc = -ENXIO; 288 rc = -ENXIO;
289 } 289 }
290 } else 290 } else {
291 rc = 0; 291 rc = 0;
292 if (cmd_rc)
293 *cmd_rc = xlat_status(buf, cmd);
294 }
292 295
293 out: 296 out:
294 ACPI_FREE(out_obj); 297 ACPI_FREE(out_obj);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5083f85efea7..cfa936a32513 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,14 @@ config SATA_FSL
202 202
203 If unsure, say N. 203 If unsure, say N.
204 204
205config SATA_AHCI_SEATTLE
206 tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
207 depends on ARCH_SEATTLE
208 help
209 This option enables support for AMD Seattle SATA host controller.
210
211 If unsure, say N
212
205config SATA_INIC162X 213config SATA_INIC162X
206 tristate "Initio 162x SATA support (Very Experimental)" 214 tristate "Initio 162x SATA support (Very Experimental)"
207 depends on PCI 215 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 18579521464e..0b2afb7e5f35 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA) += libata.o
4# non-SFF interface 4# non-SFF interface
5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o 5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
6obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o 6obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
7obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o
7obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o 8obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
8obj-$(CONFIG_SATA_FSL) += sata_fsl.o 9obj-$(CONFIG_SATA_FSL) += sata_fsl.o
9obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 10obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 40442332bfa7..62a04c8fb5c9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
51 if (rc) 51 if (rc)
52 return rc; 52 return rc;
53 53
54 of_property_read_u32(dev->of_node,
55 "ports-implemented", &hpriv->force_port_map);
56
54 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) 57 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
55 hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; 58 hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
56 59
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 000000000000..6e702ab57220
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
1/*
2 * AMD Seattle AHCI SATA driver
3 *
4 * Copyright (c) 2015, Advanced Micro Devices
5 * Author: Brijesh Singh <brijesh.singh@amd.com>
6 *
7 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/of_device.h>
24#include <linux/platform_device.h>
25#include <linux/libata.h>
26#include <linux/ahci_platform.h>
27#include <linux/acpi.h>
28#include <linux/pci_ids.h>
29#include "ahci.h"
30
31/* SGPIO Control Register definition
32 *
33 * Bit Type Description
34 * 31 RW OD7.2 (activity)
35 * 30 RW OD7.1 (locate)
36 * 29 RW OD7.0 (fault)
37 * 28...8 RW OD6.2...OD0.0 (3bits per port, 1 bit per LED)
38 * 7 RO SGPIO feature flag
39 * 6:4 RO Reserved
40 * 3:0 RO Number of ports (0 means no port supported)
41 */
42#define ACTIVITY_BIT_POS(x) (8 + (3 * x))
43#define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1)
44#define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1)
45
46#define ACTIVITY_MASK 0x00010000
47#define LOCATE_MASK 0x00080000
48#define FAULT_MASK 0x00400000
49
50#define DRV_NAME "ahci-seattle"
51
52static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
53 ssize_t size);
54
55struct seattle_plat_data {
56 void __iomem *sgpio_ctrl;
57};
58
59static struct ata_port_operations ahci_port_ops = {
60 .inherits = &ahci_ops,
61};
62
63static const struct ata_port_info ahci_port_info = {
64 .flags = AHCI_FLAG_COMMON,
65 .pio_mask = ATA_PIO4,
66 .udma_mask = ATA_UDMA6,
67 .port_ops = &ahci_port_ops,
68};
69
70static struct ata_port_operations ahci_seattle_ops = {
71 .inherits = &ahci_ops,
72 .transmit_led_message = seattle_transmit_led_message,
73};
74
75static const struct ata_port_info ahci_port_seattle_info = {
76 .flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
77 .link_flags = ATA_LFLAG_SW_ACTIVITY,
78 .pio_mask = ATA_PIO4,
79 .udma_mask = ATA_UDMA6,
80 .port_ops = &ahci_seattle_ops,
81};
82
83static struct scsi_host_template ahci_platform_sht = {
84 AHCI_SHT(DRV_NAME),
85};
86
87static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
88 ssize_t size)
89{
90 struct ahci_host_priv *hpriv = ap->host->private_data;
91 struct ahci_port_priv *pp = ap->private_data;
92 struct seattle_plat_data *plat_data = hpriv->plat_data;
93 unsigned long flags;
94 int pmp;
95 struct ahci_em_priv *emp;
96 u32 val;
97
98 /* get the slot number from the message */
99 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
100 if (pmp >= EM_MAX_SLOTS)
101 return -EINVAL;
102 emp = &pp->em_priv[pmp];
103
104 val = ioread32(plat_data->sgpio_ctrl);
105 if (state & ACTIVITY_MASK)
106 val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
107 else
108 val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
109
110 if (state & LOCATE_MASK)
111 val |= 1 << LOCATE_BIT_POS((ap->port_no));
112 else
113 val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
114
115 if (state & FAULT_MASK)
116 val |= 1 << FAULT_BIT_POS((ap->port_no));
117 else
118 val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
119
120 iowrite32(val, plat_data->sgpio_ctrl);
121
122 spin_lock_irqsave(ap->lock, flags);
123
124 /* save off new led state for port/slot */
125 emp->led_state = state;
126
127 spin_unlock_irqrestore(ap->lock, flags);
128
129 return size;
130}
131
132static const struct ata_port_info *ahci_seattle_get_port_info(
133 struct platform_device *pdev, struct ahci_host_priv *hpriv)
134{
135 struct device *dev = &pdev->dev;
136 struct seattle_plat_data *plat_data;
137 u32 val;
138
139 plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
140 if (IS_ERR(plat_data))
141 return &ahci_port_info;
142
143 plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
144 platform_get_resource(pdev, IORESOURCE_MEM, 1));
145 if (IS_ERR(plat_data->sgpio_ctrl))
146 return &ahci_port_info;
147
148 val = ioread32(plat_data->sgpio_ctrl);
149
150 if (!(val & 0xf))
151 return &ahci_port_info;
152
153 hpriv->em_loc = 0;
154 hpriv->em_buf_sz = 4;
155 hpriv->em_msg_type = EM_MSG_TYPE_LED;
156 hpriv->plat_data = plat_data;
157
158 dev_info(dev, "SGPIO LED control is enabled.\n");
159 return &ahci_port_seattle_info;
160}
161
162static int ahci_seattle_probe(struct platform_device *pdev)
163{
164 int rc;
165 struct ahci_host_priv *hpriv;
166
167 hpriv = ahci_platform_get_resources(pdev);
168 if (IS_ERR(hpriv))
169 return PTR_ERR(hpriv);
170
171 rc = ahci_platform_enable_resources(hpriv);
172 if (rc)
173 return rc;
174
175 rc = ahci_platform_init_host(pdev, hpriv,
176 ahci_seattle_get_port_info(pdev, hpriv),
177 &ahci_platform_sht);
178 if (rc)
179 goto disable_resources;
180
181 return 0;
182disable_resources:
183 ahci_platform_disable_resources(hpriv);
184 return rc;
185}
186
187static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
188 ahci_platform_resume);
189
190static const struct acpi_device_id ahci_acpi_match[] = {
191 { "AMDI0600", 0 },
192 {}
193};
194MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
195
196static struct platform_driver ahci_seattle_driver = {
197 .probe = ahci_seattle_probe,
198 .remove = ata_platform_remove_one,
199 .driver = {
200 .name = DRV_NAME,
201 .acpi_match_table = ahci_acpi_match,
202 .pm = &ahci_pm_ops,
203 },
204};
205module_platform_driver(ahci_seattle_driver);
206
207MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
208MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
209MODULE_LICENSE("GPL");
210MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3982054060b8..a5d7c1c2a05e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
507 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", 507 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
508 port_map, hpriv->force_port_map); 508 port_map, hpriv->force_port_map);
509 port_map = hpriv->force_port_map; 509 port_map = hpriv->force_port_map;
510 hpriv->saved_port_map = port_map;
510 } 511 }
511 512
512 if (hpriv->mask_port_map) { 513 if (hpriv->mask_port_map) {
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b60092972..d8f4cc22856c 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
259 reg = opp_table->regulator; 259 reg = opp_table->regulator;
260 if (IS_ERR(reg)) { 260 if (IS_ERR(reg)) {
261 /* Regulator may not be required for device */ 261 /* Regulator may not be required for device */
262 if (reg)
263 dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
264 PTR_ERR(reg));
265 rcu_read_unlock(); 262 rcu_read_unlock();
266 return 0; 263 return 0;
267 } 264 }
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
246 return -EEXIST; 246 return -EEXIST;
247 } 247 }
248 dev->power.wakeup = ws; 248 dev->power.wakeup = ws;
249 if (dev->power.wakeirq)
250 device_wakeup_attach_irq(dev, dev->power.wakeirq);
249 spin_unlock_irq(&dev->power.lock); 251 spin_unlock_irq(&dev->power.lock);
250 return 0; 252 return 0;
251} 253}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 9b1a65debd49..7f692accdc90 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
21 21
22static inline bool is_pset_node(struct fwnode_handle *fwnode) 22static inline bool is_pset_node(struct fwnode_handle *fwnode)
23{ 23{
24 return fwnode && fwnode->type == FWNODE_PDATA; 24 return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
25} 25}
26 26
27static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) 27static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
136 return false; 136 return false;
137} 137}
138 138
139#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
140static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 139static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
141 struct bcma_device *core) 140 struct bcma_device *core)
142{ 141{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
184 struct of_phandle_args out_irq; 183 struct of_phandle_args out_irq;
185 int ret; 184 int ret;
186 185
187 if (!parent || !parent->dev.of_node) 186 if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
188 return 0; 187 return 0;
189 188
190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); 189 ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
202{ 201{
203 struct device_node *node; 202 struct device_node *node;
204 203
204 if (!IS_ENABLED(CONFIG_OF_IRQ))
205 return;
206
205 node = bcma_of_find_child_device(parent, core); 207 node = bcma_of_find_child_device(parent, core);
206 if (node) 208 if (node)
207 core->dev.of_node = node; 209 core->dev.of_node = node;
208 210
209 core->irq = bcma_of_get_irq(parent, core, 0); 211 core->irq = bcma_of_get_irq(parent, core, 0);
210} 212}
211#else
212static void bcma_of_fill_device(struct platform_device *parent,
213 struct bcma_device *core)
214{
215}
216static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
217 struct bcma_device *core, int num)
218{
219 return 0;
220}
221#endif /* CONFIG_OF */
222 213
223unsigned int bcma_core_irq(struct bcma_device *core, int num) 214unsigned int bcma_core_irq(struct bcma_device *core, int num)
224{ 215{
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
397 WARN_ON(d->flags & DEVFL_UP); 397 WARN_ON(d->flags & DEVFL_UP);
398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); 398 blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
399 q->backing_dev_info.name = "aoe"; 399 q->backing_dev_info.name = "aoe";
400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; 400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
401 d->bufpool = mp; 401 d->bufpool = mp;
402 d->blkq = gd->queue = q; 402 d->blkq = gd->queue = q;
403 q->queuedata = d; 403 q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc287d733..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
374 struct page *page, int rw) 374 struct page *page, int rw)
375{ 375{
376 struct brd_device *brd = bdev->bd_disk->private_data; 376 struct brd_device *brd = bdev->bd_disk->private_data;
377 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); 377 int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
378 page_endio(page, rw & WRITE, err); 378 page_endio(page, rw & WRITE, err);
379 return err; 379 return err;
380} 380}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
1327#endif 1327#endif
1328#endif 1328#endif
1329 1329
1330/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1330/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
1331 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1331 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
1332 * Since we may live in a mixed-platform cluster, 1332 * Since we may live in a mixed-platform cluster,
1333 * we limit us to a platform agnostic constant here for now. 1333 * we limit us to a platform agnostic constant here for now.
1334 * A followup commit may allow even bigger BIO sizes, 1334 * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c9f0fb..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
1178 blk_queue_max_hw_sectors(q, max_hw_sectors); 1178 blk_queue_max_hw_sectors(q, max_hw_sectors);
1179 /* This is the workaround for "bio would need to, but cannot, be split" */ 1179 /* This is the workaround for "bio would need to, but cannot, be split" */
1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1180 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1181 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 1181 blk_queue_segment_boundary(q, PAGE_SIZE-1);
1182 1182
1183 if (b) { 1183 if (b) {
1184 struct drbd_connection *connection = first_peer_device(device)->connection; 1184 struct drbd_connection *connection = first_peer_device(device)->connection;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
488 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 488 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
489 iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, 489 iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
490 bio_segments(bio), blk_rq_bytes(cmd->rq)); 490 bio_segments(bio), blk_rq_bytes(cmd->rq));
491 /*
492 * This bio may be started from the middle of the 'bvec'
493 * because of bio splitting, so offset from the bvec must
494 * be passed to iov iterator
495 */
496 iter.iov_offset = bio->bi_iter.bi_bvec_done;
491 497
492 cmd->iocb.ki_pos = pos; 498 cmd->iocb.ki_pos = pos;
493 cmd->iocb.ki_filp = file; 499 cmd->iocb.ki_filp = file;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c6234428607..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
538 u8 *order, u64 *snap_size); 538 u8 *order, u64 *snap_size);
539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
540 u64 *snap_features); 540 u64 *snap_features);
541static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
542 541
543static int rbd_open(struct block_device *bdev, fmode_t mode) 542static int rbd_open(struct block_device *bdev, fmode_t mode)
544{ 543{
@@ -1953,7 +1952,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
1953 1952
1954 osdc = &rbd_dev->rbd_client->client->osdc; 1953 osdc = &rbd_dev->rbd_client->client->osdc;
1955 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, 1954 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1956 GFP_ATOMIC); 1955 GFP_NOIO);
1957 if (!osd_req) 1956 if (!osd_req)
1958 return NULL; /* ENOMEM */ 1957 return NULL; /* ENOMEM */
1959 1958
@@ -2002,7 +2001,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
2002 rbd_dev = img_request->rbd_dev; 2001 rbd_dev = img_request->rbd_dev;
2003 osdc = &rbd_dev->rbd_client->client->osdc; 2002 osdc = &rbd_dev->rbd_client->client->osdc;
2004 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, 2003 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2005 false, GFP_ATOMIC); 2004 false, GFP_NOIO);
2006 if (!osd_req) 2005 if (!osd_req)
2007 return NULL; /* ENOMEM */ 2006 return NULL; /* ENOMEM */
2008 2007
@@ -2504,7 +2503,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2504 bio_chain_clone_range(&bio_list, 2503 bio_chain_clone_range(&bio_list,
2505 &bio_offset, 2504 &bio_offset,
2506 clone_size, 2505 clone_size,
2507 GFP_ATOMIC); 2506 GFP_NOIO);
2508 if (!obj_request->bio_list) 2507 if (!obj_request->bio_list)
2509 goto out_unwind; 2508 goto out_unwind;
2510 } else if (type == OBJ_REQUEST_PAGES) { 2509 } else if (type == OBJ_REQUEST_PAGES) {
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3127 struct rbd_device *rbd_dev = (struct rbd_device *)data; 3126 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3128 int ret; 3127 int ret;
3129 3128
3130 if (!rbd_dev)
3131 return;
3132
3133 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 3129 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3134 rbd_dev->header_name, (unsigned long long)notify_id, 3130 rbd_dev->header_name, (unsigned long long)notify_id,
3135 (unsigned int)opcode); 3131 (unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3263 3259
3264 ceph_osdc_cancel_event(rbd_dev->watch_event); 3260 ceph_osdc_cancel_event(rbd_dev->watch_event);
3265 rbd_dev->watch_event = NULL; 3261 rbd_dev->watch_event = NULL;
3262
3263 dout("%s flushing notifies\n", __func__);
3264 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3266} 3265}
3267 3266
3268/* 3267/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3642static void rbd_dev_update_size(struct rbd_device *rbd_dev) 3641static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3643{ 3642{
3644 sector_t size; 3643 sector_t size;
3645 bool removing;
3646 3644
3647 /* 3645 /*
3648 * Don't hold the lock while doing disk operations, 3646 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3649 * or lock ordering will conflict with the bdev mutex via: 3647 * try to update its size. If REMOVING is set, updating size
3650 * rbd_add() -> blkdev_get() -> rbd_open() 3648 * is just useless work since the device can't be opened.
3651 */ 3649 */
3652 spin_lock_irq(&rbd_dev->lock); 3650 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3653 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); 3651 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3654 spin_unlock_irq(&rbd_dev->lock);
3655 /*
3656 * If the device is being removed, rbd_dev->disk has
3657 * been destroyed, so don't try to update its size
3658 */
3659 if (!removing) {
3660 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3652 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3661 dout("setting size to %llu sectors", (unsigned long long)size); 3653 dout("setting size to %llu sectors", (unsigned long long)size);
3662 set_capacity(rbd_dev->disk, size); 3654 set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4191 __le64 features; 4183 __le64 features;
4192 __le64 incompat; 4184 __le64 incompat;
4193 } __attribute__ ((packed)) features_buf = { 0 }; 4185 } __attribute__ ((packed)) features_buf = { 0 };
4194 u64 incompat; 4186 u64 unsup;
4195 int ret; 4187 int ret;
4196 4188
4197 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 4189 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4204 if (ret < sizeof (features_buf)) 4196 if (ret < sizeof (features_buf))
4205 return -ERANGE; 4197 return -ERANGE;
4206 4198
4207 incompat = le64_to_cpu(features_buf.incompat); 4199 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4208 if (incompat & ~RBD_FEATURES_SUPPORTED) 4200 if (unsup) {
4201 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4202 unsup);
4209 return -ENXIO; 4203 return -ENXIO;
4204 }
4210 4205
4211 *snap_features = le64_to_cpu(features_buf.features); 4206 *snap_features = le64_to_cpu(features_buf.features);
4212 4207
@@ -5187,6 +5182,10 @@ out_err:
5187 return ret; 5182 return ret;
5188} 5183}
5189 5184
5185/*
5186 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5187 * upon return.
5188 */
5190static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5189static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5191{ 5190{
5192 int ret; 5191 int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5195 5194
5196 ret = rbd_dev_id_get(rbd_dev); 5195 ret = rbd_dev_id_get(rbd_dev);
5197 if (ret) 5196 if (ret)
5198 return ret; 5197 goto err_out_unlock;
5199 5198
5200 BUILD_BUG_ON(DEV_NAME_LEN 5199 BUILD_BUG_ON(DEV_NAME_LEN
5201 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); 5200 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5236 /* Everything's ready. Announce the disk to the world. */ 5235 /* Everything's ready. Announce the disk to the world. */
5237 5236
5238 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5237 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5239 add_disk(rbd_dev->disk); 5238 up_write(&rbd_dev->header_rwsem);
5240 5239
5240 add_disk(rbd_dev->disk);
5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, 5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5242 (unsigned long long) rbd_dev->mapping.size); 5242 (unsigned long long) rbd_dev->mapping.size);
5243 5243
@@ -5252,6 +5252,8 @@ err_out_blkdev:
5252 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5252 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5253err_out_id: 5253err_out_id:
5254 rbd_dev_id_put(rbd_dev); 5254 rbd_dev_id_put(rbd_dev);
5255err_out_unlock:
5256 up_write(&rbd_dev->header_rwsem);
5255 return ret; 5257 return ret;
5256} 5258}
5257 5259
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5442 spec = NULL; /* rbd_dev now owns this */ 5444 spec = NULL; /* rbd_dev now owns this */
5443 rbd_opts = NULL; /* rbd_dev now owns this */ 5445 rbd_opts = NULL; /* rbd_dev now owns this */
5444 5446
5447 down_write(&rbd_dev->header_rwsem);
5445 rc = rbd_dev_image_probe(rbd_dev, 0); 5448 rc = rbd_dev_image_probe(rbd_dev, 0);
5446 if (rc < 0) 5449 if (rc < 0)
5447 goto err_out_rbd_dev; 5450 goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
5471 return rc; 5474 return rc;
5472 5475
5473err_out_rbd_dev: 5476err_out_rbd_dev:
5477 up_write(&rbd_dev->header_rwsem);
5474 rbd_dev_destroy(rbd_dev); 5478 rbd_dev_destroy(rbd_dev);
5475err_out_client: 5479err_out_client:
5476 rbd_put_client(rbdc); 5480 rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
5577 return ret; 5581 return ret;
5578 5582
5579 rbd_dev_header_unwatch_sync(rbd_dev); 5583 rbd_dev_header_unwatch_sync(rbd_dev);
5580 /*
5581 * flush remaining watch callbacks - these must be complete
5582 * before the osd_client is shutdown
5583 */
5584 dout("%s: flushing notifies", __func__);
5585 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5586 5584
5587 /* 5585 /*
5588 * Don't free anything from rbd_dev->disk until after all 5586 * Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e52864bb03..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@ int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
972 } 972 }
973 } 973 }
974 974
975 pr_err("invalid dram address 0x%x\n", phyaddr); 975 pr_err("invalid dram address %pa\n", &phyaddr);
976 return -EINVAL; 976 return -EINVAL;
977} 977}
978EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info); 978EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
108 108
109 for (i = 0; i < ARRAY_SIZE(priv->bank); i++) { 109 for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
110 for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) { 110 for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
111 if (priv->bank[i].end > priv->bank[j].base || 111 if (priv->bank[i].end > priv->bank[j].base &&
112 priv->bank[i].base < priv->bank[j].end) { 112 priv->bank[i].base < priv->bank[j].end) {
113 dev_err(priv->dev, 113 dev_err(priv->dev,
114 "region overlap between bank%d and bank%d\n", 114 "region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c40309757..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/hw_random.h> 14#include <linux/hw_random.h>
15#include <linux/of.h>
15 16
16#define RNG_CTRL 0x00 17#define RNG_CTRL 0x00
17#define RNG_EN (1 << 0) 18#define RNG_EN (1 << 0)
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 02e18182fcb5..2beb396fe652 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
394 clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); 394 clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
395 } else { 395 } else {
396 clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); 396 clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
397 clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6); 397 clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
398 clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); 398 clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
399 clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); 399 clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
400 clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); 400 clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
57 return mtk_reset_deassert(rcdev, id); 57 return mtk_reset_deassert(rcdev, id);
58} 58}
59 59
60static struct reset_control_ops mtk_reset_ops = { 60static const struct reset_control_ops mtk_reset_ops = {
61 .assert = mtk_reset_assert, 61 .assert = mtk_reset_assert,
62 .deassert = mtk_reset_deassert, 62 .deassert = mtk_reset_deassert,
63 .reset = mtk_reset, 63 .reset = mtk_reset,
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
74 return 0; 74 return 0;
75} 75}
76 76
77static struct reset_control_ops mmp_clk_reset_ops = { 77static const struct reset_control_ops mmp_clk_reset_ops = {
78 .assert = mmp_clk_reset_assert, 78 .assert = mmp_clk_reset_assert,
79 .deassert = mmp_clk_reset_deassert, 79 .deassert = mmp_clk_reset_deassert,
80}; 80};
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5428efb9fbf5..3cd1af0af0d9 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -129,20 +129,10 @@ static const char * const gcc_xo_ddr_500_200[] = {
129}; 129};
130 130
131#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } 131#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
132#define P_XO 0
133#define FE_PLL_200 1
134#define FE_PLL_500 2
135#define DDRC_PLL_666 3
136
137#define DDRC_PLL_666_SDCC 1
138#define FE_PLL_125_DLY 1
139
140#define FE_PLL_WCSS2G 1
141#define FE_PLL_WCSS5G 1
142 132
143static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = { 133static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
144 F(48000000, P_XO, 1, 0, 0), 134 F(48000000, P_XO, 1, 0, 0),
145 F(200000000, FE_PLL_200, 1, 0, 0), 135 F(200000000, P_FEPLL200, 1, 0, 0),
146 { } 136 { }
147}; 137};
148 138
@@ -334,15 +324,15 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
334}; 324};
335 325
336static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = { 326static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
337 F(1843200, FE_PLL_200, 1, 144, 15625), 327 F(1843200, P_FEPLL200, 1, 144, 15625),
338 F(3686400, FE_PLL_200, 1, 288, 15625), 328 F(3686400, P_FEPLL200, 1, 288, 15625),
339 F(7372800, FE_PLL_200, 1, 576, 15625), 329 F(7372800, P_FEPLL200, 1, 576, 15625),
340 F(14745600, FE_PLL_200, 1, 1152, 15625), 330 F(14745600, P_FEPLL200, 1, 1152, 15625),
341 F(16000000, FE_PLL_200, 1, 2, 25), 331 F(16000000, P_FEPLL200, 1, 2, 25),
342 F(24000000, P_XO, 1, 1, 2), 332 F(24000000, P_XO, 1, 1, 2),
343 F(32000000, FE_PLL_200, 1, 4, 25), 333 F(32000000, P_FEPLL200, 1, 4, 25),
344 F(40000000, FE_PLL_200, 1, 1, 5), 334 F(40000000, P_FEPLL200, 1, 1, 5),
345 F(46400000, FE_PLL_200, 1, 29, 125), 335 F(46400000, P_FEPLL200, 1, 29, 125),
346 F(48000000, P_XO, 1, 0, 0), 336 F(48000000, P_XO, 1, 0, 0),
347 { } 337 { }
348}; 338};
@@ -410,9 +400,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
410}; 400};
411 401
412static const struct freq_tbl ftbl_gcc_gp_clk[] = { 402static const struct freq_tbl ftbl_gcc_gp_clk[] = {
413 F(1250000, FE_PLL_200, 1, 16, 0), 403 F(1250000, P_FEPLL200, 1, 16, 0),
414 F(2500000, FE_PLL_200, 1, 8, 0), 404 F(2500000, P_FEPLL200, 1, 8, 0),
415 F(5000000, FE_PLL_200, 1, 4, 0), 405 F(5000000, P_FEPLL200, 1, 4, 0),
416 { } 406 { }
417}; 407};
418 408
@@ -512,11 +502,11 @@ static struct clk_branch gcc_gp3_clk = {
512static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { 502static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
513 F(144000, P_XO, 1, 3, 240), 503 F(144000, P_XO, 1, 3, 240),
514 F(400000, P_XO, 1, 1, 0), 504 F(400000, P_XO, 1, 1, 0),
515 F(20000000, FE_PLL_500, 1, 1, 25), 505 F(20000000, P_FEPLL500, 1, 1, 25),
516 F(25000000, FE_PLL_500, 1, 1, 20), 506 F(25000000, P_FEPLL500, 1, 1, 20),
517 F(50000000, FE_PLL_500, 1, 1, 10), 507 F(50000000, P_FEPLL500, 1, 1, 10),
518 F(100000000, FE_PLL_500, 1, 1, 5), 508 F(100000000, P_FEPLL500, 1, 1, 5),
519 F(193000000, DDRC_PLL_666_SDCC, 1, 0, 0), 509 F(193000000, P_DDRPLL, 1, 0, 0),
520 { } 510 { }
521}; 511};
522 512
@@ -536,9 +526,9 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
536 526
537static const struct freq_tbl ftbl_gcc_apps_clk[] = { 527static const struct freq_tbl ftbl_gcc_apps_clk[] = {
538 F(48000000, P_XO, 1, 0, 0), 528 F(48000000, P_XO, 1, 0, 0),
539 F(200000000, FE_PLL_200, 1, 0, 0), 529 F(200000000, P_FEPLL200, 1, 0, 0),
540 F(500000000, FE_PLL_500, 1, 0, 0), 530 F(500000000, P_FEPLL500, 1, 0, 0),
541 F(626000000, DDRC_PLL_666, 1, 0, 0), 531 F(626000000, P_DDRPLLAPSS, 1, 0, 0),
542 { } 532 { }
543}; 533};
544 534
@@ -557,7 +547,7 @@ static struct clk_rcg2 apps_clk_src = {
557 547
558static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = { 548static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
559 F(48000000, P_XO, 1, 0, 0), 549 F(48000000, P_XO, 1, 0, 0),
560 F(100000000, FE_PLL_200, 2, 0, 0), 550 F(100000000, P_FEPLL200, 2, 0, 0),
561 { } 551 { }
562}; 552};
563 553
@@ -940,7 +930,7 @@ static struct clk_branch gcc_usb2_mock_utmi_clk = {
940}; 930};
941 931
942static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = { 932static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
943 F(2000000, FE_PLL_200, 10, 0, 0), 933 F(2000000, P_FEPLL200, 10, 0, 0),
944 { } 934 { }
945}; 935};
946 936
@@ -1007,7 +997,7 @@ static struct clk_branch gcc_usb3_mock_utmi_clk = {
1007}; 997};
1008 998
1009static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = { 999static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
1010 F(125000000, FE_PLL_125_DLY, 1, 0, 0), 1000 F(125000000, P_FEPLL125DLY, 1, 0, 0),
1011 { } 1001 { }
1012}; 1002};
1013 1003
@@ -1027,7 +1017,7 @@ static struct clk_rcg2 fephy_125m_dly_clk_src = {
1027 1017
1028static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = { 1018static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
1029 F(48000000, P_XO, 1, 0, 0), 1019 F(48000000, P_XO, 1, 0, 0),
1030 F(250000000, FE_PLL_WCSS2G, 1, 0, 0), 1020 F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
1031 { } 1021 { }
1032}; 1022};
1033 1023
@@ -1097,7 +1087,7 @@ static struct clk_branch gcc_wcss2g_rtc_clk = {
1097 1087
1098static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = { 1088static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
1099 F(48000000, P_XO, 1, 0, 0), 1089 F(48000000, P_XO, 1, 0, 0),
1100 F(250000000, FE_PLL_WCSS5G, 1, 0, 0), 1090 F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
1101 { } 1091 { }
1102}; 1092};
1103 1093
@@ -1325,6 +1315,16 @@ MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
1325 1315
1326static int gcc_ipq4019_probe(struct platform_device *pdev) 1316static int gcc_ipq4019_probe(struct platform_device *pdev)
1327{ 1317{
1318 struct device *dev = &pdev->dev;
1319
1320 clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
1321 clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
1322 clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
1323 clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
1324 clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
1325 clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
1326 clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
1327
1328 return qcom_cc_probe(pdev, &gcc_ipq4019_desc); 1328 return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
1329} 1329}
1330 1330
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
55 return regmap_update_bits(rst->regmap, map->reg, mask, 0); 55 return regmap_update_bits(rst->regmap, map->reg, mask, 0);
56} 56}
57 57
58struct reset_control_ops qcom_reset_ops = { 58const struct reset_control_ops qcom_reset_ops = {
59 .reset = qcom_reset, 59 .reset = qcom_reset,
60 .assert = qcom_reset_assert, 60 .assert = qcom_reset_assert,
61 .deassert = qcom_reset_deassert, 61 .deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
32#define to_qcom_reset_controller(r) \ 32#define to_qcom_reset_controller(r) \
33 container_of(r, struct qcom_reset_controller, rcdev); 33 container_of(r, struct qcom_reset_controller, rcdev);
34 34
35extern struct reset_control_ops qcom_reset_ops; 35extern const struct reset_control_ops qcom_reset_ops;
36 36
37#endif 37#endif
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
81 return 0; 81 return 0;
82} 82}
83 83
84static struct reset_control_ops rockchip_softrst_ops = { 84static const struct reset_control_ops rockchip_softrst_ops = {
85 .assert = rockchip_softrst_assert, 85 .assert = rockchip_softrst_assert,
86 .deassert = rockchip_softrst_deassert, 86 .deassert = rockchip_softrst_deassert,
87}; 87};
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
1423 return 0; 1423 return 0;
1424} 1424}
1425 1425
1426static struct reset_control_ops atlas7_rst_ops = { 1426static const struct reset_control_ops atlas7_rst_ops = {
1427 .reset = atlas7_reset_module, 1427 .reset = atlas7_reset_module,
1428}; 1428};
1429 1429
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
85 return 0; 85 return 0;
86} 86}
87 87
88static struct reset_control_ops sunxi_ve_reset_ops = { 88static const struct reset_control_ops sunxi_ve_reset_ops = {
89 .assert = sunxi_ve_reset_assert, 89 .assert = sunxi_ve_reset_assert,
90 .deassert = sunxi_ve_reset_deassert, 90 .deassert = sunxi_ve_reset_deassert,
91}; 91};
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
83 return 0; 83 return 0;
84} 84}
85 85
86static struct reset_control_ops sun9i_mmc_reset_ops = { 86static const struct reset_control_ops sun9i_mmc_reset_ops = {
87 .assert = sun9i_mmc_reset_assert, 87 .assert = sun9i_mmc_reset_assert,
88 .deassert = sun9i_mmc_reset_deassert, 88 .deassert = sun9i_mmc_reset_deassert,
89}; 89};
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5432b1c198a4..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
76 return 0; 76 return 0;
77} 77}
78 78
79static struct reset_control_ops sunxi_usb_reset_ops = { 79static const struct reset_control_ops sunxi_usb_reset_ops = {
80 .assert = sunxi_usb_reset_assert, 80 .assert = sunxi_usb_reset_assert,
81 .deassert = sunxi_usb_reset_deassert, 81 .deassert = sunxi_usb_reset_deassert,
82}; 82};
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
271 } 271 }
272} 272}
273 273
274static struct reset_control_ops rst_ops = { 274static const struct reset_control_ops rst_ops = {
275 .assert = tegra_clk_rst_assert, 275 .assert = tegra_clk_rst_assert,
276 .deassert = tegra_clk_rst_deassert, 276 .deassert = tegra_clk_rst_deassert,
277}; 277};
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
42 42
43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, 43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
44 32, clocksource_mmio_readl_up); 44 32, clocksource_mmio_readl_up);
45 if (!ret) { 45 if (ret) {
46 pr_err("%s: registration failed\n", np->full_name); 46 pr_err("%s: registration failed\n", np->full_name);
47 return; 47 return;
48 } 48 }
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
4 * Copyright (C) 2014 Linaro. 4 * Copyright (C) 2014 Linaro.
5 * Viresh Kumar <viresh.kumar@linaro.org> 5 * Viresh Kumar <viresh.kumar@linaro.org>
6 * 6 *
7 * The OPP code in function set_target() is reused from
8 * drivers/cpufreq/omap-cpufreq.c
9 *
10 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..c4acfc5273b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1491{ 1491{
1492 unsigned int new_freq; 1492 unsigned int new_freq;
1493 1493
1494 if (cpufreq_suspended)
1495 return 0;
1496
1494 new_freq = cpufreq_driver->get(policy->cpu); 1497 new_freq = cpufreq_driver->get(policy->cpu);
1495 if (!new_freq) 1498 if (!new_freq)
1496 return 0; 1499 return 0;
@@ -1554,21 +1557,25 @@ void cpufreq_suspend(void)
1554 if (!cpufreq_driver) 1557 if (!cpufreq_driver)
1555 return; 1558 return;
1556 1559
1557 if (!has_target()) 1560 if (!has_target() && !cpufreq_driver->suspend)
1558 goto suspend; 1561 goto suspend;
1559 1562
1560 pr_debug("%s: Suspending Governors\n", __func__); 1563 pr_debug("%s: Suspending Governors\n", __func__);
1561 1564
1562 for_each_active_policy(policy) { 1565 for_each_active_policy(policy) {
1563 down_write(&policy->rwsem); 1566 if (has_target()) {
1564 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1567 down_write(&policy->rwsem);
1565 up_write(&policy->rwsem); 1568 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1569 up_write(&policy->rwsem);
1566 1570
1567 if (ret) 1571 if (ret) {
1568 pr_err("%s: Failed to stop governor for policy: %p\n", 1572 pr_err("%s: Failed to stop governor for policy: %p\n",
1569 __func__, policy); 1573 __func__, policy);
1570 else if (cpufreq_driver->suspend 1574 continue;
1571 && cpufreq_driver->suspend(policy)) 1575 }
1576 }
1577
1578 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1572 pr_err("%s: Failed to suspend driver: %p\n", __func__, 1579 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1573 policy); 1580 policy);
1574 } 1581 }
@@ -1593,7 +1600,7 @@ void cpufreq_resume(void)
1593 1600
1594 cpufreq_suspended = false; 1601 cpufreq_suspended = false;
1595 1602
1596 if (!has_target()) 1603 if (!has_target() && !cpufreq_driver->resume)
1597 return; 1604 return;
1598 1605
1599 pr_debug("%s: Resuming Governors\n", __func__); 1606 pr_debug("%s: Resuming Governors\n", __func__);
@@ -1602,7 +1609,7 @@ void cpufreq_resume(void)
1602 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { 1609 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1603 pr_err("%s: Failed to resume driver: %p\n", __func__, 1610 pr_err("%s: Failed to resume driver: %p\n", __func__,
1604 policy); 1611 policy);
1605 } else { 1612 } else if (has_target()) {
1606 down_write(&policy->rwsem); 1613 down_write(&policy->rwsem);
1607 ret = cpufreq_start_governor(policy); 1614 ret = cpufreq_start_governor(policy);
1608 up_write(&policy->rwsem); 1615 up_write(&policy->rwsem);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
194 j_cdbs->prev_cpu_wall = cur_wall_time; 194 j_cdbs->prev_cpu_wall = cur_wall_time;
195 195
196 if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 196 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
197 idle_time = 0; 197 j_cdbs->prev_cpu_idle = cur_idle_time;
198 } else {
199 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
200 j_cdbs->prev_cpu_idle = cur_idle_time;
201 }
202 198
203 if (ignore_nice) { 199 if (ignore_nice) {
204 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b644526fd59..b230ebaae66c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
64 return ret; 64 return ret;
65} 65}
66 66
67/**
68 * struct sample - Store performance sample
69 * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
70 * performance during last sample period
71 * @busy_scaled: Scaled busy value which is used to calculate next
72 * P state. This can be different than core_pct_busy
73 * to account for cpu idle period
74 * @aperf: Difference of actual performance frequency clock count
75 * read from APERF MSR between last and current sample
76 * @mperf: Difference of maximum performance frequency clock count
77 * read from MPERF MSR between last and current sample
78 * @tsc: Difference of time stamp counter between last and
79 * current sample
80 * @freq: Effective frequency calculated from APERF/MPERF
81 * @time: Current time from scheduler
82 *
83 * This structure is used in the cpudata structure to store performance sample
84 * data for choosing next P State.
85 */
67struct sample { 86struct sample {
68 int32_t core_pct_busy; 87 int32_t core_pct_busy;
69 int32_t busy_scaled; 88 int32_t busy_scaled;
@@ -74,6 +93,20 @@ struct sample {
74 u64 time; 93 u64 time;
75}; 94};
76 95
96/**
97 * struct pstate_data - Store P state data
98 * @current_pstate: Current requested P state
99 * @min_pstate: Min P state possible for this platform
100 * @max_pstate: Max P state possible for this platform
101 * @max_pstate_physical:This is physical Max P state for a processor
102 * This can be higher than the max_pstate which can
103 * be limited by platform thermal design power limits
104 * @scaling: Scaling factor to convert frequency to cpufreq
105 * frequency units
106 * @turbo_pstate: Max Turbo P state possible for this platform
107 *
108 * Stores the per cpu model P state limits and current P state.
109 */
77struct pstate_data { 110struct pstate_data {
78 int current_pstate; 111 int current_pstate;
79 int min_pstate; 112 int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
83 int turbo_pstate; 116 int turbo_pstate;
84}; 117};
85 118
119/**
120 * struct vid_data - Stores voltage information data
121 * @min: VID data for this platform corresponding to
122 * the lowest P state
123 * @max: VID data corresponding to the highest P State.
124 * @turbo: VID data for turbo P state
125 * @ratio: Ratio of (vid max - vid min) /
126 * (max P state - Min P State)
127 *
128 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
129 * This data is used in Atom platforms, where in addition to target P state,
130 * the voltage data needs to be specified to select next P State.
131 */
86struct vid_data { 132struct vid_data {
87 int min; 133 int min;
88 int max; 134 int max;
@@ -90,6 +136,18 @@ struct vid_data {
90 int32_t ratio; 136 int32_t ratio;
91}; 137};
92 138
139/**
140 * struct _pid - Stores PID data
141 * @setpoint: Target set point for busyness or performance
142 * @integral: Storage for accumulated error values
143 * @p_gain: PID proportional gain
144 * @i_gain: PID integral gain
145 * @d_gain: PID derivative gain
146 * @deadband: PID deadband
147 * @last_err: Last error storage for integral part of PID calculation
148 *
149 * Stores PID coefficients and last error for PID controller.
150 */
93struct _pid { 151struct _pid {
94 int setpoint; 152 int setpoint;
95 int32_t integral; 153 int32_t integral;
@@ -100,6 +158,23 @@ struct _pid {
100 int32_t last_err; 158 int32_t last_err;
101}; 159};
102 160
161/**
162 * struct cpudata - Per CPU instance data storage
163 * @cpu: CPU number for this instance data
164 * @update_util: CPUFreq utility callback information
165 * @pstate: Stores P state limits for this CPU
166 * @vid: Stores VID limits for this CPU
167 * @pid: Stores PID parameters for this CPU
168 * @last_sample_time: Last Sample time
169 * @prev_aperf: Last APERF value read from APERF MSR
170 * @prev_mperf: Last MPERF value read from MPERF MSR
171 * @prev_tsc: Last timestamp counter (TSC) value
172 * @prev_cummulative_iowait: IO Wait time difference from last and
173 * current sample
174 * @sample: Storage for storing last Sample data
175 *
176 * This structure stores per CPU instance data for all CPUs.
177 */
103struct cpudata { 178struct cpudata {
104 int cpu; 179 int cpu;
105 180
@@ -118,6 +193,19 @@ struct cpudata {
118}; 193};
119 194
120static struct cpudata **all_cpu_data; 195static struct cpudata **all_cpu_data;
196
197/**
198 * struct pid_adjust_policy - Stores static PID configuration data
199 * @sample_rate_ms: PID calculation sample rate in ms
200 * @sample_rate_ns: Sample rate calculation in ns
201 * @deadband: PID deadband
202 * @setpoint: PID Setpoint
203 * @p_gain_pct: PID proportional gain
204 * @i_gain_pct: PID integral gain
205 * @d_gain_pct: PID derivative gain
206 *
207 * Stores per CPU model static PID configuration data.
208 */
121struct pstate_adjust_policy { 209struct pstate_adjust_policy {
122 int sample_rate_ms; 210 int sample_rate_ms;
123 s64 sample_rate_ns; 211 s64 sample_rate_ns;
@@ -128,6 +216,20 @@ struct pstate_adjust_policy {
128 int i_gain_pct; 216 int i_gain_pct;
129}; 217};
130 218
219/**
220 * struct pstate_funcs - Per CPU model specific callbacks
221 * @get_max: Callback to get maximum non turbo effective P state
222 * @get_max_physical: Callback to get maximum non turbo physical P state
223 * @get_min: Callback to get minimum P state
224 * @get_turbo: Callback to get turbo P state
225 * @get_scaling: Callback to get frequency scaling factor
226 * @get_val: Callback to convert P state to actual MSR write value
227 * @get_vid: Callback to get VID data for Atom platforms
228 * @get_target_pstate: Callback to a function to calculate next P state to use
229 *
230 * Core and Atom CPU models have different way to get P State limits. This
231 * structure is used to store those callbacks.
232 */
131struct pstate_funcs { 233struct pstate_funcs {
132 int (*get_max)(void); 234 int (*get_max)(void);
133 int (*get_max_physical)(void); 235 int (*get_max_physical)(void);
@@ -139,6 +241,11 @@ struct pstate_funcs {
139 int32_t (*get_target_pstate)(struct cpudata *); 241 int32_t (*get_target_pstate)(struct cpudata *);
140}; 242};
141 243
244/**
245 * struct cpu_defaults- Per CPU model default config data
246 * @pid_policy: PID config data
247 * @funcs: Callback function data
248 */
142struct cpu_defaults { 249struct cpu_defaults {
143 struct pstate_adjust_policy pid_policy; 250 struct pstate_adjust_policy pid_policy;
144 struct pstate_funcs funcs; 251 struct pstate_funcs funcs;
@@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 258static struct pstate_funcs pstate_funcs;
152static int hwp_active; 259static int hwp_active;
153 260
261
262/**
263 * struct perf_limits - Store user and policy limits
264 * @no_turbo: User requested turbo state from intel_pstate sysfs
265 * @turbo_disabled: Platform turbo status either from msr
266 * MSR_IA32_MISC_ENABLE or when maximum available pstate
267 * matches the maximum turbo pstate
268 * @max_perf_pct: Effective maximum performance limit in percentage, this
269 * is minimum of either limits enforced by cpufreq policy
270 * or limits from user set limits via intel_pstate sysfs
271 * @min_perf_pct: Effective minimum performance limit in percentage, this
272 * is maximum of either limits enforced by cpufreq policy
273 * or limits from user set limits via intel_pstate sysfs
274 * @max_perf: This is a scaled value between 0 to 255 for max_perf_pct
275 * This value is used to limit max pstate
276 * @min_perf: This is a scaled value between 0 to 255 for min_perf_pct
277 * This value is used to limit min pstate
278 * @max_policy_pct: The maximum performance in percentage enforced by
279 * cpufreq setpolicy interface
280 * @max_sysfs_pct: The maximum performance in percentage enforced by
281 * intel pstate sysfs interface
282 * @min_policy_pct: The minimum performance in percentage enforced by
283 * cpufreq setpolicy interface
284 * @min_sysfs_pct: The minimum performance in percentage enforced by
285 * intel pstate sysfs interface
286 *
287 * Storage for user and policy defined limits.
288 */
154struct perf_limits { 289struct perf_limits {
155 int no_turbo; 290 int no_turbo;
156 int turbo_disabled; 291 int turbo_disabled;
@@ -318,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
318 } 453 }
319} 454}
320 455
456static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
457{
458 if (hwp_active)
459 intel_pstate_hwp_set(policy->cpus);
460
461 return 0;
462}
463
321static void intel_pstate_hwp_set_online_cpus(void) 464static void intel_pstate_hwp_set_online_cpus(void)
322{ 465{
323 get_online_cpus(); 466 get_online_cpus();
@@ -678,6 +821,11 @@ static int core_get_max_pstate(void)
678 if (err) 821 if (err)
679 goto skip_tar; 822 goto skip_tar;
680 823
824 /* For level 1 and 2, bits[23:16] contain the ratio */
825 if (tdp_ctrl)
826 tdp_ratio >>= 16;
827
828 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
681 if (tdp_ratio - 1 == tar) { 829 if (tdp_ratio - 1 == tar) {
682 max_pstate = tar; 830 max_pstate = tar;
683 pr_debug("max_pstate=TAC %x\n", max_pstate); 831 pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -910,13 +1058,21 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
910 cpu->prev_aperf = aperf; 1058 cpu->prev_aperf = aperf;
911 cpu->prev_mperf = mperf; 1059 cpu->prev_mperf = mperf;
912 cpu->prev_tsc = tsc; 1060 cpu->prev_tsc = tsc;
913 return true; 1061 /*
1062 * First time this function is invoked in a given cycle, all of the
1063 * previous sample data fields are equal to zero or stale and they must
1064 * be populated with meaningful numbers for things to work, so assume
1065 * that sample.time will always be reset before setting the utilization
1066 * update hook and make the caller skip the sample then.
1067 */
1068 return !!cpu->last_sample_time;
914} 1069}
915 1070
916static inline int32_t get_avg_frequency(struct cpudata *cpu) 1071static inline int32_t get_avg_frequency(struct cpudata *cpu)
917{ 1072{
918 return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf * 1073 return fp_toint(mul_fp(cpu->sample.core_pct_busy,
919 cpu->pstate.scaling, cpu->sample.mperf); 1074 int_tofp(cpu->pstate.max_pstate_physical *
1075 cpu->pstate.scaling / 100)));
920} 1076}
921 1077
922static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) 1078static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -959,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
959 int32_t core_busy, max_pstate, current_pstate, sample_ratio; 1115 int32_t core_busy, max_pstate, current_pstate, sample_ratio;
960 u64 duration_ns; 1116 u64 duration_ns;
961 1117
962 intel_pstate_calc_busy(cpu);
963
964 /* 1118 /*
965 * core_busy is the ratio of actual performance to max 1119 * core_busy is the ratio of actual performance to max
966 * max_pstate is the max non turbo pstate available 1120 * max_pstate is the max non turbo pstate available
@@ -984,11 +1138,14 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
984 * enough period of time to adjust our busyness. 1138 * enough period of time to adjust our busyness.
985 */ 1139 */
986 duration_ns = cpu->sample.time - cpu->last_sample_time; 1140 duration_ns = cpu->sample.time - cpu->last_sample_time;
987 if ((s64)duration_ns > pid_params.sample_rate_ns * 3 1141 if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
988 && cpu->last_sample_time > 0) {
989 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1142 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
990 int_tofp(duration_ns)); 1143 int_tofp(duration_ns));
991 core_busy = mul_fp(core_busy, sample_ratio); 1144 core_busy = mul_fp(core_busy, sample_ratio);
1145 } else {
1146 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
1147 if (sample_ratio < int_tofp(1))
1148 core_busy = 0;
992 } 1149 }
993 1150
994 cpu->sample.busy_scaled = core_busy; 1151 cpu->sample.busy_scaled = core_busy;
@@ -1041,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1041 if ((s64)delta_ns >= pid_params.sample_rate_ns) { 1198 if ((s64)delta_ns >= pid_params.sample_rate_ns) {
1042 bool sample_taken = intel_pstate_sample(cpu, time); 1199 bool sample_taken = intel_pstate_sample(cpu, time);
1043 1200
1044 if (sample_taken && !hwp_active) 1201 if (sample_taken) {
1045 intel_pstate_adjust_busy_pstate(cpu); 1202 intel_pstate_calc_busy(cpu);
1203 if (!hwp_active)
1204 intel_pstate_adjust_busy_pstate(cpu);
1205 }
1046 } 1206 }
1047} 1207}
1048 1208
@@ -1100,10 +1260,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1100 intel_pstate_get_cpu_pstates(cpu); 1260 intel_pstate_get_cpu_pstates(cpu);
1101 1261
1102 intel_pstate_busy_pid_reset(cpu); 1262 intel_pstate_busy_pid_reset(cpu);
1103 intel_pstate_sample(cpu, 0);
1104 1263
1105 cpu->update_util.func = intel_pstate_update_util; 1264 cpu->update_util.func = intel_pstate_update_util;
1106 cpufreq_set_update_util_data(cpunum, &cpu->update_util);
1107 1265
1108 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); 1266 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
1109 1267
@@ -1122,22 +1280,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1122 return get_avg_frequency(cpu); 1280 return get_avg_frequency(cpu);
1123} 1281}
1124 1282
1283static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
1284{
1285 struct cpudata *cpu = all_cpu_data[cpu_num];
1286
1287 /* Prevent intel_pstate_update_util() from using stale data. */
1288 cpu->sample.time = 0;
1289 cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
1290}
1291
1292static void intel_pstate_clear_update_util_hook(unsigned int cpu)
1293{
1294 cpufreq_set_update_util_data(cpu, NULL);
1295 synchronize_sched();
1296}
1297
1298static void intel_pstate_set_performance_limits(struct perf_limits *limits)
1299{
1300 limits->no_turbo = 0;
1301 limits->turbo_disabled = 0;
1302 limits->max_perf_pct = 100;
1303 limits->max_perf = int_tofp(1);
1304 limits->min_perf_pct = 100;
1305 limits->min_perf = int_tofp(1);
1306 limits->max_policy_pct = 100;
1307 limits->max_sysfs_pct = 100;
1308 limits->min_policy_pct = 0;
1309 limits->min_sysfs_pct = 0;
1310}
1311
1125static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1312static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1126{ 1313{
1127 if (!policy->cpuinfo.max_freq) 1314 if (!policy->cpuinfo.max_freq)
1128 return -ENODEV; 1315 return -ENODEV;
1129 1316
1130 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && 1317 intel_pstate_clear_update_util_hook(policy->cpu);
1131 policy->max >= policy->cpuinfo.max_freq) { 1318
1132 pr_debug("intel_pstate: set performance\n"); 1319 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
1133 limits = &performance_limits; 1320 limits = &performance_limits;
1134 if (hwp_active) 1321 if (policy->max >= policy->cpuinfo.max_freq) {
1135 intel_pstate_hwp_set(policy->cpus); 1322 pr_debug("intel_pstate: set performance\n");
1136 return 0; 1323 intel_pstate_set_performance_limits(limits);
1324 goto out;
1325 }
1326 } else {
1327 pr_debug("intel_pstate: set powersave\n");
1328 limits = &powersave_limits;
1137 } 1329 }
1138 1330
1139 pr_debug("intel_pstate: set powersave\n");
1140 limits = &powersave_limits;
1141 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1331 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1142 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1332 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1143 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, 1333 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1163,8 +1353,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1163 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1353 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1164 int_tofp(100)); 1354 int_tofp(100));
1165 1355
1166 if (hwp_active) 1356 out:
1167 intel_pstate_hwp_set(policy->cpus); 1357 intel_pstate_set_update_util_hook(policy->cpu);
1358
1359 intel_pstate_hwp_set_policy(policy);
1168 1360
1169 return 0; 1361 return 0;
1170} 1362}
@@ -1187,8 +1379,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1187 1379
1188 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); 1380 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
1189 1381
1190 cpufreq_set_update_util_data(cpu_num, NULL); 1382 intel_pstate_clear_update_util_hook(cpu_num);
1191 synchronize_sched();
1192 1383
1193 if (hwp_active) 1384 if (hwp_active)
1194 return; 1385 return;
@@ -1229,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
1229 .flags = CPUFREQ_CONST_LOOPS, 1420 .flags = CPUFREQ_CONST_LOOPS,
1230 .verify = intel_pstate_verify_policy, 1421 .verify = intel_pstate_verify_policy,
1231 .setpolicy = intel_pstate_set_policy, 1422 .setpolicy = intel_pstate_set_policy,
1423 .resume = intel_pstate_hwp_set_policy,
1232 .get = intel_pstate_get, 1424 .get = intel_pstate_get,
1233 .init = intel_pstate_cpu_init, 1425 .init = intel_pstate_cpu_init,
1234 .stop_cpu = intel_pstate_stop_cpu, 1426 .stop_cpu = intel_pstate_stop_cpu,
@@ -1455,8 +1647,7 @@ out:
1455 get_online_cpus(); 1647 get_online_cpus();
1456 for_each_online_cpu(cpu) { 1648 for_each_online_cpu(cpu) {
1457 if (all_cpu_data[cpu]) { 1649 if (all_cpu_data[cpu]) {
1458 cpufreq_set_update_util_data(cpu, NULL); 1650 intel_pstate_clear_update_util_hook(cpu);
1459 synchronize_sched();
1460 kfree(all_cpu_data[cpu]); 1651 kfree(all_cpu_data[cpu]);
1461 } 1652 }
1462 } 1653 }
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a9c659f58974..04042038ec4b 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
259{ 259{
260 int ret; 260 int ret;
261 261
262 if ((!of_machine_is_compatible("st,stih407")) &&
263 (!of_machine_is_compatible("st,stih410")))
264 return -ENODEV;
265
262 ddata.cpu = get_cpu_device(0); 266 ddata.cpu = get_cpu_device(0);
263 if (!ddata.cpu) { 267 if (!ddata.cpu) {
264 dev_err(ddata.cpu, "Failed to get device for CPU0\n"); 268 dev_err(ddata.cpu, "Failed to get device for CPU0\n");
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d5fdfb..e342565e8715 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
50 * call the CPU ops suspend protocol with idle index as a 50 * call the CPU ops suspend protocol with idle index as a
51 * parameter. 51 * parameter.
52 */ 52 */
53 arm_cpuidle_suspend(idx); 53 ret = arm_cpuidle_suspend(idx);
54 54
55 cpu_pm_exit(); 55 cpu_pm_exit();
56 } 56 }
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); 225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
226 struct ccp_aes_cmac_exp_ctx state; 226 struct ccp_aes_cmac_exp_ctx state;
227 227
228 /* Don't let anything leak to 'out' */
229 memset(&state, 0, sizeof(state));
230
228 state.null_msg = rctx->null_msg; 231 state.null_msg = rctx->null_msg;
229 memcpy(state.iv, rctx->iv, sizeof(state.iv)); 232 memcpy(state.iv, rctx->iv, sizeof(state.iv));
230 state.buf_count = rctx->buf_count; 233 state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
213 struct ccp_sha_exp_ctx state; 213 struct ccp_sha_exp_ctx state;
214 214
215 /* Don't let anything leak to 'out' */
216 memset(&state, 0, sizeof(state));
217
215 state.type = rctx->type; 218 state.type = rctx->type;
216 state.msg_bits = rctx->msg_bits; 219 state.msg_bits = rctx->msg_bits;
217 state.first = rctx->first; 220 state.first = rctx->first;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
63 ptr->eptr = upper_32_bits(dma_addr); 63 ptr->eptr = upper_32_bits(dma_addr);
64} 64}
65 65
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
67 bool is_sec1) 75 bool is_sec1)
68{ 76{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1091 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1092 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1085 : DMA_TO_DEVICE); 1093 : DMA_TO_DEVICE);
1086
1087 /* hmac data */ 1094 /* hmac data */
1088 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1095 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1089 if (sg_count > 1 && 1096 if (sg_count > 1 &&
1090 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, 1097 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1091 areq->assoclen, 1098 areq->assoclen,
1092 &edesc->link_tbl[tbl_off])) > 1) { 1099 &edesc->link_tbl[tbl_off])) > 1) {
1093 tbl_off += ret;
1094
1095 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * 1100 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1096 sizeof(struct talitos_ptr), 0); 1101 sizeof(struct talitos_ptr), 0);
1097 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; 1102 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1098 1103
1099 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1104 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1100 edesc->dma_len, DMA_BIDIRECTIONAL); 1105 edesc->dma_len, DMA_BIDIRECTIONAL);
1106
1107 tbl_off += ret;
1101 } else { 1108 } else {
1102 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); 1109 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1103 desc->ptr[1].j_extent = 0; 1110 desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1126 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1133 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1127 sg_link_tbl_len += authsize; 1134 sg_link_tbl_len += authsize;
1128 1135
1129 if (sg_count > 1 && 1136 if (sg_count == 1) {
1130 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, 1137 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
1131 sg_link_tbl_len, 1138 areq->assoclen, 0);
1132 &edesc->link_tbl[tbl_off])) > 1) { 1139 } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
1133 tbl_off += ret; 1140 areq->assoclen, sg_link_tbl_len,
1141 &edesc->link_tbl[tbl_off])) >
1142 1) {
1134 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1143 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1135 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + 1144 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1136 tbl_off * 1145 tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1147 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, 1148 edesc->dma_len,
1140 DMA_BIDIRECTIONAL); 1149 DMA_BIDIRECTIONAL);
1141 } else 1150 tbl_off += ret;
1142 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); 1151 } else {
1152 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1153 }
1143 1154
1144 /* cipher out */ 1155 /* cipher out */
1145 desc->ptr[5].len = cpu_to_be16(cryptlen); 1156 desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1151 1162
1152 edesc->icv_ool = false; 1163 edesc->icv_ool = false;
1153 1164
1154 if (sg_count > 1 && 1165 if (sg_count == 1) {
1155 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, 1166 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
1167 areq->assoclen, 0);
1168 } else if ((sg_count =
1169 sg_to_link_tbl_offset(areq->dst, sg_count,
1156 areq->assoclen, cryptlen, 1170 areq->assoclen, cryptlen,
1157 &edesc->link_tbl[tbl_off])) > 1171 &edesc->link_tbl[tbl_off])) > 1) {
1158 1) {
1159 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1172 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1160 1173
1161 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + 1174 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1178 edesc->dma_len, DMA_BIDIRECTIONAL); 1191 edesc->dma_len, DMA_BIDIRECTIONAL);
1179 1192
1180 edesc->icv_ool = true; 1193 edesc->icv_ool = true;
1181 } else 1194 } else {
1182 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); 1195 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
1196 }
1183 1197
1184 /* iv out */ 1198 /* iv out */
1185 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1199 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
2629 struct talitos_alg_template algt; 2643 struct talitos_alg_template algt;
2630}; 2644};
2631 2645
2632static int talitos_cra_init(struct crypto_tfm *tfm) 2646static int talitos_init_common(struct talitos_ctx *ctx,
2647 struct talitos_crypto_alg *talitos_alg)
2633{ 2648{
2634 struct crypto_alg *alg = tfm->__crt_alg;
2635 struct talitos_crypto_alg *talitos_alg;
2636 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2637 struct talitos_private *priv; 2649 struct talitos_private *priv;
2638 2650
2639 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2640 talitos_alg = container_of(__crypto_ahash_alg(alg),
2641 struct talitos_crypto_alg,
2642 algt.alg.hash);
2643 else
2644 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2645 algt.alg.crypto);
2646
2647 /* update context with ptr to dev */ 2651 /* update context with ptr to dev */
2648 ctx->dev = talitos_alg->dev; 2652 ctx->dev = talitos_alg->dev;
2649 2653
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
2661 return 0; 2665 return 0;
2662} 2666}
2663 2667
2668static int talitos_cra_init(struct crypto_tfm *tfm)
2669{
2670 struct crypto_alg *alg = tfm->__crt_alg;
2671 struct talitos_crypto_alg *talitos_alg;
2672 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2675 talitos_alg = container_of(__crypto_ahash_alg(alg),
2676 struct talitos_crypto_alg,
2677 algt.alg.hash);
2678 else
2679 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2680 algt.alg.crypto);
2681
2682 return talitos_init_common(ctx, talitos_alg);
2683}
2684
2664static int talitos_cra_init_aead(struct crypto_aead *tfm) 2685static int talitos_cra_init_aead(struct crypto_aead *tfm)
2665{ 2686{
2666 talitos_cra_init(crypto_aead_tfm(tfm)); 2687 struct aead_alg *alg = crypto_aead_alg(tfm);
2667 return 0; 2688 struct talitos_crypto_alg *talitos_alg;
2689 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2690
2691 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2692 algt.alg.aead);
2693
2694 return talitos_init_common(ctx, talitos_alg);
2668} 2695}
2669 2696
2670static int talitos_cra_init_ahash(struct crypto_tfm *tfm) 2697static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
130static void dwc_initialize(struct dw_dma_chan *dwc) 130static void dwc_initialize(struct dw_dma_chan *dwc)
131{ 131{
132 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 132 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
133 struct dw_dma_slave *dws = dwc->chan.private;
134 u32 cfghi = DWC_CFGH_FIFO_MODE; 133 u32 cfghi = DWC_CFGH_FIFO_MODE;
135 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); 134 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
136 135
137 if (dwc->initialized == true) 136 if (dwc->initialized == true)
138 return; 137 return;
139 138
140 if (dws) { 139 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
141 /* 140 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
142 * We need controller-specific data to set up slave
143 * transfers.
144 */
145 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
146
147 cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
148 cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
149 } else {
150 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
151 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
152 }
153 141
154 channel_writel(dwc, CFG_LO, cfglo); 142 channel_writel(dwc, CFG_LO, cfglo);
155 channel_writel(dwc, CFG_HI, cfghi); 143 channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
941 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 929 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
942 struct dw_dma_slave *dws = param; 930 struct dw_dma_slave *dws = param;
943 931
944 if (!dws || dws->dma_dev != chan->device->dev) 932 if (dws->dma_dev != chan->device->dev)
945 return false; 933 return false;
946 934
947 /* We have to copy data since dws can be temporary storage */ 935 /* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1165 * doesn't mean what you think it means), and status writeback. 1153 * doesn't mean what you think it means), and status writeback.
1166 */ 1154 */
1167 1155
1156 /*
1157 * We need controller-specific data to set up slave transfers.
1158 */
1159 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1160 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1161 return -EINVAL;
1162 }
1163
1168 /* Enable controller here if needed */ 1164 /* Enable controller here if needed */
1169 if (!dw->in_use) 1165 if (!dw->in_use)
1170 dw_dma_on(dw); 1166 dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1226 spin_lock_irqsave(&dwc->lock, flags); 1222 spin_lock_irqsave(&dwc->lock, flags);
1227 list_splice_init(&dwc->free_list, &list); 1223 list_splice_init(&dwc->free_list, &list);
1228 dwc->descs_allocated = 0; 1224 dwc->descs_allocated = 0;
1225
1226 /* Clear custom channel configuration */
1227 dwc->src_id = 0;
1228 dwc->dst_id = 0;
1229
1230 dwc->src_master = 0;
1231 dwc->dst_master = 0;
1232
1229 dwc->initialized = false; 1233 dwc->initialized = false;
1230 1234
1231 /* Disable interrupts */ 1235 /* Disable interrupts */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e774f8..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1238 struct edma_desc *edesc; 1238 struct edma_desc *edesc;
1239 dma_addr_t src_addr, dst_addr; 1239 dma_addr_t src_addr, dst_addr;
1240 enum dma_slave_buswidth dev_width; 1240 enum dma_slave_buswidth dev_width;
1241 bool use_intermediate = false;
1241 u32 burst; 1242 u32 burst;
1242 int i, ret, nslots; 1243 int i, ret, nslots;
1243 1244
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1279 * but the synchronization is difficult to achieve with Cyclic and 1280 * but the synchronization is difficult to achieve with Cyclic and
1280 * cannot be guaranteed, so we error out early. 1281 * cannot be guaranteed, so we error out early.
1281 */ 1282 */
1282 if (nslots > MAX_NR_SG) 1283 if (nslots > MAX_NR_SG) {
1283 return NULL; 1284 /*
1285 * If the burst and period sizes are the same, we can put
1286 * the full buffer into a single period and activate
1287 * intermediate interrupts. This will produce interrupts
1288 * after each burst, which is also after each desired period.
1289 */
1290 if (burst == period_len) {
1291 period_len = buf_len;
1292 nslots = 2;
1293 use_intermediate = true;
1294 } else {
1295 return NULL;
1296 }
1297 }
1284 1298
1285 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1299 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1286 GFP_ATOMIC); 1300 GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1358 /* 1372 /*
1359 * Enable period interrupt only if it is requested 1373 * Enable period interrupt only if it is requested
1360 */ 1374 */
1361 if (tx_flags & DMA_PREP_INTERRUPT) 1375 if (tx_flags & DMA_PREP_INTERRUPT) {
1362 edesc->pset[i].param.opt |= TCINTEN; 1376 edesc->pset[i].param.opt |= TCINTEN;
1377
1378 /* Also enable intermediate interrupts if necessary */
1379 if (use_intermediate)
1380 edesc->pset[i].param.opt |= ITCINTEN;
1381 }
1363 } 1382 }
1364 1383
1365 /* Place the cyclic channel to highest priority queue */ 1384 /* Place the cyclic channel to highest priority queue */
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
1570 return IRQ_HANDLED; 1589 return IRQ_HANDLED;
1571} 1590}
1572 1591
1573static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1574{
1575 struct platform_device *tc_pdev;
1576 int ret;
1577
1578 if (!IS_ENABLED(CONFIG_OF) || !tc)
1579 return;
1580
1581 tc_pdev = of_find_device_by_node(tc->node);
1582 if (!tc_pdev) {
1583 pr_err("%s: TPTC device is not found\n", __func__);
1584 return;
1585 }
1586 if (!pm_runtime_enabled(&tc_pdev->dev))
1587 pm_runtime_enable(&tc_pdev->dev);
1588
1589 if (enable)
1590 ret = pm_runtime_get_sync(&tc_pdev->dev);
1591 else
1592 ret = pm_runtime_put_sync(&tc_pdev->dev);
1593
1594 if (ret < 0)
1595 pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
1596 enable ? "get" : "put", dev_name(&tc_pdev->dev));
1597}
1598
1599/* Alloc channel resources */ 1592/* Alloc channel resources */
1600static int edma_alloc_chan_resources(struct dma_chan *chan) 1593static int edma_alloc_chan_resources(struct dma_chan *chan)
1601{ 1594{
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
1632 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, 1625 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1633 echan->hw_triggered ? "HW" : "SW"); 1626 echan->hw_triggered ? "HW" : "SW");
1634 1627
1635 edma_tc_set_pm_state(echan->tc, true);
1636
1637 return 0; 1628 return 0;
1638 1629
1639err_slot: 1630err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
1670 echan->alloced = false; 1661 echan->alloced = false;
1671 } 1662 }
1672 1663
1673 edma_tc_set_pm_state(echan->tc, false);
1674 echan->tc = NULL; 1664 echan->tc = NULL;
1675 echan->hw_triggered = false; 1665 echan->hw_triggered = false;
1676 1666
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
2417 int i; 2407 int i;
2418 2408
2419 for (i = 0; i < ecc->num_channels; i++) { 2409 for (i = 0; i < ecc->num_channels; i++) {
2420 if (echan[i].alloced) { 2410 if (echan[i].alloced)
2421 edma_setup_interrupt(&echan[i], false); 2411 edma_setup_interrupt(&echan[i], false);
2422 edma_tc_set_pm_state(echan[i].tc, false);
2423 }
2424 } 2412 }
2425 2413
2426 return 0; 2414 return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
2450 2438
2451 /* Set up channel -> slot mapping for the entry slot */ 2439 /* Set up channel -> slot mapping for the entry slot */
2452 edma_set_chmap(&echan[i], echan[i].slot[0]); 2440 edma_set_chmap(&echan[i], echan[i].slot[0]);
2453
2454 edma_tc_set_pm_state(echan[i].tc, true);
2455 } 2441 }
2456 } 2442 }
2457 2443
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
2475 2461
2476static int edma_tptc_probe(struct platform_device *pdev) 2462static int edma_tptc_probe(struct platform_device *pdev)
2477{ 2463{
2478 return 0; 2464 pm_runtime_enable(&pdev->dev);
2465 return pm_runtime_get_sync(&pdev->dev);
2479} 2466}
2480 2467
2481static struct platform_driver edma_tptc_driver = { 2468static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
64 64
65 if (hsuc->direction == DMA_MEM_TO_DEV) { 65 if (hsuc->direction == DMA_MEM_TO_DEV) {
66 bsr = config->dst_maxburst; 66 bsr = config->dst_maxburst;
67 mtsr = config->dst_addr_width; 67 mtsr = config->src_addr_width;
68 } else if (hsuc->direction == DMA_DEV_TO_MEM) { 68 } else if (hsuc->direction == DMA_DEV_TO_MEM) {
69 bsr = config->src_maxburst; 69 bsr = config->src_maxburst;
70 mtsr = config->src_addr_width; 70 mtsr = config->dst_addr_width;
71 } 71 }
72 72
73 hsu_chan_disable(hsuc); 73 hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
135 sr = hsu_chan_readl(hsuc, HSU_CH_SR); 135 sr = hsu_chan_readl(hsuc, HSU_CH_SR);
136 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 136 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
137 137
138 return sr; 138 return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
139} 139}
140 140
141irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) 141irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) 254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
255{ 255{
256 struct hsu_dma_desc *desc = hsuc->desc; 256 struct hsu_dma_desc *desc = hsuc->desc;
257 size_t bytes = desc->length; 257 size_t bytes = 0;
258 int i; 258 int i;
259 259
260 i = desc->active % HSU_DMA_CHAN_NR_DESC; 260 for (i = desc->active; i < desc->nents; i++)
261 bytes += desc->sg[i].len;
262
263 i = HSU_DMA_CHAN_NR_DESC - 1;
261 do { 264 do {
262 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); 265 bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
263 } while (--i >= 0); 266 } while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
41#define HSU_CH_SR_DESCTO(x) BIT(8 + (x)) 41#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
42#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8)) 42#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
43#define HSU_CH_SR_CHE BIT(15) 43#define HSU_CH_SR_CHE BIT(15)
44#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
45#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
46#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
44 47
45/* Bits in HSU_CH_CR */ 48/* Bits in HSU_CH_CR */
46#define HSU_CH_CR_CHA BIT(0) 49#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5aee7ffe..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
48 unsigned dma_sig; 48 unsigned dma_sig;
49 bool cyclic; 49 bool cyclic;
50 bool paused; 50 bool paused;
51 bool running;
51 52
52 int dma_ch; 53 int dma_ch;
53 struct omap_desc *desc; 54 struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
294 295
295 /* Enable channel */ 296 /* Enable channel */
296 omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); 297 omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
298
299 c->running = true;
297} 300}
298 301
299static void omap_dma_stop(struct omap_chan *c) 302static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
355 358
356 omap_dma_chan_write(c, CLNK_CTRL, val); 359 omap_dma_chan_write(c, CLNK_CTRL, val);
357 } 360 }
361
362 c->running = false;
358} 363}
359 364
360static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, 365static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
673 struct omap_chan *c = to_omap_dma_chan(chan); 678 struct omap_chan *c = to_omap_dma_chan(chan);
674 struct virt_dma_desc *vd; 679 struct virt_dma_desc *vd;
675 enum dma_status ret; 680 enum dma_status ret;
676 uint32_t ccr;
677 unsigned long flags; 681 unsigned long flags;
678 682
679 ccr = omap_dma_chan_read(c, CCR);
680 /* The channel is no longer active, handle the completion right away */
681 if (!(ccr & CCR_ENABLE))
682 omap_dma_callback(c->dma_ch, 0, c);
683
684 ret = dma_cookie_status(chan, cookie, txstate); 683 ret = dma_cookie_status(chan, cookie, txstate);
684
685 if (!c->paused && c->running) {
686 uint32_t ccr = omap_dma_chan_read(c, CCR);
687 /*
688 * The channel is no longer active, set the return value
689 * accordingly
690 */
691 if (!(ccr & CCR_ENABLE))
692 ret = DMA_COMPLETE;
693 }
694
685 if (ret == DMA_COMPLETE || !txstate) 695 if (ret == DMA_COMPLETE || !txstate)
686 return ret; 696 return ret;
687 697
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
945 d->ccr = c->ccr; 955 d->ccr = c->ccr;
946 d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC; 956 d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
947 957
948 d->cicr = CICR_DROP_IE; 958 d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
949 if (tx_flags & DMA_PREP_INTERRUPT)
950 d->cicr |= CICR_FRAME_IE;
951 959
952 d->csdp = data_type; 960 d->csdp = data_type;
953 961
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321868d3..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1236 struct xilinx_vdma_device *xdev = ofdma->of_dma_data; 1236 struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
1237 int chan_id = dma_spec->args[0]; 1237 int chan_id = dma_spec->args[0];
1238 1238
1239 if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) 1239 if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
1240 return NULL; 1240 return NULL;
1241 1241
1242 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 1242 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1866 1866
1867 i7_dev = get_i7core_dev(mce->socketid); 1867 i7_dev = get_i7core_dev(mce->socketid);
1868 if (!i7_dev) 1868 if (!i7_dev)
1869 return NOTIFY_BAD; 1869 return NOTIFY_DONE;
1870 1870
1871 mci = i7_dev->mci; 1871 mci = i7_dev->mci;
1872 pvt = mci->pvt_info; 1872 pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93f0d4120289..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -362,6 +362,7 @@ struct sbridge_pvt {
362 362
363 /* Memory type detection */ 363 /* Memory type detection */
364 bool is_mirrored, is_lockstep, is_close_pg; 364 bool is_mirrored, is_lockstep, is_close_pg;
365 bool is_chan_hash;
365 366
366 /* Fifo double buffers */ 367 /* Fifo double buffers */
367 struct mce mce_entry[MCE_LOG_LEN]; 368 struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
1060 return (pkg >> 2) & 0x1; 1061 return (pkg >> 2) & 0x1;
1061} 1062}
1062 1063
1064static int haswell_chan_hash(int idx, u64 addr)
1065{
1066 int i;
1067
1068 /*
1069 * XOR even bits from 12:26 to bit0 of idx,
1070 * odd bits from 13:27 to bit1
1071 */
1072 for (i = 12; i < 28; i += 2)
1073 idx ^= (addr >> i) & 3;
1074
1075 return idx;
1076}
1077
1063/**************************************************************************** 1078/****************************************************************************
1064 Memory check routines 1079 Memory check routines
1065 ****************************************************************************/ 1080 ****************************************************************************/
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
1616 KNL_MAX_CHANNELS : NUM_CHANNELS; 1631 KNL_MAX_CHANNELS : NUM_CHANNELS;
1617 u64 knl_mc_sizes[KNL_MAX_CHANNELS]; 1632 u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1618 1633
1634 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1635 pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
1636 pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1637 }
1619 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL || 1638 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1620 pvt->info.type == KNIGHTS_LANDING) 1639 pvt->info.type == KNIGHTS_LANDING)
1621 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg); 1640 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2118 } 2137 }
2119 2138
2120 ch_way = TAD_CH(reg) + 1; 2139 ch_way = TAD_CH(reg) + 1;
2121 sck_way = 1 << TAD_SOCK(reg); 2140 sck_way = TAD_SOCK(reg);
2122 2141
2123 if (ch_way == 3) 2142 if (ch_way == 3)
2124 idx = addr >> 6; 2143 idx = addr >> 6;
2125 else 2144 else {
2126 idx = (addr >> (6 + sck_way + shiftup)) & 0x3; 2145 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2146 if (pvt->is_chan_hash)
2147 idx = haswell_chan_hash(idx, addr);
2148 }
2127 idx = idx % ch_way; 2149 idx = idx % ch_way;
2128 2150
2129 /* 2151 /*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2157 switch(ch_way) { 2179 switch(ch_way) {
2158 case 2: 2180 case 2:
2159 case 4: 2181 case 4:
2160 sck_xch = 1 << sck_way * (ch_way >> 1); 2182 sck_xch = (1 << sck_way) * (ch_way >> 1);
2161 break; 2183 break;
2162 default: 2184 default:
2163 sprintf(msg, "Invalid mirror set. Can't decode addr"); 2185 sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
2193 2215
2194 ch_addr = addr - offset; 2216 ch_addr = addr - offset;
2195 ch_addr >>= (6 + shiftup); 2217 ch_addr >>= (6 + shiftup);
2196 ch_addr /= ch_way * sck_way; 2218 ch_addr /= sck_xch;
2197 ch_addr <<= (6 + shiftup); 2219 ch_addr <<= (6 + shiftup);
2198 ch_addr |= addr & ((1 << (6 + shiftup)) - 1); 2220 ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
2199 2221
@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3146 3168
3147 mci = get_mci_for_node_id(mce->socketid); 3169 mci = get_mci_for_node_id(mce->socketid);
3148 if (!mci) 3170 if (!mci)
3149 return NOTIFY_BAD; 3171 return NOTIFY_DONE;
3150 pvt = mci->pvt_info; 3172 pvt = mci->pvt_info;
3151 3173
3152 /* 3174 /*
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b586395..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
348 palmas_vbus_irq_handler, 348 palmas_vbus_irq_handler,
349 IRQF_TRIGGER_FALLING | 349 IRQF_TRIGGER_FALLING |
350 IRQF_TRIGGER_RISING | 350 IRQF_TRIGGER_RISING |
351 IRQF_ONESHOT | 351 IRQF_ONESHOT,
352 IRQF_EARLY_RESUME,
353 "palmas_usb_vbus", 352 "palmas_usb_vbus",
354 palmas_usb); 353 palmas_usb);
355 if (status < 0) { 354 if (status < 0) {
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743152a2..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -203,7 +203,19 @@ void __init efi_init(void)
203 203
204 reserve_regions(); 204 reserve_regions();
205 early_memunmap(memmap.map, params.mmap_size); 205 early_memunmap(memmap.map, params.mmap_size);
206 memblock_mark_nomap(params.mmap & PAGE_MASK, 206
207 PAGE_ALIGN(params.mmap_size + 207 if (IS_ENABLED(CONFIG_ARM)) {
208 (params.mmap & ~PAGE_MASK))); 208 /*
209 * ARM currently does not allow ioremap_cache() to be called on
210 * memory regions that are covered by struct page. So remove the
211 * UEFI memory map from the linear mapping.
212 */
213 memblock_mark_nomap(params.mmap & PAGE_MASK,
214 PAGE_ALIGN(params.mmap_size +
215 (params.mmap & ~PAGE_MASK)));
216 } else {
217 memblock_reserve(params.mmap & PAGE_MASK,
218 PAGE_ALIGN(params.mmap_size +
219 (params.mmap & ~PAGE_MASK)));
220 }
209} 221}
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
202 { NULL_GUID, "", NULL }, 202 { NULL_GUID, "", NULL },
203}; 203};
204 204
205/*
206 * Check if @var_name matches the pattern given in @match_name.
207 *
208 * @var_name: an array of @len non-NUL characters.
209 * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
210 * final "*" character matches any trailing characters @var_name,
211 * including the case when there are none left in @var_name.
212 * @match: on output, the number of non-wildcard characters in @match_name
213 * that @var_name matches, regardless of the return value.
214 * @return: whether @var_name fully matches @match_name.
215 */
205static bool 216static bool
206variable_matches(const char *var_name, size_t len, const char *match_name, 217variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match) 218 int *match)
208{ 219{
209 for (*match = 0; ; (*match)++) { 220 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match]; 221 char c = match_name[*match];
211 char u = var_name[*match];
212 222
213 /* Wildcard in the matching name means we've matched */ 223 switch (c) {
214 if (c == '*') 224 case '*':
225 /* Wildcard in @match_name means we've matched. */
215 return true; 226 return true;
216 227
217 /* Case sensitive match */ 228 case '\0':
218 if (!c && *match == len) 229 /* @match_name has ended. Has @var_name too? */
219 return true; 230 return (*match == len);
220 231
221 if (c != u) 232 default:
233 /*
234 * We've reached a non-wildcard char in @match_name.
235 * Continue only if there's an identical character in
236 * @var_name.
237 */
238 if (*match < len && c == var_name[*match])
239 continue;
222 return false; 240 return false;
223 241 }
224 if (!c)
225 return true;
226 } 242 }
227 return true;
228} 243}
229 244
230bool 245bool
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8b79a9..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
360 .init = psci_dt_cpu_init_idle, 360 .init = psci_dt_cpu_init_idle,
361}; 361};
362 362
363CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); 363CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
364#endif 364#endif
365#endif 365#endif
366 366
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index fedbff55a7f3..1b95475b6aef 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
77static inline void fw_cfg_read_blob(u16 key, 77static inline void fw_cfg_read_blob(u16 key,
78 void *buf, loff_t pos, size_t count) 78 void *buf, loff_t pos, size_t count)
79{ 79{
80 u32 glk = -1U;
81 acpi_status status;
82
83 /* If we have ACPI, ensure mutual exclusion against any potential
84 * device access by the firmware, e.g. via AML methods:
85 */
86 status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
87 if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
88 /* Should never get here */
89 WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
90 memset(buf, 0, count);
91 return;
92 }
93
80 mutex_lock(&fw_cfg_dev_lock); 94 mutex_lock(&fw_cfg_dev_lock);
81 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl); 95 iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
82 while (pos-- > 0) 96 while (pos-- > 0)
83 ioread8(fw_cfg_reg_data); 97 ioread8(fw_cfg_reg_data);
84 ioread8_rep(fw_cfg_reg_data, buf, count); 98 ioread8_rep(fw_cfg_reg_data, buf, count);
85 mutex_unlock(&fw_cfg_dev_lock); 99 mutex_unlock(&fw_cfg_dev_lock);
100
101 acpi_release_global_lock(glk);
86} 102}
87 103
88/* clean up fw_cfg device i/o */ 104/* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
727 743
728static int __init fw_cfg_sysfs_init(void) 744static int __init fw_cfg_sysfs_init(void)
729{ 745{
746 int ret;
747
730 /* create /sys/firmware/qemu_fw_cfg/ top level directory */ 748 /* create /sys/firmware/qemu_fw_cfg/ top level directory */
731 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj); 749 fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
732 if (!fw_cfg_top_ko) 750 if (!fw_cfg_top_ko)
733 return -ENOMEM; 751 return -ENOMEM;
734 752
735 return platform_driver_register(&fw_cfg_sysfs_driver); 753 ret = platform_driver_register(&fw_cfg_sysfs_driver);
754 if (ret)
755 fw_cfg_kobj_cleanup(fw_cfg_top_ko);
756
757 return ret;
736} 758}
737 759
738static void __exit fw_cfg_sysfs_exit(void) 760static void __exit fw_cfg_sysfs_exit(void)
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a68e199d579d..c5c9599a3a71 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -37,7 +37,6 @@ struct men_z127_gpio {
37 void __iomem *reg_base; 37 void __iomem *reg_base;
38 struct mcb_device *mdev; 38 struct mcb_device *mdev;
39 struct resource *mem; 39 struct resource *mem;
40 spinlock_t lock;
41}; 40};
42 41
43static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio, 42static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
@@ -69,7 +68,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
69 debounce /= 50; 68 debounce /= 50;
70 } 69 }
71 70
72 spin_lock(&priv->lock); 71 spin_lock(&gc->bgpio_lock);
73 72
74 db_en = readl(priv->reg_base + MEN_Z127_DBER); 73 db_en = readl(priv->reg_base + MEN_Z127_DBER);
75 74
@@ -84,7 +83,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
84 writel(db_en, priv->reg_base + MEN_Z127_DBER); 83 writel(db_en, priv->reg_base + MEN_Z127_DBER);
85 writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio)); 84 writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
86 85
87 spin_unlock(&priv->lock); 86 spin_unlock(&gc->bgpio_lock);
88 87
89 return 0; 88 return 0;
90} 89}
@@ -97,7 +96,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
97 if (gpio_pin >= gc->ngpio) 96 if (gpio_pin >= gc->ngpio)
98 return -EINVAL; 97 return -EINVAL;
99 98
100 spin_lock(&priv->lock); 99 spin_lock(&gc->bgpio_lock);
101 od_en = readl(priv->reg_base + MEN_Z127_ODER); 100 od_en = readl(priv->reg_base + MEN_Z127_ODER);
102 101
103 if (gpiochip_line_is_open_drain(gc, gpio_pin)) 102 if (gpiochip_line_is_open_drain(gc, gpio_pin))
@@ -106,7 +105,7 @@ static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
106 od_en &= ~BIT(gpio_pin); 105 od_en &= ~BIT(gpio_pin);
107 106
108 writel(od_en, priv->reg_base + MEN_Z127_ODER); 107 writel(od_en, priv->reg_base + MEN_Z127_ODER);
109 spin_unlock(&priv->lock); 108 spin_unlock(&gc->bgpio_lock);
110 109
111 return 0; 110 return 0;
112} 111}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d0d3065a7557..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/platform_data/pca953x.h> 19#include <linux/platform_data/pca953x.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <asm/unaligned.h>
21#include <linux/of_platform.h> 22#include <linux/of_platform.h>
22#include <linux/acpi.h> 23#include <linux/acpi.h>
23 24
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
159 switch (chip->chip_type) { 160 switch (chip->chip_type) {
160 case PCA953X_TYPE: 161 case PCA953X_TYPE:
161 ret = i2c_smbus_write_word_data(chip->client, 162 ret = i2c_smbus_write_word_data(chip->client,
162 reg << 1, (u16) *val); 163 reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
163 break; 164 break;
164 case PCA957X_TYPE: 165 case PCA957X_TYPE:
165 ret = i2c_smbus_write_byte_data(chip->client, reg << 1, 166 ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET)); 283 writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
284 284
285 ret = pinctrl_gpio_direction_output(chip->base + offset); 285 ret = pinctrl_gpio_direction_output(chip->base + offset);
286 if (!ret) 286 if (ret)
287 return 0; 287 return ret;
288 288
289 spin_lock_irqsave(&gpio_lock, flags); 289 spin_lock_irqsave(&gpio_lock, flags);
290 290
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd1d205..4d9a315cfd43 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
196 return 0; 196 return 0;
197} 197}
198 198
199static void gpio_rcar_irq_bus_lock(struct irq_data *d)
200{
201 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
202 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
203
204 pm_runtime_get_sync(&p->pdev->dev);
205}
206
207static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
208{
209 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
210 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
211
212 pm_runtime_put(&p->pdev->dev);
213}
214
215
216static int gpio_rcar_irq_request_resources(struct irq_data *d)
217{
218 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
219 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
220 int error;
221
222 error = pm_runtime_get_sync(&p->pdev->dev);
223 if (error < 0)
224 return error;
225
226 return 0;
227}
228
229static void gpio_rcar_irq_release_resources(struct irq_data *d)
230{
231 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
232 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
233
234 pm_runtime_put(&p->pdev->dev);
235}
236
237static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 199static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
238{ 200{
239 struct gpio_rcar_priv *p = dev_id; 201 struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
280 242
281static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) 243static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
282{ 244{
283 struct gpio_rcar_priv *p = gpiochip_get_data(chip); 245 return pinctrl_request_gpio(chip->base + offset);
284 int error;
285
286 error = pm_runtime_get_sync(&p->pdev->dev);
287 if (error < 0)
288 return error;
289
290 error = pinctrl_request_gpio(chip->base + offset);
291 if (error)
292 pm_runtime_put(&p->pdev->dev);
293
294 return error;
295} 246}
296 247
297static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset) 248static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
298{ 249{
299 struct gpio_rcar_priv *p = gpiochip_get_data(chip);
300
301 pinctrl_free_gpio(chip->base + offset); 250 pinctrl_free_gpio(chip->base + offset);
302 251
303 /* Set the GPIO as an input to ensure that the next GPIO request won't 252 /*
253 * Set the GPIO as an input to ensure that the next GPIO request won't
304 * drive the GPIO pin as an output. 254 * drive the GPIO pin as an output.
305 */ 255 */
306 gpio_rcar_config_general_input_output_mode(chip, offset, false); 256 gpio_rcar_config_general_input_output_mode(chip, offset, false);
307
308 pm_runtime_put(&p->pdev->dev);
309} 257}
310 258
311static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset) 259static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
452 } 400 }
453 401
454 pm_runtime_enable(dev); 402 pm_runtime_enable(dev);
403 pm_runtime_get_sync(dev);
455 404
456 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 405 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
457 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 406 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
488 irq_chip->irq_unmask = gpio_rcar_irq_enable; 437 irq_chip->irq_unmask = gpio_rcar_irq_enable;
489 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 438 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
490 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 439 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
491 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
492 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
493 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
494 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
495 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 440 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
496 441
497 ret = gpiochip_add_data(gpio_chip, p); 442 ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
522err1: 467err1:
523 gpiochip_remove(gpio_chip); 468 gpiochip_remove(gpio_chip);
524err0: 469err0:
470 pm_runtime_put(dev);
525 pm_runtime_disable(dev); 471 pm_runtime_disable(dev);
526 return ret; 472 return ret;
527} 473}
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
532 478
533 gpiochip_remove(&p->gpio_chip); 479 gpiochip_remove(&p->gpio_chip);
534 480
481 pm_runtime_put(&pdev->dev);
535 pm_runtime_disable(&pdev->dev); 482 pm_runtime_disable(&pdev->dev);
536 return 0; 483 return 0;
537} 484}
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index c0aa387664bf..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
173 } 173 }
174 174
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (!res) {
177 err = -EINVAL;
178 goto err;
179 }
180
176 gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, 181 gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
177 resource_size(res)); 182 resource_size(res));
178 if (!gpio->base) { 183 if (!gpio->base) {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 682070d20f00..2dc52585e3f2 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
977 lookup = kmalloc(sizeof(*lookup), GFP_KERNEL); 977 lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
978 if (lookup) { 978 if (lookup) {
979 lookup->adev = adev; 979 lookup->adev = adev;
980 lookup->con_id = con_id; 980 lookup->con_id = kstrdup(con_id, GFP_KERNEL);
981 list_add_tail(&lookup->node, &acpi_crs_lookup_list); 981 list_add_tail(&lookup->node, &acpi_crs_lookup_list);
982 } 982 }
983 } 983 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 72065532c1c7..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices);
68static void gpiochip_free_hogs(struct gpio_chip *chip); 68static void gpiochip_free_hogs(struct gpio_chip *chip);
69static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 69static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
70 70
71static bool gpiolib_initialized;
71 72
72static inline void desc_set_label(struct gpio_desc *d, const char *label) 73static inline void desc_set_label(struct gpio_desc *d, const char *label)
73{ 74{
@@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev)
440 cdev_del(&gdev->chrdev); 441 cdev_del(&gdev->chrdev);
441 list_del(&gdev->list); 442 list_del(&gdev->list);
442 ida_simple_remove(&gpio_ida, gdev->id); 443 ida_simple_remove(&gpio_ida, gdev->id);
444 kfree(gdev->label);
445 kfree(gdev->descs);
443 kfree(gdev); 446 kfree(gdev);
444} 447}
445 448
449static int gpiochip_setup_dev(struct gpio_device *gdev)
450{
451 int status;
452
453 cdev_init(&gdev->chrdev, &gpio_fileops);
454 gdev->chrdev.owner = THIS_MODULE;
455 gdev->chrdev.kobj.parent = &gdev->dev.kobj;
456 gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
457 status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
458 if (status < 0)
459 chip_warn(gdev->chip, "failed to add char device %d:%d\n",
460 MAJOR(gpio_devt), gdev->id);
461 else
462 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
463 MAJOR(gpio_devt), gdev->id);
464 status = device_add(&gdev->dev);
465 if (status)
466 goto err_remove_chardev;
467
468 status = gpiochip_sysfs_register(gdev);
469 if (status)
470 goto err_remove_device;
471
472 /* From this point, the .release() function cleans up gpio_device */
473 gdev->dev.release = gpiodevice_release;
474 get_device(&gdev->dev);
475 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
476 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
477 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
478
479 return 0;
480
481err_remove_device:
482 device_del(&gdev->dev);
483err_remove_chardev:
484 cdev_del(&gdev->chrdev);
485 return status;
486}
487
488static void gpiochip_setup_devs(void)
489{
490 struct gpio_device *gdev;
491 int err;
492
493 list_for_each_entry(gdev, &gpio_devices, list) {
494 err = gpiochip_setup_dev(gdev);
495 if (err)
496 pr_err("%s: Failed to initialize gpio device (%d)\n",
497 dev_name(&gdev->dev), err);
498 }
499}
500
446/** 501/**
447 * gpiochip_add_data() - register a gpio_chip 502 * gpiochip_add_data() - register a gpio_chip
448 * @chip: the chip to register, with chip->base initialized 503 * @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev)
457 * the gpio framework's arch_initcall(). Otherwise sysfs initialization 512 * the gpio framework's arch_initcall(). Otherwise sysfs initialization
458 * for GPIOs will fail rudely. 513 * for GPIOs will fail rudely.
459 * 514 *
515 * gpiochip_add_data() must only be called after gpiolib initialization,
516 * ie after core_initcall().
517 *
460 * If chip->base is negative, this requests dynamic assignment of 518 * If chip->base is negative, this requests dynamic assignment of
461 * a range of valid GPIOs. 519 * a range of valid GPIOs.
462 */ 520 */
@@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
504 else 562 else
505 gdev->owner = THIS_MODULE; 563 gdev->owner = THIS_MODULE;
506 564
507 gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio, 565 gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
508 sizeof(gdev->descs[0]), GFP_KERNEL);
509 if (!gdev->descs) { 566 if (!gdev->descs) {
510 status = -ENOMEM; 567 status = -ENOMEM;
511 goto err_free_gdev; 568 goto err_free_gdev;
@@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
514 if (chip->ngpio == 0) { 571 if (chip->ngpio == 0) {
515 chip_err(chip, "tried to insert a GPIO chip with zero lines\n"); 572 chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
516 status = -EINVAL; 573 status = -EINVAL;
517 goto err_free_gdev; 574 goto err_free_descs;
518 } 575 }
519 576
520 if (chip->label) 577 if (chip->label)
521 gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL); 578 gdev->label = kstrdup(chip->label, GFP_KERNEL);
522 else 579 else
523 gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL); 580 gdev->label = kstrdup("unknown", GFP_KERNEL);
524 if (!gdev->label) { 581 if (!gdev->label) {
525 status = -ENOMEM; 582 status = -ENOMEM;
526 goto err_free_gdev; 583 goto err_free_descs;
527 } 584 }
528 585
529 gdev->ngpio = chip->ngpio; 586 gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
543 if (base < 0) { 600 if (base < 0) {
544 status = base; 601 status = base;
545 spin_unlock_irqrestore(&gpio_lock, flags); 602 spin_unlock_irqrestore(&gpio_lock, flags);
546 goto err_free_gdev; 603 goto err_free_label;
547 } 604 }
548 /* 605 /*
549 * TODO: it should not be necessary to reflect the assigned 606 * TODO: it should not be necessary to reflect the assigned
@@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
558 status = gpiodev_add_to_list(gdev); 615 status = gpiodev_add_to_list(gdev);
559 if (status) { 616 if (status) {
560 spin_unlock_irqrestore(&gpio_lock, flags); 617 spin_unlock_irqrestore(&gpio_lock, flags);
561 goto err_free_gdev; 618 goto err_free_label;
562 } 619 }
563 620
564 for (i = 0; i < chip->ngpio; i++) { 621 for (i = 0; i < chip->ngpio; i++) {
@@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
596 * we get a device node entry in sysfs under 653 * we get a device node entry in sysfs under
597 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for 654 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
598 * coldplug of device nodes and other udev business. 655 * coldplug of device nodes and other udev business.
656 * We can do this only if gpiolib has been initialized.
657 * Otherwise, defer until later.
599 */ 658 */
600 cdev_init(&gdev->chrdev, &gpio_fileops); 659 if (gpiolib_initialized) {
601 gdev->chrdev.owner = THIS_MODULE; 660 status = gpiochip_setup_dev(gdev);
602 gdev->chrdev.kobj.parent = &gdev->dev.kobj; 661 if (status)
603 gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id); 662 goto err_remove_chip;
604 status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1); 663 }
605 if (status < 0)
606 chip_warn(chip, "failed to add char device %d:%d\n",
607 MAJOR(gpio_devt), gdev->id);
608 else
609 chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
610 MAJOR(gpio_devt), gdev->id);
611 status = device_add(&gdev->dev);
612 if (status)
613 goto err_remove_chardev;
614
615 status = gpiochip_sysfs_register(gdev);
616 if (status)
617 goto err_remove_device;
618
619 /* From this point, the .release() function cleans up gpio_device */
620 gdev->dev.release = gpiodevice_release;
621 get_device(&gdev->dev);
622 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
623 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
624 dev_name(&gdev->dev), chip->label ? : "generic");
625
626 return 0; 664 return 0;
627 665
628err_remove_device:
629 device_del(&gdev->dev);
630err_remove_chardev:
631 cdev_del(&gdev->chrdev);
632err_remove_chip: 666err_remove_chip:
633 acpi_gpiochip_remove(chip); 667 acpi_gpiochip_remove(chip);
634 gpiochip_free_hogs(chip); 668 gpiochip_free_hogs(chip);
@@ -637,6 +671,10 @@ err_remove_from_list:
637 spin_lock_irqsave(&gpio_lock, flags); 671 spin_lock_irqsave(&gpio_lock, flags);
638 list_del(&gdev->list); 672 list_del(&gdev->list);
639 spin_unlock_irqrestore(&gpio_lock, flags); 673 spin_unlock_irqrestore(&gpio_lock, flags);
674err_free_label:
675 kfree(gdev->label);
676err_free_descs:
677 kfree(gdev->descs);
640err_free_gdev: 678err_free_gdev:
641 ida_simple_remove(&gpio_ida, gdev->id); 679 ida_simple_remove(&gpio_ida, gdev->id);
642 /* failures here can mean systems won't boot... */ 680 /* failures here can mean systems won't boot... */
@@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
2231 return desc; 2269 return desc;
2232} 2270}
2233 2271
2234static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, 2272static struct gpio_desc *acpi_find_gpio(struct device *dev,
2273 const char *con_id,
2235 unsigned int idx, 2274 unsigned int idx,
2236 enum gpio_lookup_flags *flags) 2275 enum gpiod_flags flags,
2276 enum gpio_lookup_flags *lookupflags)
2237{ 2277{
2238 struct acpi_device *adev = ACPI_COMPANION(dev); 2278 struct acpi_device *adev = ACPI_COMPANION(dev);
2239 struct acpi_gpio_info info; 2279 struct acpi_gpio_info info;
@@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
2264 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); 2304 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
2265 if (IS_ERR(desc)) 2305 if (IS_ERR(desc))
2266 return desc; 2306 return desc;
2307
2308 if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
2309 info.gpioint) {
2310 dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
2311 return ERR_PTR(-ENOENT);
2312 }
2267 } 2313 }
2268 2314
2269 if (info.polarity == GPIO_ACTIVE_LOW) 2315 if (info.polarity == GPIO_ACTIVE_LOW)
2270 *flags |= GPIO_ACTIVE_LOW; 2316 *lookupflags |= GPIO_ACTIVE_LOW;
2271 2317
2272 return desc; 2318 return desc;
2273} 2319}
@@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
2530 desc = of_find_gpio(dev, con_id, idx, &lookupflags); 2576 desc = of_find_gpio(dev, con_id, idx, &lookupflags);
2531 } else if (ACPI_COMPANION(dev)) { 2577 } else if (ACPI_COMPANION(dev)) {
2532 dev_dbg(dev, "using ACPI for GPIO lookup\n"); 2578 dev_dbg(dev, "using ACPI for GPIO lookup\n");
2533 desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); 2579 desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
2534 } 2580 }
2535 } 2581 }
2536 2582
@@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void)
2829 if (ret < 0) { 2875 if (ret < 0) {
2830 pr_err("gpiolib: failed to allocate char dev region\n"); 2876 pr_err("gpiolib: failed to allocate char dev region\n");
2831 bus_unregister(&gpio_bus_type); 2877 bus_unregister(&gpio_bus_type);
2878 } else {
2879 gpiolib_initialized = true;
2880 gpiochip_setup_devs();
2832 } 2881 }
2833 return ret; 2882 return ret;
2834} 2883}
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0f734ee05274..ca77ec10147c 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,10 +1,14 @@
1menu "ACP Configuration" 1menu "ACP (Audio CoProcessor) Configuration"
2 2
3config DRM_AMD_ACP 3config DRM_AMD_ACP
4 bool "Enable ACP IP support" 4 bool "Enable AMD Audio CoProcessor IP support"
5 select MFD_CORE 5 select MFD_CORE
6 select PM_GENERIC_DOMAINS if PM 6 select PM_GENERIC_DOMAINS if PM
7 help 7 help
8 Choose this option to enable ACP IP support for AMD SOCs. 8 Choose this option to enable ACP IP support for AMD SOCs.
9 This adds the ACP (Audio CoProcessor) IP driver and wires
10 it up into the amdgpu driver. The ACP block provides the DMA
11 engine for the i2s-based ALSA driver. It is required for audio
12 on APUs which utilize an i2s codec.
9 13
10endmenu 14endmenu
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c4a21c6428f5..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,8 @@ struct amdgpu_uvd {
1591 struct amdgpu_bo *vcpu_bo; 1591 struct amdgpu_bo *vcpu_bo;
1592 void *cpu_addr; 1592 void *cpu_addr;
1593 uint64_t gpu_addr; 1593 uint64_t gpu_addr;
1594 unsigned fw_version;
1595 void *saved_bo;
1594 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1596 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1595 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 1597 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1596 struct delayed_work idle_work; 1598 struct delayed_work idle_work;
@@ -2033,6 +2035,7 @@ struct amdgpu_device {
2033 2035
2034 /* tracking pinned memory */ 2036 /* tracking pinned memory */
2035 u64 vram_pin_size; 2037 u64 vram_pin_size;
2038 u64 invisible_pin_size;
2036 u64 gart_pin_size; 2039 u64 gart_pin_size;
2037 2040
2038 /* amdkfd interface */ 2041 /* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff510aa..b7b583c42ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
425 struct acp_pm_domain *apd; 425 struct acp_pm_domain *apd;
426 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 426 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
427 427
428 /* return early if no ACP */
429 if (!adev->acp.acp_genpd)
430 return 0;
431
428 /* SMU block will power on ACP irrespective of ACP runtime status. 432 /* SMU block will power on ACP irrespective of ACP runtime status.
429 * Power off explicitly based on genpd ACP runtime status so that ACP 433 * Power off explicitly based on genpd ACP runtime status so that ACP
430 * hw and ACP-genpd status are in sync. 434 * hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
63 return amdgpu_atpx_priv.atpx_detected; 63 return amdgpu_atpx_priv.atpx_detected;
64} 64}
65 65
66bool amdgpu_has_atpx_dgpu_power_cntl(void) {
67 return amdgpu_atpx_priv.atpx.functions.power_cntl;
68}
69
70/** 66/**
71 * amdgpu_atpx_call - call an ATPX method 67 * amdgpu_atpx_call - call an ATPX method
72 * 68 *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
146 */ 142 */
147static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) 143static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
148{ 144{
145 /* make sure required functions are enabled */
146 /* dGPU power control is required */
147 if (atpx->functions.power_cntl == false) {
148 printk("ATPX dGPU power cntl not present, forcing\n");
149 atpx->functions.power_cntl = true;
150 }
151
149 if (atpx->functions.px_params) { 152 if (atpx->functions.px_params) {
150 union acpi_object *info; 153 union acpi_object *info;
151 struct atpx_px_params output; 154 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
816 struct drm_device *ddev = adev->ddev; 816 struct drm_device *ddev = adev->ddev;
817 struct drm_crtc *crtc; 817 struct drm_crtc *crtc;
818 uint32_t line_time_us, vblank_lines; 818 uint32_t line_time_us, vblank_lines;
819 struct cgs_mode_info *mode_info;
819 820
820 if (info == NULL) 821 if (info == NULL)
821 return -EINVAL; 822 return -EINVAL;
822 823
824 mode_info = info->mode_info;
825
823 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 826 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
824 list_for_each_entry(crtc, 827 list_for_each_entry(crtc,
825 &ddev->mode_config.crtc_list, head) { 828 &ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
828 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); 831 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
829 info->display_count++; 832 info->display_count++;
830 } 833 }
831 if (info->mode_info != NULL && 834 if (mode_info != NULL &&
832 crtc->enabled && amdgpu_crtc->enabled && 835 crtc->enabled && amdgpu_crtc->enabled &&
833 amdgpu_crtc->hw_mode.clock) { 836 amdgpu_crtc->hw_mode.clock) {
834 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / 837 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
836 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - 839 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
837 amdgpu_crtc->hw_mode.crtc_vdisplay + 840 amdgpu_crtc->hw_mode.crtc_vdisplay +
838 (amdgpu_crtc->v_border * 2); 841 (amdgpu_crtc->v_border * 2);
839 info->mode_info->vblank_time_us = vblank_lines * line_time_us; 842 mode_info->vblank_time_us = vblank_lines * line_time_us;
840 info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 843 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
841 info->mode_info->ref_clock = adev->clock.spll.reference_freq; 844 mode_info->ref_clock = adev->clock.spll.reference_freq;
842 info->mode_info++; 845 mode_info = NULL;
843 } 846 }
844 } 847 }
845 } 848 }
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
847 return 0; 850 return 0;
848} 851}
849 852
853
854static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
855{
856 CGS_FUNC_ADEV;
857
858 adev->pm.dpm_enabled = enabled;
859
860 return 0;
861}
862
850/** \brief evaluate acpi namespace object, handle or pathname must be valid 863/** \brief evaluate acpi namespace object, handle or pathname must be valid
851 * \param cgs_device 864 * \param cgs_device
852 * \param info input/output arguments for the control method 865 * \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
1097 amdgpu_cgs_set_powergating_state, 1110 amdgpu_cgs_set_powergating_state,
1098 amdgpu_cgs_set_clockgating_state, 1111 amdgpu_cgs_set_clockgating_state,
1099 amdgpu_cgs_get_active_displays_info, 1112 amdgpu_cgs_get_active_displays_info,
1113 amdgpu_cgs_notify_dpm_enabled,
1100 amdgpu_cgs_call_acpi_method, 1114 amdgpu_cgs_call_acpi_method,
1101 amdgpu_cgs_query_system_info, 1115 amdgpu_cgs_query_system_info,
1102}; 1116};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
62 "LAST", 62 "LAST",
63}; 63};
64 64
65#if defined(CONFIG_VGA_SWITCHEROO)
66bool amdgpu_has_atpx_dgpu_power_cntl(void);
67#else
68static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
69#endif
70
71bool amdgpu_device_is_px(struct drm_device *dev) 65bool amdgpu_device_is_px(struct drm_device *dev)
72{ 66{
73 struct amdgpu_device *adev = dev->dev_private; 67 struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1485 1479
1486 if (amdgpu_runtime_pm == 1) 1480 if (amdgpu_runtime_pm == 1)
1487 runtime = true; 1481 runtime = true;
1488 if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) 1482 if (amdgpu_device_is_px(ddev))
1489 runtime = true; 1483 runtime = true;
1490 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); 1484 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1491 if (runtime) 1485 if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974bd4e0..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
57 if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) 57 if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
58 return true; 58 return true;
59 59
60 fence_put(*f); 60 fence_put(fence);
61 return false; 61 return false;
62} 62}
63 63
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4303b447efe8..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
121{ 121{
122 struct amdgpu_device *adev = ring->adev; 122 struct amdgpu_device *adev = ring->adev;
123 struct amdgpu_fence *fence; 123 struct amdgpu_fence *fence;
124 struct fence **ptr; 124 struct fence *old, **ptr;
125 uint32_t seq; 125 uint32_t seq;
126 126
127 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); 127 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
141 /* This function can't be called concurrently anyway, otherwise 141 /* This function can't be called concurrently anyway, otherwise
142 * emitting the fence would mess up the hardware ring buffer. 142 * emitting the fence would mess up the hardware ring buffer.
143 */ 143 */
144 BUG_ON(rcu_dereference_protected(*ptr, 1)); 144 old = rcu_dereference_protected(*ptr, 1);
145 if (old && !fence_is_signaled(old)) {
146 DRM_INFO("rcu slot is busy\n");
147 fence_wait(old, false);
148 }
145 149
146 rcu_assign_pointer(*ptr, fence_get(&fence->base)); 150 rcu_assign_pointer(*ptr, fence_get(&fence->base));
147 151
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
219 if (r) { 219 if (r) {
220 return r; 220 return r;
221 } 221 }
222 adev->ddev->vblank_disable_allowed = true;
223
222 /* enable msi */ 224 /* enable msi */
223 adev->irq.msi_enabled = false; 225 adev->irq.msi_enabled = false;
224 226
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a8706af7..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
303 fw_info.feature = adev->vce.fb_version; 303 fw_info.feature = adev->vce.fb_version;
304 break; 304 break;
305 case AMDGPU_INFO_FW_UVD: 305 case AMDGPU_INFO_FW_UVD:
306 fw_info.ver = 0; 306 fw_info.ver = adev->uvd.fw_version;
307 fw_info.feature = 0; 307 fw_info.feature = 0;
308 break; 308 break;
309 case AMDGPU_INFO_FW_GMC: 309 case AMDGPU_INFO_FW_GMC:
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
382 struct drm_amdgpu_info_vram_gtt vram_gtt; 382 struct drm_amdgpu_info_vram_gtt vram_gtt;
383 383
384 vram_gtt.vram_size = adev->mc.real_vram_size; 384 vram_gtt.vram_size = adev->mc.real_vram_size;
385 vram_gtt.vram_size -= adev->vram_pin_size;
385 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; 386 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
386 vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; 387 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
387 vram_gtt.gtt_size = adev->mc.gtt_size; 388 vram_gtt.gtt_size = adev->mc.gtt_size;
388 vram_gtt.gtt_size -= adev->gart_pin_size; 389 vram_gtt.gtt_size -= adev->gart_pin_size;
389 return copy_to_user(out, &vram_gtt, 390 return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6901af..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
53 53
54#define AMDGPU_MAX_HPD_PINS 6 54#define AMDGPU_MAX_HPD_PINS 6
55#define AMDGPU_MAX_CRTCS 6 55#define AMDGPU_MAX_CRTCS 6
56#define AMDGPU_MAX_AFMT_BLOCKS 7 56#define AMDGPU_MAX_AFMT_BLOCKS 9
57 57
58enum amdgpu_rmx_type { 58enum amdgpu_rmx_type {
59 RMX_OFF, 59 RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
309 struct atom_context *atom_context; 309 struct atom_context *atom_context;
310 struct card_info *atom_card_info; 310 struct card_info *atom_card_info;
311 bool mode_config_initialized; 311 bool mode_config_initialized;
312 struct amdgpu_crtc *crtcs[6]; 312 struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
313 struct amdgpu_afmt *afmt[7]; 313 struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
314 /* DVI-I properties */ 314 /* DVI-I properties */
315 struct drm_property *coherent_mode_property; 315 struct drm_property *coherent_mode_property;
316 /* DAC enable load detect */ 316 /* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 151a2d42c639..7ecea83ce453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
424 bo->pin_count = 1; 424 bo->pin_count = 1;
425 if (gpu_addr != NULL) 425 if (gpu_addr != NULL)
426 *gpu_addr = amdgpu_bo_gpu_offset(bo); 426 *gpu_addr = amdgpu_bo_gpu_offset(bo);
427 if (domain == AMDGPU_GEM_DOMAIN_VRAM) 427 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
428 bo->adev->vram_pin_size += amdgpu_bo_size(bo); 428 bo->adev->vram_pin_size += amdgpu_bo_size(bo);
429 else 429 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
430 bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
431 } else
430 bo->adev->gart_pin_size += amdgpu_bo_size(bo); 432 bo->adev->gart_pin_size += amdgpu_bo_size(bo);
431 } else { 433 } else {
432 dev_err(bo->adev->dev, "%p pin failed\n", bo); 434 dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
456 } 458 }
457 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 459 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
458 if (likely(r == 0)) { 460 if (likely(r == 0)) {
459 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 461 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
460 bo->adev->vram_pin_size -= amdgpu_bo_size(bo); 462 bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
461 else 463 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
464 bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
465 } else
462 bo->adev->gart_pin_size -= amdgpu_bo_size(bo); 466 bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
463 } else { 467 } else {
464 dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); 468 dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -476,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
476 return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); 480 return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
477} 481}
478 482
483static const char *amdgpu_vram_names[] = {
484 "UNKNOWN",
485 "GDDR1",
486 "DDR2",
487 "GDDR3",
488 "GDDR4",
489 "GDDR5",
490 "HBM",
491 "DDR3"
492};
493
479int amdgpu_bo_init(struct amdgpu_device *adev) 494int amdgpu_bo_init(struct amdgpu_device *adev)
480{ 495{
481 /* Add an MTRR for the VRAM */ 496 /* Add an MTRR for the VRAM */
@@ -484,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
484 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 499 DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
485 adev->mc.mc_vram_size >> 20, 500 adev->mc.mc_vram_size >> 20,
486 (unsigned long long)adev->mc.aper_size >> 20); 501 (unsigned long long)adev->mc.aper_size >> 20);
487 DRM_INFO("RAM width %dbits DDR\n", 502 DRM_INFO("RAM width %dbits %s\n",
488 adev->mc.vram_width); 503 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
489 return amdgpu_ttm_init(adev); 504 return amdgpu_ttm_init(adev);
490} 505}
491 506
@@ -526,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
526 if (!metadata_size) { 541 if (!metadata_size) {
527 if (bo->metadata_size) { 542 if (bo->metadata_size) {
528 kfree(bo->metadata); 543 kfree(bo->metadata);
544 bo->metadata = NULL;
529 bo->metadata_size = 0; 545 bo->metadata_size = 0;
530 } 546 }
531 return 0; 547 return 0;
@@ -608,6 +624,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
608 if ((offset + size) <= adev->mc.visible_vram_size) 624 if ((offset + size) <= adev->mc.visible_vram_size)
609 return 0; 625 return 0;
610 626
627 /* Can't move a pinned BO to visible VRAM */
628 if (abo->pin_count > 0)
629 return -EINVAL;
630
611 /* hurrah the memory is not visible ! */ 631 /* hurrah the memory is not visible ! */
612 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); 632 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
613 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 633 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
143 adev->powerplay.pp_handle); 143 adev->powerplay.pp_handle);
144 144
145#ifdef CONFIG_DRM_AMD_POWERPLAY 145#ifdef CONFIG_DRM_AMD_POWERPLAY
146 if (adev->pp_enabled) { 146 if (adev->pp_enabled && adev->pm.dpm_enabled) {
147 amdgpu_pm_sysfs_init(adev); 147 amdgpu_pm_sysfs_init(adev);
148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); 148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
149 } 149 }
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
161 adev->powerplay.pp_handle); 161 adev->powerplay.pp_handle);
162 162
163#ifdef CONFIG_DRM_AMD_POWERPLAY 163#ifdef CONFIG_DRM_AMD_POWERPLAY
164 if (adev->pp_enabled) { 164 if (adev->pp_enabled)
165 if (amdgpu_dpm == 0) 165 adev->pm.dpm_enabled = true;
166 adev->pm.dpm_enabled = false;
167 else
168 adev->pm.dpm_enabled = true;
169 }
170#endif 166#endif
171 167
172 return ret; 168 return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab34190859a8..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
223{ 223{
224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); 224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
225 225
226 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
227 return -EPERM;
226 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 228 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
227} 229}
228 230
@@ -384,9 +386,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
384 struct ttm_mem_reg *new_mem) 386 struct ttm_mem_reg *new_mem)
385{ 387{
386 struct amdgpu_device *adev; 388 struct amdgpu_device *adev;
389 struct amdgpu_bo *abo;
387 struct ttm_mem_reg *old_mem = &bo->mem; 390 struct ttm_mem_reg *old_mem = &bo->mem;
388 int r; 391 int r;
389 392
393 /* Can't move a pinned BO */
394 abo = container_of(bo, struct amdgpu_bo, tbo);
395 if (WARN_ON_ONCE(abo->pin_count > 0))
396 return -EINVAL;
397
390 adev = amdgpu_get_adev(bo->bdev); 398 adev = amdgpu_get_adev(bo->bdev);
391 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 399 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
392 amdgpu_move_null(bo, new_mem); 400 amdgpu_move_null(bo, new_mem);
@@ -616,7 +624,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
616 set_page_dirty(page); 624 set_page_dirty(page);
617 625
618 mark_page_accessed(page); 626 mark_page_accessed(page);
619 page_cache_release(page); 627 put_page(page);
620 } 628 }
621 629
622 sg_free_table(ttm->sg); 630 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c1a581044417..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
159 version_major, version_minor, family_id); 159 version_major, version_minor, family_id);
160 160
161 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
162 (family_id << 8));
163
161 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) 164 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
162 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; 165 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
163 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, 166 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -241,32 +244,30 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
241 244
242int amdgpu_uvd_suspend(struct amdgpu_device *adev) 245int amdgpu_uvd_suspend(struct amdgpu_device *adev)
243{ 246{
244 struct amdgpu_ring *ring = &adev->uvd.ring; 247 unsigned size;
245 int i, r; 248 void *ptr;
249 int i;
246 250
247 if (adev->uvd.vcpu_bo == NULL) 251 if (adev->uvd.vcpu_bo == NULL)
248 return 0; 252 return 0;
249 253
250 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 254 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
251 uint32_t handle = atomic_read(&adev->uvd.handles[i]); 255 if (atomic_read(&adev->uvd.handles[i]))
252 if (handle != 0) { 256 break;
253 struct fence *fence;
254 257
255 amdgpu_uvd_note_usage(adev); 258 if (i == AMDGPU_MAX_UVD_HANDLES)
259 return 0;
256 260
257 r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); 261 cancel_delayed_work_sync(&adev->uvd.idle_work);
258 if (r) {
259 DRM_ERROR("Error destroying UVD (%d)!\n", r);
260 continue;
261 }
262 262
263 fence_wait(fence, false); 263 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
264 fence_put(fence); 264 ptr = adev->uvd.cpu_addr;
265 265
266 adev->uvd.filp[i] = NULL; 266 adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
267 atomic_set(&adev->uvd.handles[i], 0); 267 if (!adev->uvd.saved_bo)
268 } 268 return -ENOMEM;
269 } 269
270 memcpy(adev->uvd.saved_bo, ptr, size);
270 271
271 return 0; 272 return 0;
272} 273}
@@ -275,23 +276,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
275{ 276{
276 unsigned size; 277 unsigned size;
277 void *ptr; 278 void *ptr;
278 const struct common_firmware_header *hdr;
279 unsigned offset;
280 279
281 if (adev->uvd.vcpu_bo == NULL) 280 if (adev->uvd.vcpu_bo == NULL)
282 return -EINVAL; 281 return -EINVAL;
283 282
284 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
285 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
286 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
287 (adev->uvd.fw->size) - offset);
288
289 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 283 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
290 size -= le32_to_cpu(hdr->ucode_size_bytes);
291 ptr = adev->uvd.cpu_addr; 284 ptr = adev->uvd.cpu_addr;
292 ptr += le32_to_cpu(hdr->ucode_size_bytes);
293 285
294 memset(ptr, 0, size); 286 if (adev->uvd.saved_bo != NULL) {
287 memcpy(ptr, adev->uvd.saved_bo, size);
288 kfree(adev->uvd.saved_bo);
289 adev->uvd.saved_bo = NULL;
290 } else {
291 const struct common_firmware_header *hdr;
292 unsigned offset;
293
294 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
295 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
296 memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
297 (adev->uvd.fw->size) - offset);
298 size -= le32_to_cpu(hdr->ucode_size_bytes);
299 ptr += le32_to_cpu(hdr->ucode_size_bytes);
300 memset(ptr, 0, size);
301 }
295 302
296 return 0; 303 return 0;
297} 304}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c108cea..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
234 if (i == AMDGPU_MAX_VCE_HANDLES) 234 if (i == AMDGPU_MAX_VCE_HANDLES)
235 return 0; 235 return 0;
236 236
237 cancel_delayed_work_sync(&adev->vce.idle_work);
237 /* TODO: suspending running encoding sessions isn't supported */ 238 /* TODO: suspending running encoding sessions isn't supported */
238 return -EINVAL; 239 return -EINVAL;
239} 240}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..1cd6de575305 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
298 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 298 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
299 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 299 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
300 300
301 /* vertical FP must be at least 1 */
302 if (mode->crtc_vsync_start == mode->crtc_vdisplay)
303 adjusted_mode->crtc_vsync_start++;
304
301 /* get the native mode for scaling */ 305 /* get the native mode for scaling */
302 if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) 306 if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
303 amdgpu_panel_mode_fixup(encoder, adjusted_mode); 307 amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 82ce7d943884..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
903 gmc_v7_0_set_gart_funcs(adev); 903 gmc_v7_0_set_gart_funcs(adev);
904 gmc_v7_0_set_irq_funcs(adev); 904 gmc_v7_0_set_irq_funcs(adev);
905 905
906 if (adev->flags & AMD_IS_APU) {
907 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
908 } else {
909 u32 tmp = RREG32(mmMC_SEQ_MISC0);
910 tmp &= MC_SEQ_MISC0__MT__MASK;
911 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
912 }
913
914 return 0; 906 return 0;
915} 907}
916 908
@@ -918,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
918{ 910{
919 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 911 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
920 912
921 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 913 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
914 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
915 else
916 return 0;
922} 917}
923 918
924static int gmc_v7_0_sw_init(void *handle) 919static int gmc_v7_0_sw_init(void *handle)
@@ -927,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
927 int dma_bits; 922 int dma_bits;
928 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 923 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
929 924
925 if (adev->flags & AMD_IS_APU) {
926 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
927 } else {
928 u32 tmp = RREG32(mmMC_SEQ_MISC0);
929 tmp &= MC_SEQ_MISC0__MT__MASK;
930 adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
931 }
932
930 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 933 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
931 if (r) 934 if (r)
932 return r; 935 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 29bd7b57dc91..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
863 gmc_v8_0_set_gart_funcs(adev); 863 gmc_v8_0_set_gart_funcs(adev);
864 gmc_v8_0_set_irq_funcs(adev); 864 gmc_v8_0_set_irq_funcs(adev);
865 865
866 if (adev->flags & AMD_IS_APU) {
867 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
868 } else {
869 u32 tmp = RREG32(mmMC_SEQ_MISC0);
870 tmp &= MC_SEQ_MISC0__MT__MASK;
871 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
872 }
873
874 return 0; 866 return 0;
875} 867}
876 868
@@ -878,15 +870,33 @@ static int gmc_v8_0_late_init(void *handle)
878{ 870{
879 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 871 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
880 872
881 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 873 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
874 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
875 else
876 return 0;
882} 877}
883 878
879#define mmMC_SEQ_MISC0_FIJI 0xA71
880
884static int gmc_v8_0_sw_init(void *handle) 881static int gmc_v8_0_sw_init(void *handle)
885{ 882{
886 int r; 883 int r;
887 int dma_bits; 884 int dma_bits;
888 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 885 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
889 886
887 if (adev->flags & AMD_IS_APU) {
888 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
889 } else {
890 u32 tmp;
891
892 if (adev->asic_type == CHIP_FIJI)
893 tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
894 else
895 tmp = RREG32(mmMC_SEQ_MISC0);
896 tmp &= MC_SEQ_MISC0__MT__MASK;
897 adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
898 }
899
890 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); 900 r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
891 if (r) 901 if (r)
892 return r; 902 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
307 307
308 amdgpu_irq_fini(adev); 308 amdgpu_irq_fini(adev);
309 amdgpu_ih_ring_fini(adev); 309 amdgpu_ih_ring_fini(adev);
310 amdgpu_irq_add_domain(adev); 310 amdgpu_irq_remove_domain(adev);
311 311
312 return 0; 312 return 0;
313} 313}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb38d8b..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
224 int r; 224 int r;
225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
226 226
227 r = amdgpu_uvd_suspend(adev); 227 r = uvd_v4_2_hw_fini(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
231 r = uvd_v4_2_hw_fini(adev); 231 r = amdgpu_uvd_suspend(adev);
232 if (r) 232 if (r)
233 return r; 233 return r;
234 234
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d9d79a..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
220 int r; 220 int r;
221 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
222 222
223 r = amdgpu_uvd_suspend(adev); 223 r = uvd_v5_0_hw_fini(adev);
224 if (r) 224 if (r)
225 return r; 225 return r;
226 226
227 r = uvd_v5_0_hw_fini(adev); 227 r = amdgpu_uvd_suspend(adev);
228 if (r) 228 if (r)
229 return r; 229 return r;
230 230
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614ac67..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
214 int r; 214 int r;
215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
216 216
217 r = uvd_v6_0_hw_fini(adev);
218 if (r)
219 return r;
220
217 /* Skip this for APU for now */ 221 /* Skip this for APU for now */
218 if (!(adev->flags & AMD_IS_APU)) { 222 if (!(adev->flags & AMD_IS_APU)) {
219 r = amdgpu_uvd_suspend(adev); 223 r = amdgpu_uvd_suspend(adev);
220 if (r) 224 if (r)
221 return r; 225 return r;
222 } 226 }
223 r = uvd_v6_0_hw_fini(adev);
224 if (r)
225 return r;
226 227
227 return r; 228 return r;
228} 229}
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
589 void *cgs_device, 589 void *cgs_device,
590 struct cgs_display_info *info); 590 struct cgs_display_info *info);
591 591
592typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
593
592typedef int (*cgs_call_acpi_method)(void *cgs_device, 594typedef int (*cgs_call_acpi_method)(void *cgs_device,
593 uint32_t acpi_method, 595 uint32_t acpi_method,
594 uint32_t acpi_function, 596 uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
644 cgs_set_clockgating_state set_clockgating_state; 646 cgs_set_clockgating_state set_clockgating_state;
645 /* display manager */ 647 /* display manager */
646 cgs_get_active_displays_info get_active_displays_info; 648 cgs_get_active_displays_info get_active_displays_info;
649 /* notify dpm enabled */
650 cgs_notify_dpm_enabled notify_dpm_enabled;
647 /* ACPI */ 651 /* ACPI */
648 cgs_call_acpi_method call_acpi_method; 652 cgs_call_acpi_method call_acpi_method;
649 /* get system info */ 653 /* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
734 CGS_CALL(set_powergating_state, dev, block_type, state) 738 CGS_CALL(set_powergating_state, dev, block_type, state)
735#define cgs_set_clockgating_state(dev, block_type, state) \ 739#define cgs_set_clockgating_state(dev, block_type, state) \
736 CGS_CALL(set_clockgating_state, dev, block_type, state) 740 CGS_CALL(set_clockgating_state, dev, block_type, state)
741#define cgs_notify_dpm_enabled(dev, enabled) \
742 CGS_CALL(notify_dpm_enabled, dev, enabled)
743
737#define cgs_get_active_displays_info(dev, info) \ 744#define cgs_get_active_displays_info(dev, info) \
738 CGS_CALL(get_active_displays_info, dev, info) 745 CGS_CALL(get_active_displays_info, dev, info)
746
739#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \ 747#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
740 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) 748 CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
741#define cgs_query_system_info(dev, sys_info) \ 749#define cgs_query_system_info(dev, sys_info) \
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
137 reset_display_configCounter_tasks, 137 reset_display_configCounter_tasks,
138 update_dal_configuration_tasks, 138 update_dal_configuration_tasks,
139 vari_bright_resume_tasks, 139 vari_bright_resume_tasks,
140 block_adjust_power_state_tasks,
141 setup_asic_tasks, 140 setup_asic_tasks,
142 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ 141 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
143 enable_dynamic_state_management_tasks, 142 enable_dynamic_state_management_tasks,
144 enable_clock_power_gatings_tasks, 143 enable_clock_power_gatings_tasks,
145 enable_disable_bapm_tasks, 144 enable_disable_bapm_tasks,
146 initialize_thermal_controller_tasks, 145 initialize_thermal_controller_tasks,
147 reset_boot_state_tasks, 146 get_2d_performance_state_tasks,
147 set_performance_state_tasks,
148 adjust_power_state_tasks, 148 adjust_power_state_tasks,
149 enable_disable_fps_tasks, 149 enable_disable_fps_tasks,
150 notify_hw_power_source_tasks, 150 notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 51dedf84623c..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
2389 2389
2390 for(count = 0; count < table->VceLevelCount; count++) { 2390 for(count = 0; count < table->VceLevelCount; count++) {
2391 table->VceLevel[count].Frequency = mm_table->entries[count].eclk; 2391 table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
2392 table->VceLevel[count].MinVoltage = 0;
2392 table->VceLevel[count].MinVoltage |= 2393 table->VceLevel[count].MinVoltage |=
2393 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; 2394 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
2394 table->VceLevel[count].MinVoltage |= 2395 table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
2465 2466
2466 for (count = 0; count < table->SamuLevelCount; count++) { 2467 for (count = 0; count < table->SamuLevelCount; count++) {
2467 /* not sure whether we need evclk or not */ 2468 /* not sure whether we need evclk or not */
2469 table->SamuLevel[count].MinVoltage = 0;
2468 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; 2470 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
2469 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 2471 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
2470 VOLTAGE_SCALE) << VDDC_SHIFT; 2472 VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
2562 table->UvdBootLevel = 0; 2564 table->UvdBootLevel = 0;
2563 2565
2564 for (count = 0; count < table->UvdLevelCount; count++) { 2566 for (count = 0; count < table->UvdLevelCount; count++) {
2567 table->UvdLevel[count].MinVoltage = 0;
2565 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; 2568 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
2566 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; 2569 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
2567 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 2570 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2900 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) 2903 if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
2901 fiji_populate_smc_voltage_tables(hwmgr, table); 2904 fiji_populate_smc_voltage_tables(hwmgr, table);
2902 2905
2906 table->SystemFlags = 0;
2907
2903 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2908 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2904 PHM_PlatformCaps_AutomaticDCTransition)) 2909 PHM_PlatformCaps_AutomaticDCTransition))
2905 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 2910 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
2997 table->MemoryThermThrottleEnable = 1; 3002 table->MemoryThermThrottleEnable = 1;
2998 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ 3003 table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
2999 table->PCIeGenInterval = 1; 3004 table->PCIeGenInterval = 1;
3005 table->VRConfig = 0;
3000 3006
3001 result = fiji_populate_vr_config(hwmgr, table); 3007 result = fiji_populate_vr_config(hwmgr, table);
3002 PP_ASSERT_WITH_CODE(0 == result, 3008 PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
5195 return size; 5201 return size;
5196} 5202}
5197 5203
5204static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
5205 const struct fiji_performance_level *pl2)
5206{
5207 return ((pl1->memory_clock == pl2->memory_clock) &&
5208 (pl1->engine_clock == pl2->engine_clock) &&
5209 (pl1->pcie_gen == pl2->pcie_gen) &&
5210 (pl1->pcie_lane == pl2->pcie_lane));
5211}
5212
5213int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
5214{
5215 const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
5216 const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
5217 int i;
5218
5219 if (equal == NULL || psa == NULL || psb == NULL)
5220 return -EINVAL;
5221
5222 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
5223 if (psa->performance_level_count != psb->performance_level_count) {
5224 *equal = false;
5225 return 0;
5226 }
5227
5228 for (i = 0; i < psa->performance_level_count; i++) {
5229 if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
5230 /* If we have found even one performance level pair that is different the states are different. */
5231 *equal = false;
5232 return 0;
5233 }
5234 }
5235
5236 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
5237 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
5238 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
5239 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
5240 *equal &= (psa->acp_clk == psb->acp_clk);
5241
5242 return 0;
5243}
5244
5245bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
5246{
5247 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5248 bool is_update_required = false;
5249 struct cgs_display_info info = {0,0,NULL};
5250
5251 cgs_get_active_displays_info(hwmgr->device, &info);
5252
5253 if (data->display_timing.num_existing_displays != info.display_count)
5254 is_update_required = true;
5255/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
5256 if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
5257 cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
5258 if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
5259 is_update_required = true;
5260*/
5261 return is_update_required;
5262}
5263
5264
5198static const struct pp_hwmgr_func fiji_hwmgr_funcs = { 5265static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5199 .backend_init = &fiji_hwmgr_backend_init, 5266 .backend_init = &fiji_hwmgr_backend_init,
5200 .backend_fini = &tonga_hwmgr_backend_fini, 5267 .backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5230 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, 5297 .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
5231 .set_fan_control_mode = fiji_set_fan_control_mode, 5298 .set_fan_control_mode = fiji_set_fan_control_mode,
5232 .get_fan_control_mode = fiji_get_fan_control_mode, 5299 .get_fan_control_mode = fiji_get_fan_control_mode,
5300 .check_states_equal = fiji_check_states_equal,
5301 .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
5233 .get_pp_table = fiji_get_pp_table, 5302 .get_pp_table = fiji_get_pp_table,
5234 .set_pp_table = fiji_set_pp_table, 5303 .set_pp_table = fiji_set_pp_table,
5235 .force_clock_level = fiji_force_clock_level, 5304 .force_clock_level = fiji_force_clock_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed2538a..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
58 58
59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); 59 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
60 60
61 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
62 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
63
61 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && 64 if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
62 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) 65 acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
63 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); 66 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
130 133
131int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) 134int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
132{ 135{
136 int ret = 1;
137 bool enabled;
133 PHM_FUNC_CHECK(hwmgr); 138 PHM_FUNC_CHECK(hwmgr);
134 139
135 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 140 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
136 PHM_PlatformCaps_TablelessHardwareInterface)) { 141 PHM_PlatformCaps_TablelessHardwareInterface)) {
137 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) 142 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
138 return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); 143 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
139 } else { 144 } else {
140 return phm_dispatch_table(hwmgr, 145 ret = phm_dispatch_table(hwmgr,
141 &(hwmgr->enable_dynamic_state_management), 146 &(hwmgr->enable_dynamic_state_management),
142 NULL, NULL); 147 NULL, NULL);
143 } 148 }
144 return 0; 149
150 enabled = ret == 0 ? true : false;
151
152 cgs_notify_dpm_enabled(hwmgr->device, enabled);
153
154 return ret;
145} 155}
146 156
147int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) 157int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f97699..3ac1ae4d8caf 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
57 DRM_ERROR("failed to map control registers area\n"); 57 DRM_ERROR("failed to map control registers area\n");
58 ret = PTR_ERR(hdlcd->mmio); 58 ret = PTR_ERR(hdlcd->mmio);
59 hdlcd->mmio = NULL; 59 hdlcd->mmio = NULL;
60 goto fail; 60 return ret;
61 } 61 }
62 62
63 version = hdlcd_read(hdlcd, HDLCD_REG_VERSION); 63 version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
64 if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { 64 if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
65 DRM_ERROR("unknown product id: 0x%x\n", version); 65 DRM_ERROR("unknown product id: 0x%x\n", version);
66 ret = -EINVAL; 66 return -EINVAL;
67 goto fail;
68 } 67 }
69 DRM_INFO("found ARM HDLCD version r%dp%d\n", 68 DRM_INFO("found ARM HDLCD version r%dp%d\n",
70 (version & HDLCD_VERSION_MAJOR_MASK) >> 8, 69 (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
73 /* Get the optional framebuffer memory resource */ 72 /* Get the optional framebuffer memory resource */
74 ret = of_reserved_mem_device_init(drm->dev); 73 ret = of_reserved_mem_device_init(drm->dev);
75 if (ret && ret != -ENODEV) 74 if (ret && ret != -ENODEV)
76 goto fail; 75 return ret;
77 76
78 ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); 77 ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
79 if (ret) 78 if (ret)
@@ -101,8 +100,6 @@ irq_fail:
101 drm_crtc_cleanup(&hdlcd->crtc); 100 drm_crtc_cleanup(&hdlcd->crtc);
102setup_fail: 101setup_fail:
103 of_reserved_mem_device_release(drm->dev); 102 of_reserved_mem_device_release(drm->dev);
104fail:
105 devm_clk_put(drm->dev, hdlcd->clk);
106 103
107 return ret; 104 return ret;
108} 105}
@@ -412,7 +409,6 @@ err_unload:
412 pm_runtime_put_sync(drm->dev); 409 pm_runtime_put_sync(drm->dev);
413 pm_runtime_disable(drm->dev); 410 pm_runtime_disable(drm->dev);
414 of_reserved_mem_device_release(drm->dev); 411 of_reserved_mem_device_release(drm->dev);
415 devm_clk_put(dev, hdlcd->clk);
416err_free: 412err_free:
417 drm_dev_unref(drm); 413 drm_dev_unref(drm);
418 414
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
436 pm_runtime_put_sync(drm->dev); 432 pm_runtime_put_sync(drm->dev);
437 pm_runtime_disable(drm->dev); 433 pm_runtime_disable(drm->dev);
438 of_reserved_mem_device_release(drm->dev); 434 of_reserved_mem_device_release(drm->dev);
439 if (!IS_ERR(hdlcd->clk)) {
440 devm_clk_put(drm->dev, hdlcd->clk);
441 hdlcd->clk = NULL;
442 }
443 drm_mode_config_cleanup(drm); 435 drm_mode_config_cleanup(drm);
444 drm_dev_unregister(drm); 436 drm_dev_unregister(drm);
445 drm_dev_unref(drm); 437 drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
481 481
482 release: 482 release:
483 for_each_sg(sgt->sgl, sg, num, i) 483 for_each_sg(sgt->sgl, sg, num, i)
484 page_cache_release(sg_page(sg)); 484 put_page(sg_page(sg));
485 free_table: 485 free_table:
486 sg_free_table(sgt); 486 sg_free_table(sgt);
487 free_sgt: 487 free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
502 if (dobj->obj.filp) { 502 if (dobj->obj.filp) {
503 struct scatterlist *sg; 503 struct scatterlist *sg;
504 for_each_sg(sgt->sgl, sg, sgt->nents, i) 504 for_each_sg(sgt->sgl, sg, sgt->nents, i)
505 page_cache_release(sg_page(sg)); 505 put_page(sg_page(sg));
506 } 506 }
507 507
508 sg_free_table(sgt); 508 sg_free_table(sgt);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7d58f594cffe..df64ed1c0139 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
179{ 179{
180 struct drm_dp_aux_msg msg; 180 struct drm_dp_aux_msg msg;
181 unsigned int retry; 181 unsigned int retry;
182 int err; 182 int err = 0;
183 183
184 memset(&msg, 0, sizeof(msg)); 184 memset(&msg, 0, sizeof(msg));
185 msg.address = offset; 185 msg.address = offset;
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
187 msg.buffer = buffer; 187 msg.buffer = buffer;
188 msg.size = size; 188 msg.size = size;
189 189
190 mutex_lock(&aux->hw_mutex);
191
190 /* 192 /*
191 * The specification doesn't give any recommendation on how often to 193 * The specification doesn't give any recommendation on how often to
192 * retry native transactions. We used to retry 7 times like for 194 * retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
195 */ 197 */
196 for (retry = 0; retry < 32; retry++) { 198 for (retry = 0; retry < 32; retry++) {
197 199
198 mutex_lock(&aux->hw_mutex);
199 err = aux->transfer(aux, &msg); 200 err = aux->transfer(aux, &msg);
200 mutex_unlock(&aux->hw_mutex);
201 if (err < 0) { 201 if (err < 0) {
202 if (err == -EBUSY) 202 if (err == -EBUSY)
203 continue; 203 continue;
204 204
205 return err; 205 goto unlock;
206 } 206 }
207 207
208 208
209 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { 209 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
210 case DP_AUX_NATIVE_REPLY_ACK: 210 case DP_AUX_NATIVE_REPLY_ACK:
211 if (err < size) 211 if (err < size)
212 return -EPROTO; 212 err = -EPROTO;
213 return err; 213 goto unlock;
214 214
215 case DP_AUX_NATIVE_REPLY_NACK: 215 case DP_AUX_NATIVE_REPLY_NACK:
216 return -EIO; 216 err = -EIO;
217 goto unlock;
217 218
218 case DP_AUX_NATIVE_REPLY_DEFER: 219 case DP_AUX_NATIVE_REPLY_DEFER:
219 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 220 usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
222 } 223 }
223 224
224 DRM_DEBUG_KMS("too many retries, giving up\n"); 225 DRM_DEBUG_KMS("too many retries, giving up\n");
225 return -EIO; 226 err = -EIO;
227
228unlock:
229 mutex_unlock(&aux->hw_mutex);
230 return err;
226} 231}
227 232
228/** 233/**
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
544 int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); 549 int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
545 550
546 for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { 551 for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
547 mutex_lock(&aux->hw_mutex);
548 ret = aux->transfer(aux, msg); 552 ret = aux->transfer(aux, msg);
549 mutex_unlock(&aux->hw_mutex);
550 if (ret < 0) { 553 if (ret < 0) {
551 if (ret == -EBUSY) 554 if (ret == -EBUSY)
552 continue; 555 continue;
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
685 688
686 memset(&msg, 0, sizeof(msg)); 689 memset(&msg, 0, sizeof(msg));
687 690
691 mutex_lock(&aux->hw_mutex);
692
688 for (i = 0; i < num; i++) { 693 for (i = 0; i < num; i++) {
689 msg.address = msgs[i].addr; 694 msg.address = msgs[i].addr;
690 drm_dp_i2c_msg_set_request(&msg, &msgs[i]); 695 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
739 msg.size = 0; 744 msg.size = 0;
740 (void)drm_dp_i2c_do_msg(aux, &msg); 745 (void)drm_dp_i2c_do_msg(aux, &msg);
741 746
747 mutex_unlock(&aux->hw_mutex);
748
742 return err; 749 return err;
743} 750}
744 751
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1673 int i; 1673 int i;
1674 1674
1675 port = drm_dp_get_validated_port_ref(mgr, port);
1676 if (!port)
1677 return -EINVAL;
1678
1675 port_num = port->port_num; 1679 port_num = port->port_num;
1676 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1680 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1677 if (!mstb) { 1681 if (!mstb) {
1678 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1682 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1679 1683
1680 if (!mstb) 1684 if (!mstb) {
1685 drm_dp_put_port(port);
1681 return -EINVAL; 1686 return -EINVAL;
1687 }
1682 } 1688 }
1683 1689
1684 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1690 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1707 kfree(txmsg); 1713 kfree(txmsg);
1708fail_put: 1714fail_put:
1709 drm_dp_put_mst_branch_device(mstb); 1715 drm_dp_put_mst_branch_device(mstb);
1716 drm_dp_put_port(port);
1710 return ret; 1717 return ret;
1711} 1718}
1712 1719
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1789 req_payload.start_slot = cur_slots; 1796 req_payload.start_slot = cur_slots;
1790 if (mgr->proposed_vcpis[i]) { 1797 if (mgr->proposed_vcpis[i]) {
1791 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1798 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1799 port = drm_dp_get_validated_port_ref(mgr, port);
1800 if (!port) {
1801 mutex_unlock(&mgr->payload_lock);
1802 return -EINVAL;
1803 }
1792 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1804 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1793 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1805 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1794 } else { 1806 } else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1816 mgr->payloads[i].payload_state = req_payload.payload_state; 1828 mgr->payloads[i].payload_state = req_payload.payload_state;
1817 } 1829 }
1818 cur_slots += req_payload.num_slots; 1830 cur_slots += req_payload.num_slots;
1831
1832 if (port)
1833 drm_dp_put_port(port);
1819 } 1834 }
1820 1835
1821 for (i = 0; i < mgr->max_payloads; i++) { 1836 for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2121 2136
2122 if (mgr->mst_primary) { 2137 if (mgr->mst_primary) {
2123 int sret; 2138 int sret;
2139 u8 guid[16];
2140
2124 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2141 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2125 if (sret != DP_RECEIVER_CAP_SIZE) { 2142 if (sret != DP_RECEIVER_CAP_SIZE) {
2126 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2143 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2135 ret = -1; 2152 ret = -1;
2136 goto out_unlock; 2153 goto out_unlock;
2137 } 2154 }
2155
2156 /* Some hubs forget their guids after they resume */
2157 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2158 if (sret != 16) {
2159 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2160 ret = -1;
2161 goto out_unlock;
2162 }
2163 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2164
2138 ret = 0; 2165 ret = 0;
2139 } else 2166 } else
2140 ret = -1; 2167 ret = -1;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f61aa05..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
205 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 205 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
206 /* 0x0f - 1024x768@43Hz, interlace */ 206 /* 0x0f - 1024x768@43Hz, interlace */
207 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 207 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
208 1208, 1264, 0, 768, 768, 772, 817, 0, 208 1208, 1264, 0, 768, 768, 776, 817, 0,
209 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 209 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
210 DRM_MODE_FLAG_INTERLACE) }, 210 DRM_MODE_FLAG_INTERLACE) },
211 /* 0x10 - 1024x768@60Hz */ 211 /* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
522 720, 840, 0, 480, 481, 484, 500, 0, 522 720, 840, 0, 480, 481, 484, 500, 0,
523 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ 523 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
524 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 524 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
525 704, 832, 0, 480, 489, 491, 520, 0, 525 704, 832, 0, 480, 489, 492, 520, 0,
526 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ 526 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
527 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 527 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
528 768, 864, 0, 480, 483, 486, 525, 0, 528 768, 864, 0, 480, 483, 486, 525, 0,
529 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ 529 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
530 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, 530 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
531 752, 800, 0, 480, 490, 492, 525, 0, 531 752, 800, 0, 480, 490, 492, 525, 0,
532 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ 532 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
533 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, 533 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
539 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 539 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
540 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, 540 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
541 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ 541 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
542 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, 542 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
543 1136, 1312, 0, 768, 769, 772, 800, 0, 543 1136, 1312, 0, 768, 769, 772, 800, 0,
544 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ 544 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
545 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 545 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
2241{ 2241{
2242 int i, j, m, modes = 0; 2242 int i, j, m, modes = 0;
2243 struct drm_display_mode *mode; 2243 struct drm_display_mode *mode;
2244 u8 *est = ((u8 *)timing) + 5; 2244 u8 *est = ((u8 *)timing) + 6;
2245 2245
2246 for (i = 0; i < 6; i++) { 2246 for (i = 0; i < 6; i++) {
2247 for (j = 7; j >= 0; j--) { 2247 for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
534 534
535fail: 535fail:
536 while (i--) 536 while (i--)
537 page_cache_release(pages[i]); 537 put_page(pages[i]);
538 538
539 drm_free_large(pages); 539 drm_free_large(pages);
540 return ERR_CAST(p); 540 return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
569 mark_page_accessed(pages[i]); 569 mark_page_accessed(pages[i]);
570 570
571 /* Undo the reference we took when populating the table */ 571 /* Undo the reference we took when populating the table */
572 page_cache_release(pages[i]); 572 put_page(pages[i]);
573 } 573 }
574 574
575 drm_free_large(pages); 575 drm_free_large(pages);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
572 goto fail; 572 goto fail;
573 } 573 }
574 574
575 /*
576 * Set the GPU linear window to be at the end of the DMA window, where
577 * the CMA area is likely to reside. This ensures that we are able to
578 * map the command buffers while having the linear window overlap as
579 * much RAM as possible, so we can optimize mappings for other buffers.
580 *
581 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
582 * to different views of the memory on the individual engines.
583 */
584 if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
585 (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
586 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
587 if (dma_mask < PHYS_OFFSET + SZ_2G)
588 gpu->memory_base = PHYS_OFFSET;
589 else
590 gpu->memory_base = dma_mask - SZ_2G + 1;
591 }
592
575 ret = etnaviv_hw_reset(gpu); 593 ret = etnaviv_hw_reset(gpu);
576 if (ret) 594 if (ret)
577 goto fail; 595 goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1566{ 1584{
1567 struct device *dev = &pdev->dev; 1585 struct device *dev = &pdev->dev;
1568 struct etnaviv_gpu *gpu; 1586 struct etnaviv_gpu *gpu;
1569 u32 dma_mask;
1570 int err = 0; 1587 int err = 0;
1571 1588
1572 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); 1589 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1576 gpu->dev = &pdev->dev; 1593 gpu->dev = &pdev->dev;
1577 mutex_init(&gpu->lock); 1594 mutex_init(&gpu->lock);
1578 1595
1579 /*
1580 * Set the GPU linear window to be at the end of the DMA window, where
1581 * the CMA area is likely to reside. This ensures that we are able to
1582 * map the command buffers while having the linear window overlap as
1583 * much RAM as possible, so we can optimize mappings for other buffers.
1584 */
1585 dma_mask = (u32)dma_get_required_mask(dev);
1586 if (dma_mask < PHYS_OFFSET + SZ_2G)
1587 gpu->memory_base = PHYS_OFFSET;
1588 else
1589 gpu->memory_base = dma_mask - SZ_2G + 1;
1590
1591 /* Map registers: */ 1596 /* Map registers: */
1592 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); 1597 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1593 if (IS_ERR(gpu->mmio)) 1598 if (IS_ERR(gpu->mmio))
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
94 94
95config DRM_EXYNOS_G2D 95config DRM_EXYNOS_G2D
96 bool "G2D" 96 bool "G2D"
97 depends on !VIDEO_SAMSUNG_S5P_G2D 97 depends on VIDEO_SAMSUNG_S5P_G2D=n
98 select FRAME_VECTOR 98 select FRAME_VECTOR
99 help 99 help
100 Choose this option if you want to use Exynos G2D for DRM. 100 Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the drm device driver. This driver provides support for the 2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \ 5exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
6 exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \ 6 exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
7 exynos_drm_plane.o
8 7
8exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
9exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o 9exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
10exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 10exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
11exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o 11exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
101 return 0; 101 return 0;
102 102
103err: 103err:
104 list_for_each_entry_reverse(subdrv, &subdrv->list, list) { 104 list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
105 if (subdrv->close) 105 if (subdrv->close)
106 subdrv->close(dev, subdrv->dev, file); 106 subdrv->close(dev, subdrv->dev, file);
107 } 107 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
199 return exynos_fb->dma_addr[index]; 199 return exynos_fb->dma_addr[index];
200} 200}
201 201
202static void exynos_drm_output_poll_changed(struct drm_device *dev)
203{
204 struct exynos_drm_private *private = dev->dev_private;
205 struct drm_fb_helper *fb_helper = private->fb_helper;
206
207 if (fb_helper)
208 drm_fb_helper_hotplug_event(fb_helper);
209 else
210 exynos_drm_fbdev_init(dev);
211}
212
213static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 202static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
214 .fb_create = exynos_user_fb_create, 203 .fb_create = exynos_user_fb_create,
215 .output_poll_changed = exynos_drm_output_poll_changed, 204 .output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
317 317
318 drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper); 318 drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
319} 319}
320
321void exynos_drm_output_poll_changed(struct drm_device *dev)
322{
323 struct exynos_drm_private *private = dev->dev_private;
324 struct drm_fb_helper *fb_helper = private->fb_helper;
325
326 if (fb_helper)
327 drm_fb_helper_hotplug_event(fb_helper);
328 else
329 exynos_drm_fbdev_init(dev);
330}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
15#ifndef _EXYNOS_DRM_FBDEV_H_ 15#ifndef _EXYNOS_DRM_FBDEV_H_
16#define _EXYNOS_DRM_FBDEV_H_ 16#define _EXYNOS_DRM_FBDEV_H_
17 17
18#ifdef CONFIG_DRM_FBDEV_EMULATION
19
18int exynos_drm_fbdev_init(struct drm_device *dev); 20int exynos_drm_fbdev_init(struct drm_device *dev);
19int exynos_drm_fbdev_reinit(struct drm_device *dev);
20void exynos_drm_fbdev_fini(struct drm_device *dev); 21void exynos_drm_fbdev_fini(struct drm_device *dev);
21void exynos_drm_fbdev_restore_mode(struct drm_device *dev); 22void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
23void exynos_drm_output_poll_changed(struct drm_device *dev);
24
25#else
26
27static inline int exynos_drm_fbdev_init(struct drm_device *dev)
28{
29 return 0;
30}
31
32static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
33{
34}
35
36static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
37{
38}
39
40#define exynos_drm_output_poll_changed (NULL)
41
42#endif
22 43
23#endif 44#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
888 * clock. On these SoCs the bootloader may enable it but any 888 * clock. On these SoCs the bootloader may enable it but any
889 * power domain off/on will reset it to disable state. 889 * power domain off/on will reset it to disable state.
890 */ 890 */
891 if (ctx->driver_data != &exynos5_fimd_driver_data || 891 if (ctx->driver_data != &exynos5_fimd_driver_data &&
892 ctx->driver_data != &exynos5420_fimd_driver_data) 892 ctx->driver_data != &exynos5420_fimd_driver_data)
893 return; 893 return;
894 894
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
129 } else 129 } else
130 val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX); 130 val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
131 131
132 regmap_write(mic->sysreg, DSD_CFG_MUX, val); 132 ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
133 if (ret) 133 if (ret)
134 DRM_ERROR("mic: Failed to read system register\n"); 134 DRM_ERROR("mic: Failed to read system register\n");
135} 135}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
457 "samsung,disp-syscon"); 457 "samsung,disp-syscon");
458 if (IS_ERR(mic->sysreg)) { 458 if (IS_ERR(mic->sysreg)) {
459 DRM_ERROR("mic: Failed to get system register.\n"); 459 DRM_ERROR("mic: Failed to get system register.\n");
460 ret = PTR_ERR(mic->sysreg);
460 goto err; 461 goto err;
461 } 462 }
462 463
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13 13
14#include <drm/exynos_drm.h> 14#include <drm/drm_atomic.h>
15#include <drm/drm_plane_helper.h>
16#include <drm/drm_atomic_helper.h> 15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_plane_helper.h>
17#include <drm/exynos_drm.h>
17#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
18#include "exynos_drm_crtc.h" 19#include "exynos_drm_crtc.h"
19#include "exynos_drm_fb.h" 20#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
57} 58}
58 59
59static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state) 60static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
60
61{ 61{
62 struct drm_plane_state *state = &exynos_state->base; 62 struct drm_plane_state *state = &exynos_state->base;
63 struct drm_crtc *crtc = exynos_state->base.crtc; 63 struct drm_crtc *crtc = state->crtc;
64 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 64 struct drm_crtc_state *crtc_state =
65 drm_atomic_get_existing_crtc_state(state->state, crtc);
66 struct drm_display_mode *mode = &crtc_state->adjusted_mode;
65 int crtc_x, crtc_y; 67 int crtc_x, crtc_y;
66 unsigned int crtc_w, crtc_h; 68 unsigned int crtc_w, crtc_h;
67 unsigned int src_x, src_y; 69 unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to 220 * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon. 221 * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
222 */ 222 */
223static int __deprecated 223static int
224i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 224i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
225{ 225{
226 int error; 226 int error;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..6d2fb3f4ac62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
758 dev_priv->display.hpd_irq_setup(dev); 758 dev_priv->display.hpd_irq_setup(dev);
759 spin_unlock_irq(&dev_priv->irq_lock); 759 spin_unlock_irq(&dev_priv->irq_lock);
760 760
761 intel_display_resume(dev);
762
763 intel_dp_mst_resume(dev); 761 intel_dp_mst_resume(dev);
764 762
763 intel_display_resume(dev);
764
765 /* 765 /*
766 * ... but also need to make sure that hotplug processing 766 * ... but also need to make sure that hotplug processing
767 * doesn't cause havoc. Like in the driver load code we don't 767 * doesn't cause havoc. Like in the driver load code we don't
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
792static int i915_drm_resume_early(struct drm_device *dev) 792static int i915_drm_resume_early(struct drm_device *dev)
793{ 793{
794 struct drm_i915_private *dev_priv = dev->dev_private; 794 struct drm_i915_private *dev_priv = dev->dev_private;
795 int ret = 0; 795 int ret;
796 796
797 /* 797 /*
798 * We have a resume ordering issue with the snd-hda driver also 798 * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
803 * FIXME: This should be solved with a special hdmi sink device or 803 * FIXME: This should be solved with a special hdmi sink device or
804 * similar so that power domains can be employed. 804 * similar so that power domains can be employed.
805 */ 805 */
806
807 /*
808 * Note that we need to set the power state explicitly, since we
809 * powered off the device during freeze and the PCI core won't power
810 * it back up for us during thaw. Powering off the device during
811 * freeze is not a hard requirement though, and during the
812 * suspend/resume phases the PCI core makes sure we get here with the
813 * device powered on. So in case we change our freeze logic and keep
814 * the device powered we can also remove the following set power state
815 * call.
816 */
817 ret = pci_set_power_state(dev->pdev, PCI_D0);
818 if (ret) {
819 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
820 goto out;
821 }
822
823 /*
824 * Note that pci_enable_device() first enables any parent bridge
825 * device and only then sets the power state for this device. The
826 * bridge enabling is a nop though, since bridge devices are resumed
827 * first. The order of enabling power and enabling the device is
828 * imposed by the PCI core as described above, so here we preserve the
829 * same order for the freeze/thaw phases.
830 *
831 * TODO: eventually we should remove pci_disable_device() /
832 * pci_enable_enable_device() from suspend/resume. Due to how they
833 * depend on the device enable refcount we can't anyway depend on them
834 * disabling/enabling the device.
835 */
806 if (pci_enable_device(dev->pdev)) { 836 if (pci_enable_device(dev->pdev)) {
807 ret = -EIO; 837 ret = -EIO;
808 goto out; 838 goto out;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
2634 2634
2635/* WaRsDisableCoarsePowerGating:skl,bxt */ 2635/* WaRsDisableCoarsePowerGating:skl,bxt */
2636#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2636#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
2637 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ 2637 IS_SKL_GT3(dev) || \
2638 IS_SKL_REVID(dev, 0, SKL_REVID_F0))) 2638 IS_SKL_GT4(dev))
2639
2639/* 2640/*
2640 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2641 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
2641 * even when in MSI mode. This results in spurious interrupt warnings if the 2642 * even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 drm_clflush_virt_range(vaddr, PAGE_SIZE); 177 drm_clflush_virt_range(vaddr, PAGE_SIZE);
178 kunmap_atomic(src); 178 kunmap_atomic(src);
179 179
180 page_cache_release(page); 180 put_page(page);
181 vaddr += PAGE_SIZE; 181 vaddr += PAGE_SIZE;
182 } 182 }
183 183
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
243 set_page_dirty(page); 243 set_page_dirty(page);
244 if (obj->madv == I915_MADV_WILLNEED) 244 if (obj->madv == I915_MADV_WILLNEED)
245 mark_page_accessed(page); 245 mark_page_accessed(page);
246 page_cache_release(page); 246 put_page(page);
247 vaddr += PAGE_SIZE; 247 vaddr += PAGE_SIZE;
248 } 248 }
249 obj->dirty = 0; 249 obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2206 if (obj->madv == I915_MADV_WILLNEED) 2206 if (obj->madv == I915_MADV_WILLNEED)
2207 mark_page_accessed(page); 2207 mark_page_accessed(page);
2208 2208
2209 page_cache_release(page); 2209 put_page(page);
2210 } 2210 }
2211 obj->dirty = 0; 2211 obj->dirty = 0;
2212 2212
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2346err_pages: 2346err_pages:
2347 sg_mark_end(sg); 2347 sg_mark_end(sg);
2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2348 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2349 page_cache_release(sg_page_iter_page(&sg_iter)); 2349 put_page(sg_page_iter_page(&sg_iter));
2350 sg_free_table(st); 2350 sg_free_table(st);
2351 kfree(st); 2351 kfree(st);
2352 2352
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3ba2c7..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
501 if (pvec != NULL) { 501 if (pvec != NULL) {
502 struct mm_struct *mm = obj->userptr.mm->mm; 502 struct mm_struct *mm = obj->userptr.mm->mm;
503 503
504 down_read(&mm->mmap_sem); 504 ret = -EFAULT;
505 while (pinned < npages) { 505 if (atomic_inc_not_zero(&mm->mm_users)) {
506 ret = get_user_pages_remote(work->task, mm, 506 down_read(&mm->mmap_sem);
507 obj->userptr.ptr + pinned * PAGE_SIZE, 507 while (pinned < npages) {
508 npages - pinned, 508 ret = get_user_pages_remote
509 !obj->userptr.read_only, 0, 509 (work->task, mm,
510 pvec + pinned, NULL); 510 obj->userptr.ptr + pinned * PAGE_SIZE,
511 if (ret < 0) 511 npages - pinned,
512 break; 512 !obj->userptr.read_only, 0,
513 513 pvec + pinned, NULL);
514 pinned += ret; 514 if (ret < 0)
515 break;
516
517 pinned += ret;
518 }
519 up_read(&mm->mmap_sem);
520 mmput(mm);
515 } 521 }
516 up_read(&mm->mmap_sem);
517 } 522 }
518 523
519 mutex_lock(&dev->struct_mutex); 524 mutex_lock(&dev->struct_mutex);
@@ -683,7 +688,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
683 set_page_dirty(page); 688 set_page_dirty(page);
684 689
685 mark_page_accessed(page); 690 mark_page_accessed(page);
686 page_cache_release(page); 691 put_page(page);
687 } 692 }
688 obj->dirty = 0; 693 obj->dirty = 0;
689 694
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1829 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1829 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1830 disable_rpm_wakeref_asserts(dev_priv); 1830 disable_rpm_wakeref_asserts(dev_priv);
1831 1831
1832 for (;;) { 1832 do {
1833 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1833 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1834 iir = I915_READ(VLV_IIR); 1834 iir = I915_READ(VLV_IIR);
1835 1835
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1857 1857
1858 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1858 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1859 POSTING_READ(GEN8_MASTER_IRQ); 1859 POSTING_READ(GEN8_MASTER_IRQ);
1860 } 1860 } while (0);
1861 1861
1862 enable_rpm_wakeref_asserts(dev_priv); 1862 enable_rpm_wakeref_asserts(dev_priv);
1863 1863
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f76cbf3e5d1e..fffdac801d3b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
2907#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) 2907#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
2908#define BXT_RP_STATE_CAP _MMIO(0x138170) 2908#define BXT_RP_STATE_CAP _MMIO(0x138170)
2909 2909
2910#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2910/*
2911 * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
2912 * 8300) freezing up around GPU hangs. Looks as if even
2913 * scheduling/timer interrupts start misbehaving if the RPS
2914 * EI/thresholds are "bad", leading to a very sluggish or even
2915 * frozen machine.
2916 */
2917#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
2911#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2918#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
2912#define INTERVAL_0_833_US(us) (((us) * 6) / 5) 2919#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
2913#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ 2920#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 62de9f4bce09..3b57bf06abe8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
443 } else if (IS_BROADWELL(dev_priv)) { 443 } else if (IS_BROADWELL(dev_priv)) {
444 ddi_translations_fdi = bdw_ddi_translations_fdi; 444 ddi_translations_fdi = bdw_ddi_translations_fdi;
445 ddi_translations_dp = bdw_ddi_translations_dp; 445 ddi_translations_dp = bdw_ddi_translations_dp;
446 ddi_translations_edp = bdw_ddi_translations_edp; 446
447 if (dev_priv->edp_low_vswing) {
448 ddi_translations_edp = bdw_ddi_translations_edp;
449 n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
450 } else {
451 ddi_translations_edp = bdw_ddi_translations_dp;
452 n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
453 }
454
447 ddi_translations_hdmi = bdw_ddi_translations_hdmi; 455 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
448 n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); 456
449 n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); 457 n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
450 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); 458 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
451 hdmi_default_entry = 7; 459 hdmi_default_entry = 7;
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
3201 intel_ddi_clock_get(encoder, pipe_config); 3209 intel_ddi_clock_get(encoder, pipe_config);
3202} 3210}
3203 3211
3204static void intel_ddi_destroy(struct drm_encoder *encoder)
3205{
3206 /* HDMI has nothing special to destroy, so we can go with this. */
3207 intel_dp_encoder_destroy(encoder);
3208}
3209
3210static bool intel_ddi_compute_config(struct intel_encoder *encoder, 3212static bool intel_ddi_compute_config(struct intel_encoder *encoder,
3211 struct intel_crtc_state *pipe_config) 3213 struct intel_crtc_state *pipe_config)
3212{ 3214{
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
3225} 3227}
3226 3228
3227static const struct drm_encoder_funcs intel_ddi_funcs = { 3229static const struct drm_encoder_funcs intel_ddi_funcs = {
3228 .destroy = intel_ddi_destroy, 3230 .reset = intel_dp_encoder_reset,
3231 .destroy = intel_dp_encoder_destroy,
3229}; 3232};
3230 3233
3231static struct intel_connector * 3234static struct intel_connector *
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3324 intel_encoder->post_disable = intel_ddi_post_disable; 3327 intel_encoder->post_disable = intel_ddi_post_disable;
3325 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 3328 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
3326 intel_encoder->get_config = intel_ddi_get_config; 3329 intel_encoder->get_config = intel_ddi_get_config;
3330 intel_encoder->suspend = intel_dp_encoder_suspend;
3327 3331
3328 intel_dig_port->port = port; 3332 intel_dig_port->port = port;
3329 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 3333 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6e0d8283daa6..182f84937345 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13351 } 13351 }
13352 13352
13353 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13353 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13354 if (state->legacy_cursor_update)
13355 continue;
13356
13354 ret = intel_crtc_wait_for_pending_flips(crtc); 13357 ret = intel_crtc_wait_for_pending_flips(crtc);
13355 if (ret) 13358 if (ret)
13356 return ret; 13359 return ret;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f069a82deb57..412a34c39522 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4898 kfree(intel_dig_port); 4898 kfree(intel_dig_port);
4899} 4899}
4900 4900
4901static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4901void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4902{ 4902{
4903 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 4903 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4904 4904
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4940 edp_panel_vdd_schedule_off(intel_dp); 4940 edp_panel_vdd_schedule_off(intel_dp);
4941} 4941}
4942 4942
4943static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4943void intel_dp_encoder_reset(struct drm_encoder *encoder)
4944{ 4944{
4945 struct intel_dp *intel_dp; 4945 struct intel_dp *intel_dp;
4946 4946
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
506 struct intel_connector *intel_connector = to_intel_connector(connector); 506 struct intel_connector *intel_connector = to_intel_connector(connector);
507 struct drm_device *dev = connector->dev; 507 struct drm_device *dev = connector->dev;
508 508
509 intel_connector->unregister(intel_connector);
510
509 /* need to nuke the connector */ 511 /* need to nuke the connector */
510 drm_modeset_lock_all(dev); 512 drm_modeset_lock_all(dev);
511 if (connector->state->crtc) { 513 if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
519 521
520 WARN(ret, "Disabling mst crtc failed with %i\n", ret); 522 WARN(ret, "Disabling mst crtc failed with %i\n", ret);
521 } 523 }
522 drm_modeset_unlock_all(dev);
523 524
524 intel_connector->unregister(intel_connector);
525
526 drm_modeset_lock_all(dev);
527 intel_connector_remove_from_fbdev(intel_connector); 525 intel_connector_remove_from_fbdev(intel_connector);
528 drm_connector_cleanup(connector); 526 drm_connector_cleanup(connector);
529 drm_modeset_unlock_all(dev); 527 drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4c027d69fac9..7d3af3a72abe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
1238void intel_dp_start_link_train(struct intel_dp *intel_dp); 1238void intel_dp_start_link_train(struct intel_dp *intel_dp);
1239void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1239void intel_dp_stop_link_train(struct intel_dp *intel_dp);
1240void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1240void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
1241void intel_dp_encoder_reset(struct drm_encoder *encoder);
1242void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
1241void intel_dp_encoder_destroy(struct drm_encoder *encoder); 1243void intel_dp_encoder_destroy(struct drm_encoder *encoder);
1242int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); 1244int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
1243bool intel_dp_compute_config(struct intel_encoder *encoder, 1245bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a0d8daed2470..1ab6f687f640 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
1415 hdmi_to_dig_port(intel_hdmi)); 1415 hdmi_to_dig_port(intel_hdmi));
1416 } 1416 }
1417 1417
1418 if (!live_status) 1418 if (!live_status) {
1419 DRM_DEBUG_KMS("Live status not up!"); 1419 DRM_DEBUG_KMS("HDMI live status down\n");
1420 /*
1421 * Live status register is not reliable on all intel platforms.
1422 * So consider live_status only for certain platforms, for
1423 * others, read EDID to determine presence of sink.
1424 */
1425 if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
1426 live_status = true;
1427 }
1420 1428
1421 intel_hdmi_unset_edid(connector); 1429 intel_hdmi_unset_edid(connector);
1422 1430
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
841 if (unlikely(total_bytes > remain_usable)) { 841 if (unlikely(total_bytes > remain_usable)) {
842 /* 842 /*
843 * The base request will fit but the reserved space 843 * The base request will fit but the reserved space
844 * falls off the end. So only need to to wait for the 844 * falls off the end. So don't need an immediate wrap
845 * reserved size after flushing out the remainder. 845 * and only need to effectively wait for the reserved
846 * size space from the start of ringbuffer.
846 */ 847 */
847 wait_bytes = remain_actual + ringbuf->reserved_size; 848 wait_bytes = remain_actual + ringbuf->reserved_size;
848 need_wrap = true;
849 } else if (total_bytes > ringbuf->space) { 849 } else if (total_bytes > ringbuf->space) {
850 /* No wrapping required, just waiting. */ 850 /* No wrapping required, just waiting. */
851 wait_bytes = total_bytes; 851 wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1913 struct intel_ringbuffer *ringbuf = request->ringbuf; 1913 struct intel_ringbuffer *ringbuf = request->ringbuf;
1914 int ret; 1914 int ret;
1915 1915
1916 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1916 ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
1917 if (ret) 1917 if (ret)
1918 return ret; 1918 return ret;
1919 1919
1920 /* We're using qword write, seqno should be aligned to 8 bytes. */
1921 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1922
1920 /* w/a for post sync ops following a GPGPU operation we 1923 /* w/a for post sync ops following a GPGPU operation we
1921 * need a prior CS_STALL, which is emitted by the flush 1924 * need a prior CS_STALL, which is emitted by the flush
1922 * following the batch. 1925 * following the batch.
1923 */ 1926 */
1924 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); 1927 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1925 intel_logical_ring_emit(ringbuf, 1928 intel_logical_ring_emit(ringbuf,
1926 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1929 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1927 PIPE_CONTROL_CS_STALL | 1930 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1929 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); 1932 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
1930 intel_logical_ring_emit(ringbuf, 0); 1933 intel_logical_ring_emit(ringbuf, 0);
1931 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1934 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1935 /* We're thrashing one dword of HWS. */
1936 intel_logical_ring_emit(ringbuf, 0);
1932 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1937 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1938 intel_logical_ring_emit(ringbuf, MI_NOOP);
1933 return intel_logical_ring_advance_and_submit(request); 1939 return intel_logical_ring_advance_and_submit(request);
1934} 1940}
1935 1941
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
478 * and as part of the cleanup in the hw state restore we also redisable 478 * and as part of the cleanup in the hw state restore we also redisable
479 * the vga plane. 479 * the vga plane.
480 */ 480 */
481 if (!HAS_PCH_SPLIT(dev)) { 481 if (!HAS_PCH_SPLIT(dev))
482 drm_modeset_lock_all(dev);
483 intel_display_resume(dev); 482 intel_display_resume(dev);
484 drm_modeset_unlock_all(dev);
485 }
486 483
487 dev_priv->modeset_restore = MODESET_DONE; 484 dev_priv->modeset_restore = MODESET_DONE;
488 485
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2876 const struct drm_plane_state *pstate, 2876 const struct drm_plane_state *pstate,
2877 int y) 2877 int y)
2878{ 2878{
2879 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2879 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2880 struct drm_framebuffer *fb = pstate->fb; 2880 struct drm_framebuffer *fb = pstate->fb;
2881 uint32_t width = 0, height = 0;
2882
2883 width = drm_rect_width(&intel_pstate->src) >> 16;
2884 height = drm_rect_height(&intel_pstate->src) >> 16;
2885
2886 if (intel_rotation_90_or_270(pstate->rotation))
2887 swap(width, height);
2881 2888
2882 /* for planar format */ 2889 /* for planar format */
2883 if (fb->pixel_format == DRM_FORMAT_NV12) { 2890 if (fb->pixel_format == DRM_FORMAT_NV12) {
2884 if (y) /* y-plane data rate */ 2891 if (y) /* y-plane data rate */
2885 return intel_crtc->config->pipe_src_w * 2892 return width * height *
2886 intel_crtc->config->pipe_src_h *
2887 drm_format_plane_cpp(fb->pixel_format, 0); 2893 drm_format_plane_cpp(fb->pixel_format, 0);
2888 else /* uv-plane data rate */ 2894 else /* uv-plane data rate */
2889 return (intel_crtc->config->pipe_src_w/2) * 2895 return (width / 2) * (height / 2) *
2890 (intel_crtc->config->pipe_src_h/2) *
2891 drm_format_plane_cpp(fb->pixel_format, 1); 2896 drm_format_plane_cpp(fb->pixel_format, 1);
2892 } 2897 }
2893 2898
2894 /* for packed formats */ 2899 /* for packed formats */
2895 return intel_crtc->config->pipe_src_w * 2900 return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
2896 intel_crtc->config->pipe_src_h *
2897 drm_format_plane_cpp(fb->pixel_format, 0);
2898} 2901}
2899 2902
2900/* 2903/*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
2973 struct drm_framebuffer *fb = plane->state->fb; 2976 struct drm_framebuffer *fb = plane->state->fb;
2974 int id = skl_wm_plane_id(intel_plane); 2977 int id = skl_wm_plane_id(intel_plane);
2975 2978
2976 if (fb == NULL) 2979 if (!to_intel_plane_state(plane->state)->visible)
2977 continue; 2980 continue;
2981
2978 if (plane->type == DRM_PLANE_TYPE_CURSOR) 2982 if (plane->type == DRM_PLANE_TYPE_CURSOR)
2979 continue; 2983 continue;
2980 2984
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3000 uint16_t plane_blocks, y_plane_blocks = 0; 3004 uint16_t plane_blocks, y_plane_blocks = 0;
3001 int id = skl_wm_plane_id(intel_plane); 3005 int id = skl_wm_plane_id(intel_plane);
3002 3006
3003 if (pstate->fb == NULL) 3007 if (!to_intel_plane_state(pstate)->visible)
3004 continue; 3008 continue;
3005 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3009 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3006 continue; 3010 continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3123{ 3127{
3124 struct drm_plane *plane = &intel_plane->base; 3128 struct drm_plane *plane = &intel_plane->base;
3125 struct drm_framebuffer *fb = plane->state->fb; 3129 struct drm_framebuffer *fb = plane->state->fb;
3130 struct intel_plane_state *intel_pstate =
3131 to_intel_plane_state(plane->state);
3126 uint32_t latency = dev_priv->wm.skl_latency[level]; 3132 uint32_t latency = dev_priv->wm.skl_latency[level];
3127 uint32_t method1, method2; 3133 uint32_t method1, method2;
3128 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3134 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3129 uint32_t res_blocks, res_lines; 3135 uint32_t res_blocks, res_lines;
3130 uint32_t selected_result; 3136 uint32_t selected_result;
3131 uint8_t cpp; 3137 uint8_t cpp;
3138 uint32_t width = 0, height = 0;
3132 3139
3133 if (latency == 0 || !cstate->base.active || !fb) 3140 if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
3134 return false; 3141 return false;
3135 3142
3143 width = drm_rect_width(&intel_pstate->src) >> 16;
3144 height = drm_rect_height(&intel_pstate->src) >> 16;
3145
3146 if (intel_rotation_90_or_270(plane->state->rotation))
3147 swap(width, height);
3148
3136 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3149 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3137 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3150 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
3138 cpp, latency); 3151 cpp, latency);
3139 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3152 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
3140 cstate->base.adjusted_mode.crtc_htotal, 3153 cstate->base.adjusted_mode.crtc_htotal,
3141 cstate->pipe_src_w, 3154 width,
3142 cpp, fb->modifier[0], 3155 cpp,
3156 fb->modifier[0],
3143 latency); 3157 latency);
3144 3158
3145 plane_bytes_per_line = cstate->pipe_src_w * cpp; 3159 plane_bytes_per_line = width * cpp;
3146 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3160 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3147 3161
3148 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3162 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
968 968
969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
971 if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || 971 if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1085 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1087 1087
1088 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { 1088 /* This is tied to WaForceContextSaveRestoreNonCoherent */
1089 if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
1089 /* 1090 /*
1090 *Use Force Non-Coherent whenever executing a 3D context. This 1091 *Use Force Non-Coherent whenever executing a 3D context. This
1091 * is a workaround for a possible hang in the unlikely event 1092 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2090{ 2091{
2091 struct drm_i915_private *dev_priv = to_i915(dev); 2092 struct drm_i915_private *dev_priv = to_i915(dev);
2092 struct drm_i915_gem_object *obj = ringbuf->obj; 2093 struct drm_i915_gem_object *obj = ringbuf->obj;
2094 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2095 unsigned flags = PIN_OFFSET_BIAS | 4096;
2093 int ret; 2096 int ret;
2094 2097
2095 if (HAS_LLC(dev_priv) && !obj->stolen) { 2098 if (HAS_LLC(dev_priv) && !obj->stolen) {
2096 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0); 2099 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
2097 if (ret) 2100 if (ret)
2098 return ret; 2101 return ret;
2099 2102
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2109 return -ENOMEM; 2112 return -ENOMEM;
2110 } 2113 }
2111 } else { 2114 } else {
2112 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2115 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2116 flags | PIN_MAPPABLE);
2113 if (ret) 2117 if (ret)
2114 return ret; 2118 return ret;
2115 2119
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2454 if (unlikely(total_bytes > remain_usable)) { 2458 if (unlikely(total_bytes > remain_usable)) {
2455 /* 2459 /*
2456 * The base request will fit but the reserved space 2460 * The base request will fit but the reserved space
2457 * falls off the end. So only need to to wait for the 2461 * falls off the end. So don't need an immediate wrap
2458 * reserved size after flushing out the remainder. 2462 * and only need to effectively wait for the reserved
2463 * size space from the start of ringbuffer.
2459 */ 2464 */
2460 wait_bytes = remain_actual + ringbuf->reserved_size; 2465 wait_bytes = remain_actual + ringbuf->reserved_size;
2461 need_wrap = true;
2462 } else if (total_bytes > ringbuf->space) { 2466 } else if (total_bytes > ringbuf->space) {
2463 /* No wrapping required, just waiting. */ 2467 /* No wrapping required, just waiting. */
2464 wait_bytes = total_bytes; 2468 wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1190 dev_priv->uncore.funcs.force_wake_get = 1190 dev_priv->uncore.funcs.force_wake_get =
1191 fw_domains_get_with_thread_status; 1191 fw_domains_get_with_thread_status;
1192 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1192 if (IS_HASWELL(dev))
1193 dev_priv->uncore.funcs.force_wake_put =
1194 fw_domains_put_with_fifo;
1195 else
1196 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1193 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1197 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1194 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1198 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1195 } else if (IS_IVYBRIDGE(dev)) { 1199 } else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10e9d92..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
225 if (!iores) 225 if (!iores)
226 return -ENXIO; 226 return -ENXIO;
227 227
228 platform_set_drvdata(pdev, hdmi);
229
230 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); 228 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
231 /* 229 /*
232 * If we failed to find the CRTC(s) which this encoder is 230 * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
245 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, 243 drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS, NULL); 244 DRM_MODE_ENCODER_TMDS, NULL);
247 245
248 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 246 ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
247
248 /*
249 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
250 * which would have called the encoder cleanup. Do it manually.
251 */
252 if (ret)
253 drm_encoder_cleanup(encoder);
254
255 return ret;
249} 256}
250 257
251static void dw_hdmi_imx_unbind(struct device *dev, struct device *master, 258static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f0c3e1..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
326{ 326{
327 struct imx_drm_device *imxdrm = drm->dev_private; 327 struct imx_drm_device *imxdrm = drm->dev_private;
328 struct imx_drm_crtc *imx_drm_crtc; 328 struct imx_drm_crtc *imx_drm_crtc;
329 int ret;
330 329
331 /* 330 /*
332 * The vblank arrays are dimensioned by MAX_CRTC - we can't 331 * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
351 350
352 *new_crtc = imx_drm_crtc; 351 *new_crtc = imx_drm_crtc;
353 352
354 ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
355 if (ret)
356 goto err_register;
357
358 drm_crtc_helper_add(crtc, 353 drm_crtc_helper_add(crtc,
359 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); 354 imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
360 355
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
362 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL); 357 imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
363 358
364 return 0; 359 return 0;
365
366err_register:
367 imxdrm->crtc[--imxdrm->pipes] = NULL;
368 kfree(imx_drm_crtc);
369 return ret;
370} 360}
371EXPORT_SYMBOL_GPL(imx_drm_add_crtc); 361EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
372 362
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 588827844f30..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
72int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, 72int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
73 int x, int y) 73 int x, int y)
74{ 74{
75 struct drm_gem_cma_object *cma_obj; 75 struct drm_gem_cma_object *cma_obj[3];
76 unsigned long eba; 76 unsigned long eba, ubo, vbo;
77 int active; 77 int active, i;
78 78
79 cma_obj = drm_fb_cma_get_gem_obj(fb, 0); 79 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
80 if (!cma_obj) { 80 cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
81 DRM_DEBUG_KMS("entry is null.\n"); 81 if (!cma_obj[i]) {
82 return -EFAULT; 82 DRM_DEBUG_KMS("plane %d entry is null.\n", i);
83 return -EFAULT;
84 }
83 } 85 }
84 86
85 dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", 87 eba = cma_obj[0]->paddr + fb->offsets[0] +
86 &cma_obj->paddr, x, y);
87
88 eba = cma_obj->paddr + fb->offsets[0] +
89 fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; 88 fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
90 89
90 if (eba & 0x7) {
91 DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
92 return -EINVAL;
93 }
94
95 if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
96 DRM_DEBUG_KMS("pitches out of range.\n");
97 return -EINVAL;
98 }
99
100 if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
101 DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
102 return -EINVAL;
103 }
104
105 ipu_plane->stride[0] = fb->pitches[0];
106
107 switch (fb->pixel_format) {
108 case DRM_FORMAT_YUV420:
109 case DRM_FORMAT_YVU420:
110 /*
111 * Multiplanar formats have to meet the following restrictions:
112 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
113 * - EBA, UBO and VBO are a multiple of 8
114 * - UBO and VBO are unsigned and not larger than 0xfffff8
115 * - Only EBA may be changed while scanout is active
116 * - The strides of U and V planes must be identical.
117 */
118 ubo = cma_obj[1]->paddr + fb->offsets[1] +
119 fb->pitches[1] * y / 2 + x / 2 - eba;
120 vbo = cma_obj[2]->paddr + fb->offsets[2] +
121 fb->pitches[2] * y / 2 + x / 2 - eba;
122
123 if ((ubo & 0x7) || (vbo & 0x7)) {
124 DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
125 return -EINVAL;
126 }
127
128 if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
129 DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
130 return -EINVAL;
131 }
132
133 if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
134 (ipu_plane->v_offset != vbo))) {
135 DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
136 return -EINVAL;
137 }
138
139 if (fb->pitches[1] != fb->pitches[2]) {
140 DRM_DEBUG_KMS("U/V pitches must be identical.\n");
141 return -EINVAL;
142 }
143
144 if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
145 DRM_DEBUG_KMS("U/V pitches out of range.\n");
146 return -EINVAL;
147 }
148
149 if (ipu_plane->enabled &&
150 (ipu_plane->stride[1] != fb->pitches[1])) {
151 DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
152 return -EINVAL;
153 }
154
155 ipu_plane->u_offset = ubo;
156 ipu_plane->v_offset = vbo;
157 ipu_plane->stride[1] = fb->pitches[1];
158
159 dev_dbg(ipu_plane->base.dev->dev,
160 "phys = %pad %pad %pad, x = %d, y = %d",
161 &cma_obj[0]->paddr, &cma_obj[1]->paddr,
162 &cma_obj[2]->paddr, x, y);
163 break;
164 default:
165 dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
166 &cma_obj[0]->paddr, x, y);
167 break;
168 }
169
91 if (ipu_plane->enabled) { 170 if (ipu_plane->enabled) {
92 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 171 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
93 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 172 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
201 } 280 }
202 } 281 }
203 282
204 ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
205 if (ret) {
206 dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
207 return ret;
208 }
209
210 ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc, 283 ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
211 calc_bandwidth(crtc_w, crtc_h, 284 calc_bandwidth(crtc_w, crtc_h,
212 calc_vref(mode)), 64); 285 calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
215 return ret; 288 return ret;
216 } 289 }
217 290
291 ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
292
218 ipu_cpmem_zero(ipu_plane->ipu_ch); 293 ipu_cpmem_zero(ipu_plane->ipu_ch);
219 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h); 294 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
220 ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format); 295 ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
233 if (interlaced) 308 if (interlaced)
234 ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]); 309 ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
235 310
311 if (fb->pixel_format == DRM_FORMAT_YUV420) {
312 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
313 ipu_plane->stride[1],
314 ipu_plane->u_offset,
315 ipu_plane->v_offset);
316 } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
317 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
318 ipu_plane->stride[1],
319 ipu_plane->v_offset,
320 ipu_plane->u_offset);
321 }
322
236 ipu_plane->w = src_w; 323 ipu_plane->w = src_w;
237 ipu_plane->h = src_h; 324 ipu_plane->h = src_h;
238 325
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
29 int w; 29 int w;
30 int h; 30 int h;
31 31
32 unsigned int u_offset;
33 unsigned int v_offset;
34 unsigned int stride[2];
35
32 bool enabled; 36 bool enabled;
33}; 37};
34 38
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b04a64664673..65428cf233ce 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
196int msm_hdmi_pll_8960_init(struct platform_device *pdev); 196int msm_hdmi_pll_8960_init(struct platform_device *pdev);
197int msm_hdmi_pll_8996_init(struct platform_device *pdev); 197int msm_hdmi_pll_8996_init(struct platform_device *pdev);
198#else 198#else
199static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev); 199static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
200{ 200{
201 return -ENODEV; 201 return -ENODEV;
202} 202}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d52910e2c26c..c03b96709179 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
467 struct msm_file_private *ctx = file->driver_priv; 467 struct msm_file_private *ctx = file->driver_priv;
468 struct msm_kms *kms = priv->kms; 468 struct msm_kms *kms = priv->kms;
469 469
470 if (kms)
471 kms->funcs->preclose(kms, file);
472
473 mutex_lock(&dev->struct_mutex); 470 mutex_lock(&dev->struct_mutex);
474 if (ctx == priv->lastctx) 471 if (ctx == priv->lastctx)
475 priv->lastctx = NULL; 472 priv->lastctx = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..e32222c3d44f 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
55 struct drm_encoder *slave_encoder, 55 struct drm_encoder *slave_encoder,
56 bool is_cmd_mode); 56 bool is_cmd_mode);
57 /* cleanup: */ 57 /* cleanup: */
58 void (*preclose)(struct msm_kms *kms, struct drm_file *file);
59 void (*destroy)(struct msm_kms *kms); 58 void (*destroy)(struct msm_kms *kms);
60}; 59};
61 60
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641cec18a2..b5370cb56e3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
11 11
12 struct reset_control *rst; 12 struct reset_control *rst;
13 struct clk *clk; 13 struct clk *clk;
14 struct clk *clk_ref;
14 struct clk *clk_pwr; 15 struct clk *clk_pwr;
15 16
16 struct regulator *vdd; 17 struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
36 * bypassed). A value of 0 means an IOMMU is never used. 37 * bypassed). A value of 0 means an IOMMU is never used.
37 */ 38 */
38 u8 iommu_bit; 39 u8 iommu_bit;
40 /*
41 * Whether the chip requires a reference clock
42 */
43 bool require_ref_clk;
39}; 44};
40 45
41int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *, 46int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc490fb..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
1276 break; 1276 break;
1277 default: 1277 default:
1278 if (disp->dithering_mode) { 1278 if (disp->dithering_mode) {
1279 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1279 drm_object_attach_property(&connector->base, 1280 drm_object_attach_property(&connector->base,
1280 disp->dithering_mode, 1281 disp->dithering_mode,
1281 nv_connector-> 1282 nv_connector->
1282 dithering_mode); 1283 dithering_mode);
1283 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1284 } 1284 }
1285 if (disp->dithering_depth) { 1285 if (disp->dithering_depth) {
1286 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1286 drm_object_attach_property(&connector->base, 1287 drm_object_attach_property(&connector->base,
1287 disp->dithering_depth, 1288 disp->dithering_depth,
1288 nv_connector-> 1289 nv_connector->
1289 dithering_depth); 1290 dithering_depth);
1290 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1291 } 1291 }
1292 break; 1292 break;
1293 } 1293 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58af12e4..4c4cc2260257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
55 .iommu_bit = 34, 55 .iommu_bit = 34,
56}; 56};
57 57
58static const struct nvkm_device_tegra_func gm20b_platform_data = {
59 .iommu_bit = 34,
60 .require_ref_clk = true,
61};
62
58static const struct of_device_id nouveau_platform_match[] = { 63static const struct of_device_id nouveau_platform_match[] = {
59 { 64 {
60 .compatible = "nvidia,gk20a", 65 .compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
62 }, 67 },
63 { 68 {
64 .compatible = "nvidia,gm20b", 69 .compatible = "nvidia,gm20b",
65 .data = &gk20a_platform_data, 70 .data = &gm20b_platform_data,
66 }, 71 },
67 { } 72 { }
68}; 73};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9afa5f3e3c1c..ec12efb4689a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
35 ret = clk_prepare_enable(tdev->clk); 35 ret = clk_prepare_enable(tdev->clk);
36 if (ret) 36 if (ret)
37 goto err_clk; 37 goto err_clk;
38 if (tdev->clk_ref) {
39 ret = clk_prepare_enable(tdev->clk_ref);
40 if (ret)
41 goto err_clk_ref;
42 }
38 ret = clk_prepare_enable(tdev->clk_pwr); 43 ret = clk_prepare_enable(tdev->clk_pwr);
39 if (ret) 44 if (ret)
40 goto err_clk_pwr; 45 goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
57err_clamp: 62err_clamp:
58 clk_disable_unprepare(tdev->clk_pwr); 63 clk_disable_unprepare(tdev->clk_pwr);
59err_clk_pwr: 64err_clk_pwr:
65 if (tdev->clk_ref)
66 clk_disable_unprepare(tdev->clk_ref);
67err_clk_ref:
60 clk_disable_unprepare(tdev->clk); 68 clk_disable_unprepare(tdev->clk);
61err_clk: 69err_clk:
62 regulator_disable(tdev->vdd); 70 regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
71 udelay(10); 79 udelay(10);
72 80
73 clk_disable_unprepare(tdev->clk_pwr); 81 clk_disable_unprepare(tdev->clk_pwr);
82 if (tdev->clk_ref)
83 clk_disable_unprepare(tdev->clk_ref);
74 clk_disable_unprepare(tdev->clk); 84 clk_disable_unprepare(tdev->clk);
75 udelay(10); 85 udelay(10);
76 86
@@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
274 goto free; 284 goto free;
275 } 285 }
276 286
287 if (func->require_ref_clk)
288 tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
289 if (IS_ERR(tdev->clk_ref)) {
290 ret = PTR_ERR(tdev->clk_ref);
291 goto free;
292 }
293
277 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 294 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
278 if (IS_ERR(tdev->clk_pwr)) { 295 if (IS_ERR(tdev->clk_pwr)) {
279 ret = PTR_ERR(tdev->clk_pwr); 296 ret = PTR_ERR(tdev->clk_pwr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886229f1..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
1832 1832
1833 gf100_gr_mmio(gr, gr->func->mmio); 1833 gf100_gr_mmio(gr, gr->func->mmio);
1834 1834
1835 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1836
1835 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1837 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1836 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1838 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1837 do { 1839 do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
375 375
376 qxl_bo_kunmap(user_bo); 376 qxl_bo_kunmap(user_bo);
377 377
378 qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
379 qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
380 qcrtc->hot_spot_x = hot_x;
381 qcrtc->hot_spot_y = hot_y;
382
378 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 383 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
379 cmd->type = QXL_CURSOR_SET; 384 cmd->type = QXL_CURSOR_SET;
380 cmd->u.set.position.x = qcrtc->cur_x; 385 cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
381 cmd->u.set.position.y = qcrtc->cur_y; 386 cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
382 387
383 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 388 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
384 389
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
441 446
442 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 447 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
443 cmd->type = QXL_CURSOR_MOVE; 448 cmd->type = QXL_CURSOR_MOVE;
444 cmd->u.position.x = qcrtc->cur_x; 449 cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
445 cmd->u.position.y = qcrtc->cur_y; 450 cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
446 qxl_release_unmap(qdev, release, &cmd->release_info); 451 qxl_release_unmap(qdev, release, &cmd->release_info);
447 452
448 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 453 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
135 int index; 135 int index;
136 int cur_x; 136 int cur_x;
137 int cur_y; 137 int cur_y;
138 int hot_spot_x;
139 int hot_spot_y;
138}; 140};
139 141
140struct qxl_output { 142struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cf61e0856f4a..b80b08f71cb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 275 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 drm_vblank_on(dev, radeon_crtc->crtc_id); 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id);
279 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
280 break; 281 break;
281 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
282 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
283 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
284 drm_vblank_off(dev, radeon_crtc->crtc_id); 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id);
285 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
286 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
287 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index edd05cdb0cd8..587cae4e73c9 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
310 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 310 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
311 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 311 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
312 312
313 /* vertical FP must be at least 1 */
314 if (mode->crtc_vsync_start == mode->crtc_vdisplay)
315 adjusted_mode->crtc_vsync_start++;
316
313 /* get the native mode for scaling */ 317 /* get the native mode for scaling */
314 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 318 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
315 radeon_panel_mode_fixup(encoder, adjusted_mode); 319 radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2608 WREG32(VM_CONTEXT1_CNTL, 0); 2608 WREG32(VM_CONTEXT1_CNTL, 0);
2609} 2609}
2610 2610
2611static const unsigned ni_dig_offsets[] =
2612{
2613 NI_DIG0_REGISTER_OFFSET,
2614 NI_DIG1_REGISTER_OFFSET,
2615 NI_DIG2_REGISTER_OFFSET,
2616 NI_DIG3_REGISTER_OFFSET,
2617 NI_DIG4_REGISTER_OFFSET,
2618 NI_DIG5_REGISTER_OFFSET
2619};
2620
2621static const unsigned ni_tx_offsets[] =
2622{
2623 NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2624 NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2625 NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2626 NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2627 NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2628 NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2629};
2630
2631static const unsigned evergreen_dp_offsets[] =
2632{
2633 EVERGREEN_DP0_REGISTER_OFFSET,
2634 EVERGREEN_DP1_REGISTER_OFFSET,
2635 EVERGREEN_DP2_REGISTER_OFFSET,
2636 EVERGREEN_DP3_REGISTER_OFFSET,
2637 EVERGREEN_DP4_REGISTER_OFFSET,
2638 EVERGREEN_DP5_REGISTER_OFFSET
2639};
2640
2641
2642/*
2643 * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
2644 * We go from crtc to connector and it is not relible since it
2645 * should be an opposite direction .If crtc is enable then
2646 * find the dig_fe which selects this crtc and insure that it enable.
2647 * if such dig_fe is found then find dig_be which selects found dig_be and
2648 * insure that it enable and in DP_SST mode.
2649 * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
2650 * from dp symbols clocks .
2651 */
2652static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2653 unsigned crtc_id, unsigned *ret_dig_fe)
2654{
2655 unsigned i;
2656 unsigned dig_fe;
2657 unsigned dig_be;
2658 unsigned dig_en_be;
2659 unsigned uniphy_pll;
2660 unsigned digs_fe_selected;
2661 unsigned dig_be_mode;
2662 unsigned dig_fe_mask;
2663 bool is_enabled = false;
2664 bool found_crtc = false;
2665
2666 /* loop through all running dig_fe to find selected crtc */
2667 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2668 dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2669 if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2670 crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2671 /* found running pipe */
2672 found_crtc = true;
2673 dig_fe_mask = 1 << i;
2674 dig_fe = i;
2675 break;
2676 }
2677 }
2678
2679 if (found_crtc) {
2680 /* loop through all running dig_be to find selected dig_fe */
2681 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2682 dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2683 /* if dig_fe_selected by dig_be? */
2684 digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2685 dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2686 if (dig_fe_mask & digs_fe_selected &&
2687 /* if dig_be in sst mode? */
2688 dig_be_mode == NI_DIG_BE_DPSST) {
2689 dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2690 ni_dig_offsets[i]);
2691 uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2692 ni_tx_offsets[i]);
2693 /* dig_be enable and tx is running */
2694 if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2695 dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2696 uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2697 is_enabled = true;
2698 *ret_dig_fe = dig_fe;
2699 break;
2700 }
2701 }
2702 }
2703 }
2704
2705 return is_enabled;
2706}
2707
2708/*
2709 * Blank dig when in dp sst mode
2710 * Dig ignores crtc timing
2711 */
2712static void evergreen_blank_dp_output(struct radeon_device *rdev,
2713 unsigned dig_fe)
2714{
2715 unsigned stream_ctrl;
2716 unsigned fifo_ctrl;
2717 unsigned counter = 0;
2718
2719 if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2720 DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2721 return;
2722 }
2723
2724 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2725 evergreen_dp_offsets[dig_fe]);
2726 if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2727 DRM_ERROR("dig %d , should be enable\n", dig_fe);
2728 return;
2729 }
2730
2731 stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2732 WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2733 evergreen_dp_offsets[dig_fe], stream_ctrl);
2734
2735 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2736 evergreen_dp_offsets[dig_fe]);
2737 while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2738 msleep(1);
2739 counter++;
2740 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2741 evergreen_dp_offsets[dig_fe]);
2742 }
2743 if (counter >= 32 )
2744 DRM_ERROR("counter exceeds %d\n", counter);
2745
2746 fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2747 fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2748 WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2749
2750}
2751
2611void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 2752void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2612{ 2753{
2613 u32 crtc_enabled, tmp, frame_count, blackout; 2754 u32 crtc_enabled, tmp, frame_count, blackout;
2614 int i, j; 2755 int i, j;
2756 unsigned dig_fe;
2615 2757
2616 if (!ASIC_IS_NODCE(rdev)) { 2758 if (!ASIC_IS_NODCE(rdev)) {
2617 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 2759 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2651 break; 2793 break;
2652 udelay(1); 2794 udelay(1);
2653 } 2795 }
2654 2796 /*we should disable dig if it drives dp sst*/
2797 /*but we are in radeon_device_init and the topology is unknown*/
2798 /*and it is available after radeon_modeset_init*/
2799 /*the following method radeon_atom_encoder_dpms_dig*/
2800 /*does the job if we initialize it properly*/
2801 /*for now we do it this manually*/
2802 /**/
2803 if (ASIC_IS_DCE5(rdev) &&
2804 evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2805 evergreen_blank_dp_output(rdev, dig_fe);
2806 /*we could remove 6 lines below*/
2655 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 2807 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2656 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2808 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2657 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2809 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
250 250
251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ 251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
252#define EVERGREEN_HDMI_BASE 0x7030 252#define EVERGREEN_HDMI_BASE 0x7030
253/*DIG block*/
254#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
255#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
256#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
257#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
258#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
259#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
260
261
262#define NI_DIG_FE_CNTL 0x7000
263# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
264# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
265
266
267#define NI_DIG_BE_CNTL 0x7140
268# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
269# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
270
271#define NI_DIG_BE_EN_CNTL 0x7144
272# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
273# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
274# define NI_DIG_BE_DPSST 0
253 275
254/* Display Port block */ 276/* Display Port block */
277#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
278#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
279#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
280#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
281#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
282#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
283
284
285#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
286# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
287# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
288#define EVERGREEN_DP_STEER_FIFO 0x7310
289# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
255#define EVERGREEN_DP_SEC_CNTL 0x7280 290#define EVERGREEN_DP_SEC_CNTL 0x7280
256# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) 291# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
257# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) 292# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
266# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) 301# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
267# define EVERGREEN_DP_SEC_SS_EN (1 << 28) 302# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
268 303
304/*DCIO_UNIPHY block*/
305#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
306#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
307#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
308#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
309#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
310#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
311
312#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
313# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
314
269#endif 315#endif
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
109#define NI_DP_MSE_SAT2 0x7398 109#define NI_DP_MSE_SAT2 0x7398
110 110
111#define NI_DP_MSE_SAT_UPDATE 0x739c 111#define NI_DP_MSE_SAT_UPDATE 0x739c
112# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
113# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
112 114
113#define NI_DIG_BE_CNTL 0x7140 115#define NI_DIG_BE_CNTL 0x7140
114# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8) 116# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d317e60..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
62 return radeon_atpx_priv.atpx_detected; 62 return radeon_atpx_priv.atpx_detected;
63} 63}
64 64
65bool radeon_has_atpx_dgpu_power_cntl(void) {
66 return radeon_atpx_priv.atpx.functions.power_cntl;
67}
68
69/** 65/**
70 * radeon_atpx_call - call an ATPX method 66 * radeon_atpx_call - call an ATPX method
71 * 67 *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
145 */ 141 */
146static int radeon_atpx_validate(struct radeon_atpx *atpx) 142static int radeon_atpx_validate(struct radeon_atpx *atpx)
147{ 143{
144 /* make sure required functions are enabled */
145 /* dGPU power control is required */
146 if (atpx->functions.power_cntl == false) {
147 printk("ATPX dGPU power cntl not present, forcing\n");
148 atpx->functions.power_cntl = true;
149 }
150
148 if (atpx->functions.px_params) { 151 if (atpx->functions.px_params) {
149 union acpi_object *info; 152 union acpi_object *info;
150 struct atpx_px_params output; 153 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099c537d..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2002 rdev->mode_info.dither_property, 2002 rdev->mode_info.dither_property,
2003 RADEON_FMT_DITHER_DISABLE); 2003 RADEON_FMT_DITHER_DISABLE);
2004 2004
2005 if (radeon_audio != 0) 2005 if (radeon_audio != 0) {
2006 drm_object_attach_property(&radeon_connector->base.base, 2006 drm_object_attach_property(&radeon_connector->base.base,
2007 rdev->mode_info.audio_property, 2007 rdev->mode_info.audio_property,
2008 RADEON_AUDIO_AUTO); 2008 RADEON_AUDIO_AUTO);
2009 radeon_connector->audio = RADEON_AUDIO_AUTO;
2010 }
2009 if (ASIC_IS_DCE5(rdev)) 2011 if (ASIC_IS_DCE5(rdev))
2010 drm_object_attach_property(&radeon_connector->base.base, 2012 drm_object_attach_property(&radeon_connector->base.base,
2011 rdev->mode_info.output_csc_property, 2013 rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2130 drm_object_attach_property(&radeon_connector->base.base, 2132 drm_object_attach_property(&radeon_connector->base.base,
2131 rdev->mode_info.audio_property, 2133 rdev->mode_info.audio_property,
2132 RADEON_AUDIO_AUTO); 2134 RADEON_AUDIO_AUTO);
2135 radeon_connector->audio = RADEON_AUDIO_AUTO;
2133 } 2136 }
2134 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 2137 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2135 radeon_connector->dac_load_detect = true; 2138 radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2185 drm_object_attach_property(&radeon_connector->base.base, 2188 drm_object_attach_property(&radeon_connector->base.base,
2186 rdev->mode_info.audio_property, 2189 rdev->mode_info.audio_property,
2187 RADEON_AUDIO_AUTO); 2190 RADEON_AUDIO_AUTO);
2191 radeon_connector->audio = RADEON_AUDIO_AUTO;
2188 } 2192 }
2189 if (ASIC_IS_DCE5(rdev)) 2193 if (ASIC_IS_DCE5(rdev))
2190 drm_object_attach_property(&radeon_connector->base.base, 2194 drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2237 drm_object_attach_property(&radeon_connector->base.base, 2241 drm_object_attach_property(&radeon_connector->base.base,
2238 rdev->mode_info.audio_property, 2242 rdev->mode_info.audio_property,
2239 RADEON_AUDIO_AUTO); 2243 RADEON_AUDIO_AUTO);
2244 radeon_connector->audio = RADEON_AUDIO_AUTO;
2240 } 2245 }
2241 if (ASIC_IS_DCE5(rdev)) 2246 if (ASIC_IS_DCE5(rdev))
2242 drm_object_attach_property(&radeon_connector->base.base, 2247 drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a961012d..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
103 "LAST", 103 "LAST",
104}; 104};
105 105
106#if defined(CONFIG_VGA_SWITCHEROO)
107bool radeon_has_atpx_dgpu_power_cntl(void);
108#else
109static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
110#endif
111
112#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
113#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
114 108
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
1305 } 1299 }
1306 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); 1300 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1307 1301
1308 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1302 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1309 radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1303 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1310 pdev->subsystem_vendor, pdev->subsystem_device); 1304 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1311 1305
1312 /* mutex initialization are all done here so we 1306 /* mutex initialization are all done here so we
1313 * can recall function without having locking issues */ 1307 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
1439 * ignore it */ 1433 * ignore it */
1440 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1434 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1441 1435
1442 if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) 1436 if (rdev->flags & RADEON_IS_PX)
1443 runtime = true; 1437 runtime = true;
1444 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); 1438 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1445 if (runtime) 1439 if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb526b0c..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
89 WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1); 89 WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
90 90
91 do { 91 do {
92 unsigned value1, value2;
93 udelay(10);
92 temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset); 94 temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
93 } while ((temp & 0x1) && retries++ < 10000); 95
96 value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
97 value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
98
99 if (!value1 && !value2)
100 break;
101 } while (retries++ < 50);
94 102
95 if (retries == 10000) 103 if (retries == 10000)
96 DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset); 104 DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
150 return 0; 158 return 0;
151} 159}
152 160
153static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y) 161static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
154{ 162{
155 struct drm_device *dev = mst->base.dev; 163 struct drm_device *dev = mst->base.dev;
156 struct radeon_device *rdev = dev->dev_private; 164 struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
158 uint32_t val, temp; 166 uint32_t val, temp;
159 uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe); 167 uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
160 int retries = 0; 168 int retries = 0;
169 uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
170 uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
161 171
162 val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y); 172 val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
163 173
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
165 175
166 do { 176 do {
167 temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset); 177 temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
178 udelay(10);
168 } while ((temp & 0x1) && (retries++ < 10000)); 179 } while ((temp & 0x1) && (retries++ < 10000));
169 180
170 if (retries >= 10000) 181 if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
246 kfree(radeon_connector); 257 kfree(radeon_connector);
247} 258}
248 259
249static int radeon_connector_dpms(struct drm_connector *connector, int mode)
250{
251 DRM_DEBUG_KMS("\n");
252 return 0;
253}
254
255static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { 260static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
256 .dpms = radeon_connector_dpms, 261 .dpms = drm_helper_connector_dpms,
257 .detect = radeon_dp_mst_detect, 262 .detect = radeon_dp_mst_detect,
258 .fill_modes = drm_helper_probe_single_connector_modes, 263 .fill_modes = drm_helper_probe_single_connector_modes,
259 .destroy = radeon_dp_mst_connector_destroy, 264 .destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
394 struct drm_crtc *crtc; 399 struct drm_crtc *crtc;
395 struct radeon_crtc *radeon_crtc; 400 struct radeon_crtc *radeon_crtc;
396 int ret, slots; 401 int ret, slots;
397 402 s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
398 if (!ASIC_IS_DCE5(rdev)) { 403 if (!ASIC_IS_DCE5(rdev)) {
399 DRM_ERROR("got mst dpms on non-DCE5\n"); 404 DRM_ERROR("got mst dpms on non-DCE5\n");
400 return; 405 return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
456 461
457 mst_enc->enc_active = true; 462 mst_enc->enc_active = true;
458 radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary); 463 radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
459 radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0); 464
465 fixed_pbn = drm_int2fixp(mst_enc->pbn);
466 fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
467 avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
468 radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
460 469
461 atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0, 470 atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
462 mst_enc->fe); 471 mst_enc->fe);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
291 if (r) { 291 if (r) {
292 return r; 292 return r;
293 } 293 }
294 rdev->ddev->vblank_disable_allowed = true;
295
294 /* enable msi */ 296 /* enable msi */
295 rdev->msi_enabled = 0; 297 rdev->msi_enabled = 0;
296 298
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 24152dfef199..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
331 RADEON_CRTC_DISP_REQ_EN_B)); 331 RADEON_CRTC_DISP_REQ_EN_B));
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 drm_vblank_on(dev, radeon_crtc->crtc_id); 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id);
335 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
336 break; 337 break;
337 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
338 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
339 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
340 drm_vblank_off(dev, radeon_crtc->crtc_id); 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id);
341 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
342 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
343 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dd46c38676db..2d901bf28a94 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
799 if ((offset + size) <= rdev->mc.visible_vram_size) 799 if ((offset + size) <= rdev->mc.visible_vram_size)
800 return 0; 800 return 0;
801 801
802 /* Can't move a pinned BO to visible VRAM */
803 if (rbo->pin_count > 0)
804 return -EINVAL;
805
802 /* hurrah the memory is not visible ! */ 806 /* hurrah the memory is not visible ! */
803 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 807 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
804 lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 808 lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6d8c32377c6f..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
235{ 235{
236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
237 237
238 if (radeon_ttm_tt_has_userptr(bo->ttm))
239 return -EPERM;
238 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 240 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
239} 241}
240 242
@@ -397,9 +399,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
397 struct ttm_mem_reg *new_mem) 399 struct ttm_mem_reg *new_mem)
398{ 400{
399 struct radeon_device *rdev; 401 struct radeon_device *rdev;
402 struct radeon_bo *rbo;
400 struct ttm_mem_reg *old_mem = &bo->mem; 403 struct ttm_mem_reg *old_mem = &bo->mem;
401 int r; 404 int r;
402 405
406 /* Can't move a pinned BO */
407 rbo = container_of(bo, struct radeon_bo, tbo);
408 if (WARN_ON_ONCE(rbo->pin_count > 0))
409 return -EINVAL;
410
403 rdev = radeon_get_rdev(bo->bdev); 411 rdev = radeon_get_rdev(bo->bdev);
404 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 412 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
405 radeon_move_null(bo, new_mem); 413 radeon_move_null(bo, new_mem);
@@ -609,7 +617,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
609 set_page_dirty(page); 617 set_page_dirty(page);
610 618
611 mark_page_accessed(page); 619 mark_page_accessed(page);
612 page_cache_release(page); 620 put_page(page);
613 } 621 }
614 622
615 sg_free_table(ttm->sg); 623 sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cb75ab72098a..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2930 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2934 { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2932 { 0, 0, 0, 0 }, 2935 { 0, 0, 0, 0 },
2933}; 2936};
2934 2937
@@ -3008,6 +3011,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3008 } 3011 }
3009 ++p; 3012 ++p;
3010 } 3013 }
3014 /* limit mclk on all R7 370 parts for stability */
3015 if (rdev->pdev->device == 0x6811 &&
3016 rdev->pdev->revision == 0x81)
3017 max_mclk = 120000;
3011 3018
3012 if (rps->vce_active) { 3019 if (rps->vce_active) {
3013 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 3020 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 3d3cf2f8891e..d5cfef75fc80 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
271 if (!iores) 271 if (!iores)
272 return -ENXIO; 272 return -ENXIO;
273 273
274 platform_set_drvdata(pdev, hdmi);
275
276 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); 274 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
277 /* 275 /*
278 * If we failed to find the CRTC(s) which this encoder is 276 * If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
293 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, 291 drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
294 DRM_MODE_ENCODER_TMDS, NULL); 292 DRM_MODE_ENCODER_TMDS, NULL);
295 293
296 return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); 294 ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
295
296 /*
297 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
298 * which would have called the encoder cleanup. Do it manually.
299 */
300 if (ret)
301 drm_encoder_cleanup(encoder);
302
303 return ret;
297} 304}
298 305
299static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, 306static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 896da09e49ee..f556a8f4fde6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
251 return 0; 251 return 0;
252} 252}
253 253
254static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
255 struct drm_file *file_priv)
256{
257 struct rockchip_drm_private *priv = crtc->dev->dev_private;
258 int pipe = drm_crtc_index(crtc);
259
260 if (pipe < ROCKCHIP_MAX_CRTC &&
261 priv->crtc_funcs[pipe] &&
262 priv->crtc_funcs[pipe]->cancel_pending_vblank)
263 priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
264}
265
266static void rockchip_drm_preclose(struct drm_device *dev,
267 struct drm_file *file_priv)
268{
269 struct drm_crtc *crtc;
270
271 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
272 rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
273}
274
254void rockchip_drm_lastclose(struct drm_device *dev) 275void rockchip_drm_lastclose(struct drm_device *dev)
255{ 276{
256 struct rockchip_drm_private *priv = dev->dev_private; 277 struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
281 DRIVER_PRIME | DRIVER_ATOMIC, 302 DRIVER_PRIME | DRIVER_ATOMIC,
282 .load = rockchip_drm_load, 303 .load = rockchip_drm_load,
283 .unload = rockchip_drm_unload, 304 .unload = rockchip_drm_unload,
305 .preclose = rockchip_drm_preclose,
284 .lastclose = rockchip_drm_lastclose, 306 .lastclose = rockchip_drm_lastclose,
285 .get_vblank_counter = drm_vblank_no_hw_counter, 307 .get_vblank_counter = drm_vblank_no_hw_counter,
286 .enable_vblank = rockchip_drm_crtc_enable_vblank, 308 .enable_vblank = rockchip_drm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3529f692edb8..00d17d71aa4c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
40 int (*enable_vblank)(struct drm_crtc *crtc); 40 int (*enable_vblank)(struct drm_crtc *crtc);
41 void (*disable_vblank)(struct drm_crtc *crtc); 41 void (*disable_vblank)(struct drm_crtc *crtc);
42 void (*wait_for_update)(struct drm_crtc *crtc); 42 void (*wait_for_update)(struct drm_crtc *crtc);
43 void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
43}; 44};
44 45
45struct rockchip_atomic_commit { 46struct rockchip_atomic_commit {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd370548d7d7..a619f120f801 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@ err_disable_hclk:
499static void vop_crtc_disable(struct drm_crtc *crtc) 499static void vop_crtc_disable(struct drm_crtc *crtc)
500{ 500{
501 struct vop *vop = to_vop(crtc); 501 struct vop *vop = to_vop(crtc);
502 int i;
502 503
503 if (!vop->is_enabled) 504 if (!vop->is_enabled)
504 return; 505 return;
505 506
507 /*
508 * We need to make sure that all windows are disabled before we
509 * disable that crtc. Otherwise we might try to scan from a destroyed
510 * buffer later.
511 */
512 for (i = 0; i < vop->data->win_size; i++) {
513 struct vop_win *vop_win = &vop->win[i];
514 const struct vop_win_data *win = vop_win->data;
515
516 spin_lock(&vop->reg_lock);
517 VOP_WIN_SET(vop, win, enable, 0);
518 spin_unlock(&vop->reg_lock);
519 }
520
506 drm_crtc_vblank_off(crtc); 521 drm_crtc_vblank_off(crtc);
507 522
508 /* 523 /*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
549 struct drm_plane_state *state) 564 struct drm_plane_state *state)
550{ 565{
551 struct drm_crtc *crtc = state->crtc; 566 struct drm_crtc *crtc = state->crtc;
567 struct drm_crtc_state *crtc_state;
552 struct drm_framebuffer *fb = state->fb; 568 struct drm_framebuffer *fb = state->fb;
553 struct vop_win *vop_win = to_vop_win(plane); 569 struct vop_win *vop_win = to_vop_win(plane);
554 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); 570 struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
563 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 579 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
564 DRM_PLANE_HELPER_NO_SCALING; 580 DRM_PLANE_HELPER_NO_SCALING;
565 581
566 crtc = crtc ? crtc : plane->state->crtc;
567 /*
568 * Both crtc or plane->state->crtc can be null.
569 */
570 if (!crtc || !fb) 582 if (!crtc || !fb)
571 goto out_disable; 583 goto out_disable;
584
585 crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
586 if (WARN_ON(!crtc_state))
587 return -EINVAL;
588
572 src->x1 = state->src_x; 589 src->x1 = state->src_x;
573 src->y1 = state->src_y; 590 src->y1 = state->src_y;
574 src->x2 = state->src_x + state->src_w; 591 src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
580 597
581 clip.x1 = 0; 598 clip.x1 = 0;
582 clip.y1 = 0; 599 clip.y1 = 0;
583 clip.x2 = crtc->mode.hdisplay; 600 clip.x2 = crtc_state->adjusted_mode.hdisplay;
584 clip.y2 = crtc->mode.vdisplay; 601 clip.y2 = crtc_state->adjusted_mode.vdisplay;
585 602
586 ret = drm_plane_helper_check_update(plane, crtc, state->fb, 603 ret = drm_plane_helper_check_update(plane, crtc, state->fb,
587 src, dest, &clip, 604 src, dest, &clip,
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
873 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); 890 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
874} 891}
875 892
893static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
894 struct drm_file *file_priv)
895{
896 struct drm_device *drm = crtc->dev;
897 struct vop *vop = to_vop(crtc);
898 struct drm_pending_vblank_event *e;
899 unsigned long flags;
900
901 spin_lock_irqsave(&drm->event_lock, flags);
902 e = vop->event;
903 if (e && e->base.file_priv == file_priv) {
904 vop->event = NULL;
905
906 e->base.destroy(&e->base);
907 file_priv->event_space += sizeof(e->event);
908 }
909 spin_unlock_irqrestore(&drm->event_lock, flags);
910}
911
876static const struct rockchip_crtc_funcs private_crtc_funcs = { 912static const struct rockchip_crtc_funcs private_crtc_funcs = {
877 .enable_vblank = vop_crtc_enable_vblank, 913 .enable_vblank = vop_crtc_enable_vblank,
878 .disable_vblank = vop_crtc_disable_vblank, 914 .disable_vblank = vop_crtc_disable_vblank,
879 .wait_for_update = vop_crtc_wait_for_update, 915 .wait_for_update = vop_crtc_wait_for_update,
916 .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
880}; 917};
881 918
882static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 919static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
885{ 922{
886 struct vop *vop = to_vop(crtc); 923 struct vop *vop = to_vop(crtc);
887 924
888 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
889 return false;
890
891 adjusted_mode->clock = 925 adjusted_mode->clock =
892 clk_round_rate(vop->dclk, mode->clock * 1000) / 1000; 926 clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
893 927
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
1108 const struct vop_data *vop_data = vop->data; 1142 const struct vop_data *vop_data = vop->data;
1109 struct device *dev = vop->dev; 1143 struct device *dev = vop->dev;
1110 struct drm_device *drm_dev = vop->drm_dev; 1144 struct drm_device *drm_dev = vop->drm_dev;
1111 struct drm_plane *primary = NULL, *cursor = NULL, *plane; 1145 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
1112 struct drm_crtc *crtc = &vop->crtc; 1146 struct drm_crtc *crtc = &vop->crtc;
1113 struct device_node *port; 1147 struct device_node *port;
1114 int ret; 1148 int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
1148 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1182 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1149 &vop_crtc_funcs, NULL); 1183 &vop_crtc_funcs, NULL);
1150 if (ret) 1184 if (ret)
1151 return ret; 1185 goto err_cleanup_planes;
1152 1186
1153 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); 1187 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1154 1188
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
1181 if (!port) { 1215 if (!port) {
1182 DRM_ERROR("no port node found in %s\n", 1216 DRM_ERROR("no port node found in %s\n",
1183 dev->of_node->full_name); 1217 dev->of_node->full_name);
1218 ret = -ENOENT;
1184 goto err_cleanup_crtc; 1219 goto err_cleanup_crtc;
1185 } 1220 }
1186 1221
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
1194err_cleanup_crtc: 1229err_cleanup_crtc:
1195 drm_crtc_cleanup(crtc); 1230 drm_crtc_cleanup(crtc);
1196err_cleanup_planes: 1231err_cleanup_planes:
1197 list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head) 1232 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1233 head)
1198 drm_plane_cleanup(plane); 1234 drm_plane_cleanup(plane);
1199 return ret; 1235 return ret;
1200} 1236}
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
1202static void vop_destroy_crtc(struct vop *vop) 1238static void vop_destroy_crtc(struct vop *vop)
1203{ 1239{
1204 struct drm_crtc *crtc = &vop->crtc; 1240 struct drm_crtc *crtc = &vop->crtc;
1241 struct drm_device *drm_dev = vop->drm_dev;
1242 struct drm_plane *plane, *tmp;
1205 1243
1206 rockchip_unregister_crtc_funcs(crtc); 1244 rockchip_unregister_crtc_funcs(crtc);
1207 of_node_put(crtc->port); 1245 of_node_put(crtc->port);
1246
1247 /*
1248 * We need to cleanup the planes now. Why?
1249 *
1250 * The planes are "&vop->win[i].base". That means the memory is
1251 * all part of the big "struct vop" chunk of memory. That memory
1252 * was devm allocated and associated with this component. We need to
1253 * free it ourselves before vop_unbind() finishes.
1254 */
1255 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
1256 head)
1257 vop_plane_destroy(plane);
1258
1259 /*
1260 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
1261 * references the CRTC.
1262 */
1208 drm_crtc_cleanup(crtc); 1263 drm_crtc_cleanup(crtc);
1209} 1264}
1210 1265
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
230 230
231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) 231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
232{ 232{
233 struct ttm_bo_device *bdev = bo->bdev; 233 int put_count = 0;
234 struct ttm_mem_type_manager *man;
235 234
236 lockdep_assert_held(&bo->resv->lock.base); 235 lockdep_assert_held(&bo->resv->lock.base);
237 236
238 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 237 put_count = ttm_bo_del_from_lru(bo);
239 list_del_init(&bo->swap); 238 ttm_bo_list_ref_sub(bo, put_count, true);
240 list_del_init(&bo->lru); 239 ttm_bo_add_to_lru(bo);
241
242 } else {
243 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
244 list_move_tail(&bo->swap, &bo->glob->swap_lru);
245
246 man = &bdev->man[bo->mem.mem_type];
247 list_move_tail(&bo->lru, &man->lru);
248 }
249} 240}
250EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 241EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
251 242
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
311 goto out_err; 311 goto out_err;
312 312
313 copy_highpage(to_page, from_page); 313 copy_highpage(to_page, from_page);
314 page_cache_release(from_page); 314 put_page(from_page);
315 } 315 }
316 316
317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) 317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
361 copy_highpage(to_page, from_page); 361 copy_highpage(to_page, from_page);
362 set_page_dirty(to_page); 362 set_page_dirty(to_page);
363 mark_page_accessed(to_page); 363 mark_page_accessed(to_page);
364 page_cache_release(to_page); 364 put_page(to_page);
365 } 365 }
366 366
367 ttm_tt_unpopulate(ttm); 367 ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 33239a2b264a..fd1eb9d03f0b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
536out_destroy_fbi: 536out_destroy_fbi:
537 drm_fb_helper_release_fbi(helper); 537 drm_fb_helper_release_fbi(helper);
538out_gfree: 538out_gfree:
539 drm_gem_object_unreference(&ufbdev->ufb.obj->base); 539 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
540out: 540out:
541 return ret; 541 return ret;
542} 542}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
52 return ret; 52 return ret;
53 } 53 }
54 54
55 drm_gem_object_unreference(&obj->base); 55 drm_gem_object_unreference_unlocked(&obj->base);
56 *handle_p = handle; 56 *handle_p = handle;
57 return 0; 57 return 0;
58} 58}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc07ae3..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
188 if (NULL != (page = vsg->pages[i])) { 188 if (NULL != (page = vsg->pages[i])) {
189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 189 if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
190 SetPageDirty(page); 190 SetPageDirty(page);
191 page_cache_release(page); 191 put_page(page);
192 } 192 }
193 } 193 }
194 case dr_via_pages_alloc: 194 case dr_via_pages_alloc:
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
267 return 0; 267 return 0;
268} 268}
269 269
270static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
271 struct drm_crtc_state *old_state)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&crtc->dev->event_lock, flags);
276 if (crtc->state->event)
277 drm_crtc_send_vblank_event(crtc, crtc->state->event);
278 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
279}
280
270static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { 281static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
271 .enable = virtio_gpu_crtc_enable, 282 .enable = virtio_gpu_crtc_enable,
272 .disable = virtio_gpu_crtc_disable, 283 .disable = virtio_gpu_crtc_disable,
273 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, 284 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
274 .atomic_check = virtio_gpu_crtc_atomic_check, 285 .atomic_check = virtio_gpu_crtc_atomic_check,
286 .atomic_flush = virtio_gpu_crtc_atomic_flush,
275}; 287};
276 288
277static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, 289static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3293 &vmw_cmd_dx_cid_check, true, false, true), 3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true), 3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, 3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true), 3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true), 3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_ok, true, false, true), 3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, 3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true), 3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, 3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true), 3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true), 3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true), 3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true), 3311 true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
573 mode = old_mode; 573 mode = old_mode;
574 old_mode = NULL; 574 old_mode = NULL;
575 } else if (!vmw_kms_validate_mode_vram(vmw_priv, 575 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
576 mode->hdisplay * 576 mode->hdisplay *
577 (var->bits_per_pixel + 7) / 8, 577 DIV_ROUND_UP(var->bits_per_pixel, 8),
578 mode->vdisplay)) { 578 mode->vdisplay)) {
579 drm_mode_destroy(vmw_priv->dev, mode); 579 drm_mode_destroy(vmw_priv->dev, mode);
580 return -EINVAL; 580 return -EINVAL;
581 } 581 }
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e00db3f510dd..abb98c77bad2 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1068 goto err_register; 1068 goto err_register;
1069 } 1069 }
1070 1070
1071 pdev->dev.of_node = of_node;
1072 pdev->dev.parent = dev; 1071 pdev->dev.parent = dev;
1073 1072
1074 ret = platform_device_add_data(pdev, &reg->pdata, 1073 ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1079 platform_device_put(pdev); 1078 platform_device_put(pdev);
1080 goto err_register; 1079 goto err_register;
1081 } 1080 }
1081
1082 /*
1083 * Set of_node only after calling platform_device_add. Otherwise
1084 * the platform:imx-ipuv3-crtc modalias won't be used.
1085 */
1086 pdev->dev.of_node = of_node;
1082 } 1087 }
1083 1088
1084 return 0; 1089 return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); 395EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
396 396
397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, 397void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
398 u32 pixel_format, int stride, 398 unsigned int uv_stride,
399 int u_offset, int v_offset) 399 unsigned int u_offset, unsigned int v_offset)
400{ 400{
401 switch (pixel_format) { 401 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
402 case V4L2_PIX_FMT_YUV420: 402 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
403 case V4L2_PIX_FMT_YUV422P: 403 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
404 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
405 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
406 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
407 break;
408 case V4L2_PIX_FMT_YVU420:
409 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
410 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
411 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
412 break;
413 case V4L2_PIX_FMT_NV12:
414 case V4L2_PIX_FMT_NV16:
415 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
416 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
417 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
418 break;
419 }
420} 404}
421EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full); 405EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
422 406
423void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, 407void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
424 u32 pixel_format, int stride, int height) 408 u32 pixel_format, int stride, int height)
425{ 409{
426 int u_offset, v_offset; 410 int fourcc, u_offset, v_offset;
427 int uv_stride = 0; 411 int uv_stride = 0;
428 412
429 switch (pixel_format) { 413 fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
430 case V4L2_PIX_FMT_YUV420: 414 switch (fourcc) {
431 case V4L2_PIX_FMT_YVU420: 415 case DRM_FORMAT_YUV420:
432 uv_stride = stride / 2; 416 uv_stride = stride / 2;
433 u_offset = stride * height; 417 u_offset = stride * height;
434 v_offset = u_offset + (uv_stride * height / 2); 418 v_offset = u_offset + (uv_stride * height / 2);
435 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
436 u_offset, v_offset);
437 break; 419 break;
438 case V4L2_PIX_FMT_YUV422P: 420 case DRM_FORMAT_YVU420:
421 uv_stride = stride / 2;
422 v_offset = stride * height;
423 u_offset = v_offset + (uv_stride * height / 2);
424 break;
425 case DRM_FORMAT_YUV422:
439 uv_stride = stride / 2; 426 uv_stride = stride / 2;
440 u_offset = stride * height; 427 u_offset = stride * height;
441 v_offset = u_offset + (uv_stride * height); 428 v_offset = u_offset + (uv_stride * height);
442 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
443 u_offset, v_offset);
444 break; 429 break;
445 case V4L2_PIX_FMT_NV12: 430 case DRM_FORMAT_NV12:
446 case V4L2_PIX_FMT_NV16: 431 case DRM_FORMAT_NV16:
432 uv_stride = stride;
447 u_offset = stride * height; 433 u_offset = stride * height;
448 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, 434 v_offset = 0;
449 u_offset, 0);
450 break; 435 break;
436 default:
437 return;
451 } 438 }
439 ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
452} 440}
453EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); 441EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
454 442
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
684 672
685 switch (pix->pixelformat) { 673 switch (pix->pixelformat) {
686 case V4L2_PIX_FMT_YUV420: 674 case V4L2_PIX_FMT_YUV420:
687 case V4L2_PIX_FMT_YVU420:
688 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 675 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
689 u_offset = U_OFFSET(pix, image->rect.left, 676 u_offset = U_OFFSET(pix, image->rect.left,
690 image->rect.top) - offset; 677 image->rect.top) - offset;
691 v_offset = V_OFFSET(pix, image->rect.left, 678 v_offset = V_OFFSET(pix, image->rect.left,
692 image->rect.top) - offset; 679 image->rect.top) - offset;
693 680
694 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 681 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
695 pix->bytesperline,
696 u_offset, v_offset); 682 u_offset, v_offset);
697 break; 683 break;
684 case V4L2_PIX_FMT_YVU420:
685 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
686 u_offset = U_OFFSET(pix, image->rect.left,
687 image->rect.top) - offset;
688 v_offset = V_OFFSET(pix, image->rect.left,
689 image->rect.top) - offset;
690
691 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
692 v_offset, u_offset);
693 break;
698 case V4L2_PIX_FMT_YUV422P: 694 case V4L2_PIX_FMT_YUV422P:
699 offset = Y_OFFSET(pix, image->rect.left, image->rect.top); 695 offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
700 u_offset = U2_OFFSET(pix, image->rect.left, 696 u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
702 v_offset = V2_OFFSET(pix, image->rect.left, 698 v_offset = V2_OFFSET(pix, image->rect.left,
703 image->rect.top) - offset; 699 image->rect.top) - offset;
704 700
705 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 701 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
706 pix->bytesperline,
707 u_offset, v_offset); 702 u_offset, v_offset);
708 break; 703 break;
709 case V4L2_PIX_FMT_NV12: 704 case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
712 image->rect.top) - offset; 707 image->rect.top) - offset;
713 v_offset = 0; 708 v_offset = 0;
714 709
715 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 710 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
716 pix->bytesperline,
717 u_offset, v_offset); 711 u_offset, v_offset);
718 break; 712 break;
719 case V4L2_PIX_FMT_NV16: 713 case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
722 image->rect.top) - offset; 716 image->rect.top) - offset;
723 v_offset = 0; 717 v_offset = 0;
724 718
725 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, 719 ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
726 pix->bytesperline,
727 u_offset, v_offset); 720 u_offset, v_offset);
728 break; 721 break;
729 case V4L2_PIX_FMT_UYVY: 722 case V4L2_PIX_FMT_UYVY:
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
350} 350}
351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth); 351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
352 352
353int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width) 353void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
354{ 354{
355 struct ipu_dmfc_priv *priv = dmfc->priv; 355 struct ipu_dmfc_priv *priv = dmfc->priv;
356 u32 dmfc_gen1; 356 u32 dmfc_gen1;
357 357
358 mutex_lock(&priv->mutex);
359
358 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1); 360 dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
359 361
360 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines) 362 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
364 366
365 writel(dmfc_gen1, priv->base + DMFC_GENERAL1); 367 writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
366 368
367 return 0; 369 mutex_unlock(&priv->mutex);
368} 370}
369EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel); 371EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
370 372
371struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel) 373struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
372{ 374{
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc89cacc..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1979,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1979 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) }, 1979 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) }, 1980 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) }, 1981 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
1983 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
1984 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
1982 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, 1985 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
1983 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1986 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1984 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 1987 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43ed5c53..0238f0169e48 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -259,6 +259,7 @@
259#define USB_DEVICE_ID_CORSAIR_K90 0x1b02 259#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
260 260
261#define USB_VENDOR_ID_CREATIVELABS 0x041e 261#define USB_VENDOR_ID_CREATIVELABS 0x041e
262#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
262#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 263#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
263 264
264#define USB_VENDOR_ID_CVTOUCH 0x1ff7 265#define USB_VENDOR_ID_CVTOUCH 0x1ff7
@@ -676,6 +677,7 @@
676#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b 677#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
677#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048 678#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
678#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d 679#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
680#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
679#define USB_DEVICE_ID_MS_NE4K 0x00db 681#define USB_DEVICE_ID_MS_NE4K 0x00db
680#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc 682#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
681#define USB_DEVICE_ID_MS_LK6K 0x00f9 683#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -683,6 +685,8 @@
683#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 685#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
684#define USB_DEVICE_ID_MS_NE7K 0x071d 686#define USB_DEVICE_ID_MS_NE7K 0x071d
685#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 687#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
688#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
689#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
686#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 690#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
687#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3 691#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
688#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 692#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
184 unsigned char byte2, unsigned char byte3) 184 unsigned char byte2, unsigned char byte3)
185{ 185{
186 int ret; 186 int ret;
187 unsigned char buf[] = {0x18, byte2, byte3}; 187 unsigned char *buf;
188
189 buf = kzalloc(3, GFP_KERNEL);
190 if (!buf)
191 return -ENOMEM;
192
193 buf[0] = 0x18;
194 buf[1] = byte2;
195 buf[2] = byte3;
188 196
189 switch (hdev->product) { 197 switch (hdev->product) {
190 case USB_DEVICE_ID_LENOVO_CUSBKBD: 198 case USB_DEVICE_ID_LENOVO_CUSBKBD:
191 ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf), 199 ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
192 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 200 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
193 break; 201 break;
194 case USB_DEVICE_ID_LENOVO_CBTKBD: 202 case USB_DEVICE_ID_LENOVO_CBTKBD:
195 ret = hid_hw_output_report(hdev, buf, sizeof(buf)); 203 ret = hid_hw_output_report(hdev, buf, 3);
196 break; 204 break;
197 default: 205 default:
198 ret = -EINVAL; 206 ret = -EINVAL;
199 break; 207 break;
200 } 208 }
201 209
210 kfree(buf);
211
202 return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */ 212 return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
203} 213}
204 214
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc59c54..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
272 .driver_data = MS_PRESENTER }, 272 .driver_data = MS_PRESENTER },
273 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K), 273 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
274 .driver_data = MS_ERGONOMY | MS_RDESC_3K }, 274 .driver_data = MS_ERGONOMY | MS_RDESC_3K },
275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
276 .driver_data = MS_ERGONOMY },
277 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
278 .driver_data = MS_ERGONOMY },
279 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
280 .driver_data = MS_ERGONOMY },
275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0), 281 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
276 .driver_data = MS_NOGET }, 282 .driver_data = MS_NOGET },
277 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 283 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c4330bf6..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@ static void mt_release_contacts(struct hid_device *hid)
1169 MT_TOOL_FINGER, 1169 MT_TOOL_FINGER,
1170 false); 1170 false);
1171 } 1171 }
1172 input_mt_sync_frame(input_dev);
1172 input_sync(input_dev); 1173 input_sync(input_dev);
1173 } 1174 }
1174 } 1175 }
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
2049 * -----+------------------------------+-----+-----+ 2049 * -----+------------------------------+-----+-----+
2050 * The single bits Yaw, Roll, Pitch in the lower right corner specify 2050 * The single bits Yaw, Roll, Pitch in the lower right corner specify
2051 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow 2051 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
2052 * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a 2052 * roation is 8192/440 units / deg/s and for fast rotation 8192/2000
2053 * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast 2053 * units / deg/s. To get a linear scale for fast rotation we multiply
2054 * and 9 for slow. 2054 * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
2055 * previous scale reported by this driver.
2056 * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
2055 * If the wiimote is not rotating the sensor reports 2^13 = 8192. 2057 * If the wiimote is not rotating the sensor reports 2^13 = 8192.
2056 * Ext specifies whether an extension is connected to the motionp. 2058 * Ext specifies whether an extension is connected to the motionp.
2057 * which is parsed by wiimote-core. 2059 * which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
2070 z -= 8192; 2072 z -= 8192;
2071 2073
2072 if (!(ext[3] & 0x02)) 2074 if (!(ext[3] & 0x02))
2073 x *= 18; 2075 x = (x * 2000 * 9) / 440;
2074 else 2076 else
2075 x *= 9; 2077 x *= 9;
2076 if (!(ext[4] & 0x02)) 2078 if (!(ext[4] & 0x02))
2077 y *= 18; 2079 y = (y * 2000 * 9) / 440;
2078 else 2080 else
2079 y *= 9; 2081 y *= 9;
2080 if (!(ext[3] & 0x01)) 2082 if (!(ext[3] & 0x01))
2081 z *= 18; 2083 z = (z * 2000 * 9) / 440;
2082 else 2084 else
2083 z *= 9; 2085 z *= 9;
2084 2086
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
951 return ret; 951 return ret;
952} 952}
953 953
954static void usbhid_restart_queues(struct usbhid_device *usbhid)
955{
956 if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
957 usbhid_restart_out_queue(usbhid);
958 if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
959 usbhid_restart_ctrl_queue(usbhid);
960}
961
962static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid) 954static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
963{ 955{
964 struct usbhid_device *usbhid = hid->driver_data; 956 struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
1404 usb_kill_urb(usbhid->urbout); 1396 usb_kill_urb(usbhid->urbout);
1405} 1397}
1406 1398
1399static void hid_restart_io(struct hid_device *hid)
1400{
1401 struct usbhid_device *usbhid = hid->driver_data;
1402 int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
1403 int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
1404
1405 spin_lock_irq(&usbhid->lock);
1406 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1407 usbhid_mark_busy(usbhid);
1408
1409 if (clear_halt || reset_pending)
1410 schedule_work(&usbhid->reset_work);
1411 usbhid->retry_delay = 0;
1412 spin_unlock_irq(&usbhid->lock);
1413
1414 if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
1415 return;
1416
1417 if (!clear_halt) {
1418 if (hid_start_in(hid) < 0)
1419 hid_io_error(hid);
1420 }
1421
1422 spin_lock_irq(&usbhid->lock);
1423 if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
1424 usbhid_restart_out_queue(usbhid);
1425 if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
1426 usbhid_restart_ctrl_queue(usbhid);
1427 spin_unlock_irq(&usbhid->lock);
1428}
1429
1407/* Treat USB reset pretty much the same as suspend/resume */ 1430/* Treat USB reset pretty much the same as suspend/resume */
1408static int hid_pre_reset(struct usb_interface *intf) 1431static int hid_pre_reset(struct usb_interface *intf)
1409{ 1432{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
1453 return 1; 1476 return 1;
1454 } 1477 }
1455 1478
1479 /* No need to do another reset or clear a halted endpoint */
1456 spin_lock_irq(&usbhid->lock); 1480 spin_lock_irq(&usbhid->lock);
1457 clear_bit(HID_RESET_PENDING, &usbhid->iofl); 1481 clear_bit(HID_RESET_PENDING, &usbhid->iofl);
1482 clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
1458 spin_unlock_irq(&usbhid->lock); 1483 spin_unlock_irq(&usbhid->lock);
1459 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0); 1484 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
1460 status = hid_start_in(hid); 1485
1461 if (status < 0) 1486 hid_restart_io(hid);
1462 hid_io_error(hid);
1463 usbhid_restart_queues(usbhid);
1464 1487
1465 return 0; 1488 return 0;
1466} 1489}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
1483#ifdef CONFIG_PM 1506#ifdef CONFIG_PM
1484static int hid_resume_common(struct hid_device *hid, bool driver_suspended) 1507static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
1485{ 1508{
1486 struct usbhid_device *usbhid = hid->driver_data; 1509 int status = 0;
1487 int status;
1488
1489 spin_lock_irq(&usbhid->lock);
1490 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1491 usbhid_mark_busy(usbhid);
1492
1493 if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
1494 test_bit(HID_RESET_PENDING, &usbhid->iofl))
1495 schedule_work(&usbhid->reset_work);
1496 usbhid->retry_delay = 0;
1497
1498 usbhid_restart_queues(usbhid);
1499 spin_unlock_irq(&usbhid->lock);
1500
1501 status = hid_start_in(hid);
1502 if (status < 0)
1503 hid_io_error(hid);
1504 1510
1511 hid_restart_io(hid);
1505 if (driver_suspended && hid->driver && hid->driver->resume) 1512 if (driver_suspended && hid->driver && hid->driver->resume)
1506 status = hid->driver->resume(hid); 1513 status = hid->driver->resume(hid);
1507 return status; 1514 return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
1570static int hid_resume(struct usb_interface *intf) 1577static int hid_resume(struct usb_interface *intf)
1571{ 1578{
1572 struct hid_device *hid = usb_get_intfdata (intf); 1579 struct hid_device *hid = usb_get_intfdata (intf);
1573 struct usbhid_device *usbhid = hid->driver_data;
1574 int status; 1580 int status;
1575 1581
1576 if (!test_bit(HID_STARTED, &usbhid->iofl))
1577 return 0;
1578
1579 status = hid_resume_common(hid, true); 1582 status = hid_resume_common(hid, true);
1580 dev_dbg(&intf->dev, "resume status %d\n", status); 1583 dev_dbg(&intf->dev, "resume status %d\n", status);
1581 return 0; 1584 return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
1584static int hid_reset_resume(struct usb_interface *intf) 1587static int hid_reset_resume(struct usb_interface *intf)
1585{ 1588{
1586 struct hid_device *hid = usb_get_intfdata(intf); 1589 struct hid_device *hid = usb_get_intfdata(intf);
1587 struct usbhid_device *usbhid = hid->driver_data;
1588 int status; 1590 int status;
1589 1591
1590 clear_bit(HID_SUSPENDED, &usbhid->iofl);
1591 status = hid_post_reset(intf); 1592 status = hid_post_reset(intf);
1592 if (status >= 0 && hid->driver && hid->driver->reset_resume) { 1593 if (status >= 0 && hid->driver && hid->driver->reset_resume) {
1593 int ret = hid->driver->reset_resume(hid); 1594 int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ed2f68edc8f1..53fc856d6867 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 75 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 76 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
76 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, 77 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a560957871..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
152 hid_data->inputmode = field->report->id; 152 hid_data->inputmode = field->report->id;
153 hid_data->inputmode_index = usage->usage_index; 153 hid_data->inputmode_index = usage->usage_index;
154 break; 154 break;
155
156 case HID_UP_DIGITIZER:
157 if (field->report->id == 0x0B &&
158 (field->application == WACOM_G9_DIGITIZER ||
159 field->application == WACOM_G11_DIGITIZER)) {
160 wacom->wacom_wac.mode_report = field->report->id;
161 wacom->wacom_wac.mode_value = 0;
162 }
163 break;
164
165 case WACOM_G9_PAGE:
166 case WACOM_G11_PAGE:
167 if (field->report->id == 0x03 &&
168 (field->application == WACOM_G9_TOUCHSCREEN ||
169 field->application == WACOM_G11_TOUCHSCREEN)) {
170 wacom->wacom_wac.mode_report = field->report->id;
171 wacom->wacom_wac.mode_value = 0;
172 }
173 break;
155 } 174 }
156} 175}
157 176
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
322 return 0; 341 return 0;
323} 342}
324 343
325static int wacom_set_device_mode(struct hid_device *hdev, int report_id, 344static int wacom_set_device_mode(struct hid_device *hdev,
326 int length, int mode) 345 struct wacom_wac *wacom_wac)
327{ 346{
328 unsigned char *rep_data; 347 u8 *rep_data;
348 struct hid_report *r;
349 struct hid_report_enum *re;
350 int length;
329 int error = -ENOMEM, limit = 0; 351 int error = -ENOMEM, limit = 0;
330 352
331 rep_data = kzalloc(length, GFP_KERNEL); 353 if (wacom_wac->mode_report < 0)
354 return 0;
355
356 re = &(hdev->report_enum[HID_FEATURE_REPORT]);
357 r = re->report_id_hash[wacom_wac->mode_report];
358 if (!r)
359 return -EINVAL;
360
361 rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
332 if (!rep_data) 362 if (!rep_data)
333 return error; 363 return -ENOMEM;
364
365 length = hid_report_len(r);
334 366
335 do { 367 do {
336 rep_data[0] = report_id; 368 rep_data[0] = wacom_wac->mode_report;
337 rep_data[1] = mode; 369 rep_data[1] = wacom_wac->mode_value;
338 370
339 error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 371 error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
340 length, 1); 372 length, 1);
341 if (error >= 0) 373 if (error >= 0)
342 error = wacom_get_report(hdev, HID_FEATURE_REPORT, 374 error = wacom_get_report(hdev, HID_FEATURE_REPORT,
343 rep_data, length, 1); 375 rep_data, length, 1);
344 } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES); 376 } while (error >= 0 &&
377 rep_data[1] != wacom_wac->mode_report &&
378 limit++ < WAC_MSG_RETRIES);
345 379
346 kfree(rep_data); 380 kfree(rep_data);
347 381
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
411static int wacom_query_tablet_data(struct hid_device *hdev, 445static int wacom_query_tablet_data(struct hid_device *hdev,
412 struct wacom_features *features) 446 struct wacom_features *features)
413{ 447{
448 struct wacom *wacom = hid_get_drvdata(hdev);
449 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
450
414 if (hdev->bus == BUS_BLUETOOTH) 451 if (hdev->bus == BUS_BLUETOOTH)
415 return wacom_bt_query_tablet_data(hdev, 1, features); 452 return wacom_bt_query_tablet_data(hdev, 1, features);
416 453
417 if (features->type == HID_GENERIC) 454 if (features->type != HID_GENERIC) {
418 return wacom_hid_set_device_mode(hdev); 455 if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
419 456 if (features->type > TABLETPC) {
420 if (features->device_type & WACOM_DEVICETYPE_TOUCH) { 457 /* MT Tablet PC touch */
421 if (features->type > TABLETPC) { 458 wacom_wac->mode_report = 3;
422 /* MT Tablet PC touch */ 459 wacom_wac->mode_value = 4;
423 return wacom_set_device_mode(hdev, 3, 4, 4); 460 } else if (features->type == WACOM_24HDT) {
424 } 461 wacom_wac->mode_report = 18;
425 else if (features->type == WACOM_24HDT) { 462 wacom_wac->mode_value = 2;
426 return wacom_set_device_mode(hdev, 18, 3, 2); 463 } else if (features->type == WACOM_27QHDT) {
427 } 464 wacom_wac->mode_report = 131;
428 else if (features->type == WACOM_27QHDT) { 465 wacom_wac->mode_value = 2;
429 return wacom_set_device_mode(hdev, 131, 3, 2); 466 } else if (features->type == BAMBOO_PAD) {
430 } 467 wacom_wac->mode_report = 2;
431 else if (features->type == BAMBOO_PAD) { 468 wacom_wac->mode_value = 2;
432 return wacom_set_device_mode(hdev, 2, 2, 2); 469 }
433 } 470 } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
434 } else if (features->device_type & WACOM_DEVICETYPE_PEN) { 471 if (features->type <= BAMBOO_PT) {
435 if (features->type <= BAMBOO_PT) { 472 wacom_wac->mode_report = 2;
436 return wacom_set_device_mode(hdev, 2, 2, 2); 473 wacom_wac->mode_value = 2;
474 }
437 } 475 }
438 } 476 }
439 477
478 wacom_set_device_mode(hdev, wacom_wac);
479
480 if (features->type == HID_GENERIC)
481 return wacom_hid_set_device_mode(hdev);
482
440 return 0; 483 return 0;
441} 484}
442 485
@@ -1817,6 +1860,9 @@ static int wacom_probe(struct hid_device *hdev,
1817 goto fail_type; 1860 goto fail_type;
1818 } 1861 }
1819 1862
1863 wacom_wac->hid_data.inputmode = -1;
1864 wacom_wac->mode_report = -1;
1865
1820 wacom->usbdev = dev; 1866 wacom->usbdev = dev;
1821 wacom->intf = intf; 1867 wacom->intf = intf;
1822 mutex_init(&wacom->lock); 1868 mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bbd4df0..cf2ba43453fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
684 684
685 wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]); 685 wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
686 686
687 wacom->shared->stylus_in_proximity = true;
687 return 1; 688 return 1;
688 } 689 }
689 690
@@ -2426,6 +2427,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2426 } 2427 }
2427 2428
2428 /* 2429 /*
2430 * Hack for the Bamboo One:
2431 * the device presents a PAD/Touch interface as most Bamboos and even
2432 * sends ghosts PAD data on it. However, later, we must disable this
2433 * ghost interface, and we can not detect it unless we set it here
2434 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
2435 */
2436 if (features->type == BAMBOO_PEN &&
2437 features->pktlen == WACOM_PKGLEN_BBTOUCH3)
2438 features->device_type |= WACOM_DEVICETYPE_PAD;
2439
2440 /*
2429 * Raw Wacom-mode pen and touch events both come from interface 2441 * Raw Wacom-mode pen and touch events both come from interface
2430 * 0, whose HID descriptor has an application usage of 0xFF0D 2442 * 0, whose HID descriptor has an application usage of 0xFF0D
2431 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back 2443 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
@@ -3384,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
3384 { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, 3396 { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
3385 INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, 3397 INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
3386 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; 3398 .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
3399static const struct wacom_features wacom_features_0x343 =
3400 { "Wacom DTK1651", 34616, 19559, 1023, 0,
3401 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
3402 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3387 3403
3388static const struct wacom_features wacom_features_HID_ANY_ID = 3404static const struct wacom_features wacom_features_HID_ANY_ID =
3389 { "Wacom HID", .type = HID_GENERIC }; 3405 { "Wacom HID", .type = HID_GENERIC };
@@ -3549,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
3549 { USB_DEVICE_WACOM(0x33C) }, 3565 { USB_DEVICE_WACOM(0x33C) },
3550 { USB_DEVICE_WACOM(0x33D) }, 3566 { USB_DEVICE_WACOM(0x33D) },
3551 { USB_DEVICE_WACOM(0x33E) }, 3567 { USB_DEVICE_WACOM(0x33E) },
3568 { USB_DEVICE_WACOM(0x343) },
3552 { USB_DEVICE_WACOM(0x4001) }, 3569 { USB_DEVICE_WACOM(0x4001) },
3553 { USB_DEVICE_WACOM(0x4004) }, 3570 { USB_DEVICE_WACOM(0x4004) },
3554 { USB_DEVICE_WACOM(0x5000) }, 3571 { USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
84#define WACOM_DEVICETYPE_WL_MONITOR 0x0008 84#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
85 85
86#define WACOM_VENDORDEFINED_PEN 0xff0d0001 86#define WACOM_VENDORDEFINED_PEN 0xff0d0001
87#define WACOM_G9_PAGE 0xff090000
88#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
89#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
90#define WACOM_G11_PAGE 0xff110000
91#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
92#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
87 93
88#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \ 94#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
89 ((f)->physical == HID_DG_STYLUS) || \ 95 ((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
238 int ps_connected; 244 int ps_connected;
239 u8 bt_features; 245 u8 bt_features;
240 u8 bt_high_speed; 246 u8 bt_high_speed;
247 int mode_report;
248 int mode_value;
241 struct hid_data hid_data; 249 struct hid_data hid_data;
242}; 250};
243 251
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5613e2b5cff7..a40a73a7b71d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
103 * there is room for the producer to send the pending packet. 103 * there is room for the producer to send the pending packet.
104 */ 104 */
105 105
106static bool hv_need_to_signal_on_read(u32 prev_write_sz, 106static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
107 struct hv_ring_buffer_info *rbi)
108{ 107{
109 u32 cur_write_sz; 108 u32 cur_write_sz;
110 u32 r_size; 109 u32 r_size;
111 u32 write_loc = rbi->ring_buffer->write_index; 110 u32 write_loc;
112 u32 read_loc = rbi->ring_buffer->read_index; 111 u32 read_loc = rbi->ring_buffer->read_index;
113 u32 pending_sz = rbi->ring_buffer->pending_send_sz; 112 u32 pending_sz;
114 113
114 /*
115 * Issue a full memory barrier before making the signaling decision.
116 * Here is the reason for having this barrier:
117 * If the reading of the pend_sz (in this function)
118 * were to be reordered and read before we commit the new read
119 * index (in the calling function) we could
120 * have a problem. If the host were to set the pending_sz after we
121 * have sampled pending_sz and go to sleep before we commit the
122 * read index, we could miss sending the interrupt. Issue a full
123 * memory barrier to address this.
124 */
125 mb();
126
127 pending_sz = rbi->ring_buffer->pending_send_sz;
128 write_loc = rbi->ring_buffer->write_index;
115 /* If the other end is not blocked on write don't bother. */ 129 /* If the other end is not blocked on write don't bother. */
116 if (pending_sz == 0) 130 if (pending_sz == 0)
117 return false; 131 return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
120 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) : 134 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
121 read_loc - write_loc; 135 read_loc - write_loc;
122 136
123 if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz)) 137 if (cur_write_sz >= pending_sz)
124 return true; 138 return true;
125 139
126 return false; 140 return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
455 /* Update the read index */ 469 /* Update the read index */
456 hv_set_next_read_location(inring_info, next_read_location); 470 hv_set_next_read_location(inring_info, next_read_location);
457 471
458 *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); 472 *signal = hv_need_to_signal_on_read(inring_info);
459 473
460 return ret; 474 return ret;
461} 475}
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
85 85
86int max1111_read_channel(int channel) 86int max1111_read_channel(int channel)
87{ 87{
88 if (!the_max1111 || !the_max1111->spi)
89 return -ENODEV;
90
88 return max1111_read(&the_max1111->spi->dev, channel); 91 return max1111_read(&the_max1111->spi->dev, channel);
89} 92}
90EXPORT_SYMBOL(max1111_read_channel); 93EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
258{ 261{
259 struct max1111_data *data = spi_get_drvdata(spi); 262 struct max1111_data *data = spi_get_drvdata(spi);
260 263
264#ifdef CONFIG_SHARPSL_PM
265 the_max1111 = NULL;
266#endif
261 hwmon_device_unregister(data->hwmon_dev); 267 hwmon_device_unregister(data->hwmon_dev);
262 sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group); 268 sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
263 sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); 269 sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
975 975
976config I2C_XLP9XX 976config I2C_XLP9XX
977 tristate "XLP9XX I2C support" 977 tristate "XLP9XX I2C support"
978 depends on CPU_XLP || COMPILE_TEST 978 depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
979 help 979 help
980 This driver enables support for the on-chip I2C interface of 980 This driver enables support for the on-chip I2C interface of
981 the Broadcom XLP9xx/XLP5xx MIPS processors. 981 the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
982 982
983 This driver can also be built as a module. If so, the module will 983 This driver can also be built as a module. If so, the module will
984 be called i2c-xlp9xx. 984 be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
116 cbd_t __iomem *rbase; 116 cbd_t __iomem *rbase;
117 u_char *txbuf[CPM_MAXBD]; 117 u_char *txbuf[CPM_MAXBD];
118 u_char *rxbuf[CPM_MAXBD]; 118 u_char *rxbuf[CPM_MAXBD];
119 u32 txdma[CPM_MAXBD]; 119 dma_addr_t txdma[CPM_MAXBD];
120 u32 rxdma[CPM_MAXBD]; 120 dma_addr_t rxdma[CPM_MAXBD];
121}; 121};
122 122
123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) 123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
671 return -EIO; 671 return -EIO;
672 } 672 }
673 673
674 clk_prepare_enable(i2c->clk); 674 ret = clk_enable(i2c->clk);
675 if (ret)
676 return ret;
675 677
676 for (i = 0; i < num; i++, msgs++) { 678 for (i = 0; i < num; i++, msgs++) {
677 stop = (i == num - 1); 679 stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
695 } 697 }
696 698
697 out: 699 out:
698 clk_disable_unprepare(i2c->clk); 700 clk_disable(i2c->clk);
699 return ret; 701 return ret;
700} 702}
701 703
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
747 return -ENOENT; 749 return -ENOENT;
748 } 750 }
749 751
750 clk_prepare_enable(i2c->clk); 752 ret = clk_prepare_enable(i2c->clk);
753 if (ret)
754 return ret;
751 755
752 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 756 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
753 i2c->regs = devm_ioremap_resource(&pdev->dev, mem); 757 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
799 803
800 platform_set_drvdata(pdev, i2c); 804 platform_set_drvdata(pdev, i2c);
801 805
806 clk_disable(i2c->clk);
807
808 return 0;
809
802 err_clk: 810 err_clk:
803 clk_disable_unprepare(i2c->clk); 811 clk_disable_unprepare(i2c->clk);
804 return ret; 812 return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
810 818
811 i2c_del_adapter(&i2c->adap); 819 i2c_del_adapter(&i2c->adap);
812 820
821 clk_unprepare(i2c->clk);
822
813 return 0; 823 return 0;
814} 824}
815 825
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
821 831
822 i2c->suspended = 1; 832 i2c->suspended = 1;
823 833
834 clk_unprepare(i2c->clk);
835
824 return 0; 836 return 0;
825} 837}
826 838
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
830 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 842 struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
831 int ret = 0; 843 int ret = 0;
832 844
833 clk_prepare_enable(i2c->clk); 845 ret = clk_prepare_enable(i2c->clk);
846 if (ret)
847 return ret;
834 848
835 ret = exynos5_hsi2c_clock_setup(i2c); 849 ret = exynos5_hsi2c_clock_setup(i2c);
836 if (ret) { 850 if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
839 } 853 }
840 854
841 exynos5_i2c_init(i2c); 855 exynos5_i2c_init(i2c);
842 clk_disable_unprepare(i2c->clk); 856 clk_disable(i2c->clk);
843 i2c->suspended = 0; 857 i2c->suspended = 0;
844 858
845 return 0; 859 return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
78#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
78#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 79#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
79 80
80#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ 81#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
180static const struct pci_device_id ismt_ids[] = { 181static const struct pci_device_id ismt_ids[] = {
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, 185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
184 { 0, } 186 { 0, }
185}; 187};
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", 771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
772 &clk_freq); 772 &clk_freq);
773 if (ret) { 773 if (ret) {
774 dev_err(&pdev->dev, "clock-frequency not specified in DT"); 774 dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
775 goto err; 775 goto err;
776 } 776 }
777 777
778 i2c->speed = clk_freq / 1000; 778 i2c->speed = clk_freq / 1000;
779 if (i2c->speed == 0) {
780 ret = -EINVAL;
781 dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
782 goto err;
783 }
779 jz4780_i2c_set_speed(i2c); 784 jz4780_i2c_set_speed(i2c);
780 785
781 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); 786 dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
855static const struct of_device_id rk3x_i2c_match[] = { 855static const struct of_device_id rk3x_i2c_match[] = {
856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, 856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, 857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
858 { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
858 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, 859 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
859 {}, 860 {},
860}; 861};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0f2f8484e8ec..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
525 return 0; 525 return 0;
526} 526}
527 527
528
529/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
530static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) 528static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
531{ 529{
532 struct i2c_client *client = to_i2c_client(dev); 530 struct i2c_client *client = to_i2c_client(dev);
533 int rc; 531 int rc;
534 532
535 rc = acpi_device_uevent_modalias(dev, env); 533 rc = acpi_device_uevent_modalias(dev, env);
536 if (rc != -ENODEV) 534 if (rc != -ENODEV)
537 return rc; 535 return rc;
538 536
539 if (add_uevent_var(env, "MODALIAS=%s%s", 537 return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
540 I2C_MODULE_PREFIX, client->name))
541 return -ENOMEM;
542 dev_dbg(dev, "uevent\n");
543 return 0;
544} 538}
545 539
546/* i2c bus recovery routines */ 540/* i2c bus recovery routines */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 7748a0a5ddb9..8de073aed001 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_
140 return i2c_demux_activate_master(priv, new_chan); 140 return i2c_demux_activate_master(priv, new_chan);
141} 141}
142 142
143static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr, 143static ssize_t available_masters_show(struct device *dev,
144 char *buf) 144 struct device_attribute *attr,
145 char *buf)
145{ 146{
146 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 147 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
147 int count = 0, i; 148 int count = 0, i;
148 149
149 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) 150 for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
150 count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n", 151 count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
151 i == priv->cur_chan ? '*' : ' ', i, 152 i, priv->chan[i].parent_np->full_name,
152 priv->chan[i].parent_np->full_name); 153 i == priv->num_chan - 1 ? '\n' : ' ');
153 154
154 return count; 155 return count;
155} 156}
157static DEVICE_ATTR_RO(available_masters);
156 158
157static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr, 159static ssize_t current_master_show(struct device *dev,
158 const char *buf, size_t count) 160 struct device_attribute *attr,
161 char *buf)
162{
163 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
164
165 return sprintf(buf, "%d\n", priv->cur_chan);
166}
167
168static ssize_t current_master_store(struct device *dev,
169 struct device_attribute *attr,
170 const char *buf, size_t count)
159{ 171{
160 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); 172 struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
161 unsigned int val; 173 unsigned int val;
@@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att
172 184
173 return ret < 0 ? ret : count; 185 return ret < 0 ? ret : count;
174} 186}
175static DEVICE_ATTR_RW(cur_master); 187static DEVICE_ATTR_RW(current_master);
176 188
177static int i2c_demux_pinctrl_probe(struct platform_device *pdev) 189static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
178{ 190{
@@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
218 /* switch to first parent as active master */ 230 /* switch to first parent as active master */
219 i2c_demux_activate_master(priv, 0); 231 i2c_demux_activate_master(priv, 0);
220 232
221 err = device_create_file(&pdev->dev, &dev_attr_cur_master); 233 err = device_create_file(&pdev->dev, &dev_attr_available_masters);
222 if (err) 234 if (err)
223 goto err_rollback; 235 goto err_rollback;
224 236
237 err = device_create_file(&pdev->dev, &dev_attr_current_master);
238 if (err)
239 goto err_rollback_available;
240
225 return 0; 241 return 0;
226 242
243err_rollback_available:
244 device_remove_file(&pdev->dev, &dev_attr_available_masters);
227err_rollback: 245err_rollback:
228 for (j = 0; j < i; j++) { 246 for (j = 0; j < i; j++) {
229 of_node_put(priv->chan[j].parent_np); 247 of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
238 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev); 256 struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
239 int i; 257 int i;
240 258
241 device_remove_file(&pdev->dev, &dev_attr_cur_master); 259 device_remove_file(&pdev->dev, &dev_attr_current_master);
260 device_remove_file(&pdev->dev, &dev_attr_available_masters);
242 261
243 i2c_demux_deactivate_master(priv); 262 i2c_demux_deactivate_master(priv);
244 263
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e39b8a..80e933b296f6 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@ err_free:
451 return ret; 451 return ret;
452} 452}
453 453
454static const struct ide_port_info icside_v6_port_info __initconst = { 454static const struct ide_port_info icside_v6_port_info = {
455 .init_dma = icside_dma_off_init, 455 .init_dma = icside_dma_off_init,
456 .port_ops = &icside_v6_no_dma_port_ops, 456 .port_ops = &icside_v6_no_dma_port_ops,
457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43bf8f6..46427ea01753 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
325 325
326 clk_enable(clk); 326 clk_enable(clk);
327 rate = clk_get_rate(clk); 327 rate = clk_get_rate(clk);
328 if (!rate)
329 return -EINVAL;
328 330
329 /* NOTE: round *down* to meet minimum timings; we count in clocks */ 331 /* NOTE: round *down* to meet minimum timings; we count in clocks */
330 ideclk_period = 1000000000UL / rate; 332 ideclk_period = 1000000000UL / rate;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ba947df5a8c7..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
660 .enter = NULL } 660 .enter = NULL }
661}; 661};
662 662
663static struct cpuidle_state skx_cstates[] = {
664 {
665 .name = "C1-SKX",
666 .desc = "MWAIT 0x00",
667 .flags = MWAIT2flg(0x00),
668 .exit_latency = 2,
669 .target_residency = 2,
670 .enter = &intel_idle,
671 .enter_freeze = intel_idle_freeze, },
672 {
673 .name = "C1E-SKX",
674 .desc = "MWAIT 0x01",
675 .flags = MWAIT2flg(0x01),
676 .exit_latency = 10,
677 .target_residency = 20,
678 .enter = &intel_idle,
679 .enter_freeze = intel_idle_freeze, },
680 {
681 .name = "C6-SKX",
682 .desc = "MWAIT 0x20",
683 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
684 .exit_latency = 133,
685 .target_residency = 600,
686 .enter = &intel_idle,
687 .enter_freeze = intel_idle_freeze, },
688 {
689 .enter = NULL }
690};
691
663static struct cpuidle_state atom_cstates[] = { 692static struct cpuidle_state atom_cstates[] = {
664 { 693 {
665 .name = "C1E-ATM", 694 .name = "C1E-ATM",
@@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
818 * driver in this case 847 * driver in this case
819 */ 848 */
820 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); 849 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
821 if (!dev->registered) 850 if (dev->registered)
822 intel_idle_cpu_init(hotcpu); 851 break;
852
853 if (intel_idle_cpu_init(hotcpu))
854 return NOTIFY_BAD;
823 855
824 break; 856 break;
825 } 857 }
@@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = {
904 .disable_promotion_to_c1e = true, 936 .disable_promotion_to_c1e = true,
905}; 937};
906 938
939static const struct idle_cpu idle_cpu_skx = {
940 .state_table = skx_cstates,
941 .disable_promotion_to_c1e = true,
942};
907 943
908static const struct idle_cpu idle_cpu_avn = { 944static const struct idle_cpu idle_cpu_avn = {
909 .state_table = avn_cstates, 945 .state_table = avn_cstates,
@@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
945 ICPU(0x56, idle_cpu_bdw), 981 ICPU(0x56, idle_cpu_bdw),
946 ICPU(0x4e, idle_cpu_skl), 982 ICPU(0x4e, idle_cpu_skl),
947 ICPU(0x5e, idle_cpu_skl), 983 ICPU(0x5e, idle_cpu_skl),
984 ICPU(0x8e, idle_cpu_skl),
985 ICPU(0x9e, idle_cpu_skl),
986 ICPU(0x55, idle_cpu_skx),
948 ICPU(0x57, idle_cpu_knl), 987 ICPU(0x57, idle_cpu_knl),
949 {} 988 {}
950}; 989};
@@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void)
987 icpu = (const struct idle_cpu *)id->driver_data; 1026 icpu = (const struct idle_cpu *)id->driver_data;
988 cpuidle_state_table = icpu->state_table; 1027 cpuidle_state_table = icpu->state_table;
989 1028
990 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
991 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
992 else
993 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
994
995 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 1029 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
996 " model 0x%X\n", boot_cpu_data.x86_model); 1030 " model 0x%X\n", boot_cpu_data.x86_model);
997 1031
998 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
999 lapic_timer_reliable_states);
1000 return 0; 1032 return 0;
1001} 1033}
1002 1034
1003/* 1035/*
1004 * intel_idle_cpuidle_devices_uninit() 1036 * intel_idle_cpuidle_devices_uninit()
1005 * unregister, free cpuidle_devices 1037 * Unregisters the cpuidle devices.
1006 */ 1038 */
1007static void intel_idle_cpuidle_devices_uninit(void) 1039static void intel_idle_cpuidle_devices_uninit(void)
1008{ 1040{
@@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void)
1013 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); 1045 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1014 cpuidle_unregister_device(dev); 1046 cpuidle_unregister_device(dev);
1015 } 1047 }
1016
1017 free_percpu(intel_idle_cpuidle_devices);
1018 return;
1019} 1048}
1020 1049
1021/* 1050/*
@@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void)
1111 * intel_idle_cpuidle_driver_init() 1140 * intel_idle_cpuidle_driver_init()
1112 * allocate, initialize cpuidle_states 1141 * allocate, initialize cpuidle_states
1113 */ 1142 */
1114static int __init intel_idle_cpuidle_driver_init(void) 1143static void __init intel_idle_cpuidle_driver_init(void)
1115{ 1144{
1116 int cstate; 1145 int cstate;
1117 struct cpuidle_driver *drv = &intel_idle_driver; 1146 struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
1163 drv->state_count += 1; 1192 drv->state_count += 1;
1164 } 1193 }
1165 1194
1166 if (icpu->auto_demotion_disable_flags)
1167 on_each_cpu(auto_demotion_disable, NULL, 1);
1168
1169 if (icpu->byt_auto_demotion_disable_flag) { 1195 if (icpu->byt_auto_demotion_disable_flag) {
1170 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 1196 wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
1171 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 1197 wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
1172 } 1198 }
1173
1174 if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
1175 on_each_cpu(c1e_promotion_disable, NULL, 1);
1176
1177 return 0;
1178} 1199}
1179 1200
1180 1201
@@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
1193 1214
1194 if (cpuidle_register_device(dev)) { 1215 if (cpuidle_register_device(dev)) {
1195 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); 1216 pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
1196 intel_idle_cpuidle_devices_uninit();
1197 return -EIO; 1217 return -EIO;
1198 } 1218 }
1199 1219
@@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void)
1218 if (retval) 1238 if (retval)
1219 return retval; 1239 return retval;
1220 1240
1241 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1242 if (intel_idle_cpuidle_devices == NULL)
1243 return -ENOMEM;
1244
1221 intel_idle_cpuidle_driver_init(); 1245 intel_idle_cpuidle_driver_init();
1222 retval = cpuidle_register_driver(&intel_idle_driver); 1246 retval = cpuidle_register_driver(&intel_idle_driver);
1223 if (retval) { 1247 if (retval) {
1224 struct cpuidle_driver *drv = cpuidle_get_driver(); 1248 struct cpuidle_driver *drv = cpuidle_get_driver();
1225 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", 1249 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
1226 drv ? drv->name : "none"); 1250 drv ? drv->name : "none");
1251 free_percpu(intel_idle_cpuidle_devices);
1227 return retval; 1252 return retval;
1228 } 1253 }
1229 1254
1230 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
1231 if (intel_idle_cpuidle_devices == NULL)
1232 return -ENOMEM;
1233
1234 cpu_notifier_register_begin(); 1255 cpu_notifier_register_begin();
1235 1256
1236 for_each_online_cpu(i) { 1257 for_each_online_cpu(i) {
1237 retval = intel_idle_cpu_init(i); 1258 retval = intel_idle_cpu_init(i);
1238 if (retval) { 1259 if (retval) {
1260 intel_idle_cpuidle_devices_uninit();
1239 cpu_notifier_register_done(); 1261 cpu_notifier_register_done();
1240 cpuidle_unregister_driver(&intel_idle_driver); 1262 cpuidle_unregister_driver(&intel_idle_driver);
1263 free_percpu(intel_idle_cpuidle_devices);
1241 return retval; 1264 return retval;
1242 } 1265 }
1243 } 1266 }
1244 __register_cpu_notifier(&cpu_hotplug_notifier); 1267 __register_cpu_notifier(&cpu_hotplug_notifier);
1245 1268
1269 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1270 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1271 else
1272 on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1273
1246 cpu_notifier_register_done(); 1274 cpu_notifier_register_done();
1247 1275
1276 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
1277 lapic_timer_reliable_states);
1278
1248 return 0; 1279 return 0;
1249} 1280}
1250 1281
1251static void __exit intel_idle_exit(void) 1282static void __exit intel_idle_exit(void)
1252{ 1283{
1253 intel_idle_cpuidle_devices_uninit(); 1284 struct cpuidle_device *dev;
1254 cpuidle_unregister_driver(&intel_idle_driver); 1285 int i;
1255 1286
1256 cpu_notifier_register_begin(); 1287 cpu_notifier_register_begin();
1257 1288
@@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void)
1259 on_each_cpu(__setup_broadcast_timer, (void *)false, 1); 1290 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1260 __unregister_cpu_notifier(&cpu_hotplug_notifier); 1291 __unregister_cpu_notifier(&cpu_hotplug_notifier);
1261 1292
1293 for_each_possible_cpu(i) {
1294 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
1295 cpuidle_unregister_device(dev);
1296 }
1297
1262 cpu_notifier_register_done(); 1298 cpu_notifier_register_done();
1263 1299
1264 return; 1300 cpuidle_unregister_driver(&intel_idle_driver);
1301 free_percpu(intel_idle_cpuidle_devices);
1265} 1302}
1266 1303
1267module_init(intel_idle_init); 1304module_init(intel_idle_init);
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7782b..2072a31e813b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
547{ 547{
548 int ret; 548 int ret;
549 int axis = chan->scan_index; 549 int axis = chan->scan_index;
550 unsigned int raw_val; 550 __le16 raw_val;
551 551
552 mutex_lock(&data->mutex); 552 mutex_lock(&data->mutex);
553 ret = bmc150_accel_set_power_state(data, true); 553 ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
557 } 557 }
558 558
559 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis), 559 ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
560 &raw_val, 2); 560 &raw_val, sizeof(raw_val));
561 if (ret < 0) { 561 if (ret < 0) {
562 dev_err(data->dev, "Error reading axis %d\n", axis); 562 dev_err(data->dev, "Error reading axis %d\n", axis);
563 bmc150_accel_set_power_state(data, false); 563 bmc150_accel_set_power_state(data, false);
564 mutex_unlock(&data->mutex); 564 mutex_unlock(&data->mutex);
565 return ret; 565 return ret;
566 } 566 }
567 *val = sign_extend32(raw_val >> chan->scan_type.shift, 567 *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
568 chan->scan_type.realbits - 1); 568 chan->scan_type.realbits - 1);
569 ret = bmc150_accel_set_power_state(data, false); 569 ret = bmc150_accel_set_power_state(data, false);
570 mutex_unlock(&data->mutex); 570 mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
988 .realbits = (bits), \ 988 .realbits = (bits), \
989 .storagebits = 16, \ 989 .storagebits = 16, \
990 .shift = 16 - (bits), \ 990 .shift = 16 - (bits), \
991 .endianness = IIO_LE, \
991 }, \ 992 }, \
992 .event_spec = &bmc150_accel_event, \ 993 .event_spec = &bmc150_accel_event, \
993 .num_event_specs = 1 \ 994 .num_event_specs = 1 \
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index af4aea7b20f9..82c718c515a0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -134,6 +134,7 @@ config AT91_ADC
134config AT91_SAMA5D2_ADC 134config AT91_SAMA5D2_ADC
135 tristate "Atmel AT91 SAMA5D2 ADC" 135 tristate "Atmel AT91 SAMA5D2 ADC"
136 depends on ARCH_AT91 || COMPILE_TEST 136 depends on ARCH_AT91 || COMPILE_TEST
137 depends on HAS_IOMEM
137 help 138 help
138 Say yes here to build support for Atmel SAMA5D2 ADC which is 139 Say yes here to build support for Atmel SAMA5D2 ADC which is
139 available on SAMA5D2 SoC family. 140 available on SAMA5D2 SoC family.
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index dbee13ad33a3..2e154cb51685 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
451 if (ret) 451 if (ret)
452 goto vref_disable; 452 goto vref_disable;
453 453
454 platform_set_drvdata(pdev, indio_dev);
455
454 ret = iio_device_register(indio_dev); 456 ret = iio_device_register(indio_dev);
455 if (ret < 0) 457 if (ret < 0)
456 goto per_clk_disable_unprepare; 458 goto per_clk_disable_unprepare;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1386 }, 1386 },
1387 [max11644] = { 1387 [max11644] = {
1388 .bits = 12, 1388 .bits = 12,
1389 .int_vref_mv = 2048, 1389 .int_vref_mv = 4096,
1390 .mode_list = max11644_mode_list, 1390 .mode_list = max11644_mode_list,
1391 .num_modes = ARRAY_SIZE(max11644_mode_list), 1391 .num_modes = ARRAY_SIZE(max11644_mode_list),
1392 .default_mode = s0to1, 1392 .default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1396 }, 1396 },
1397 [max11645] = { 1397 [max11645] = {
1398 .bits = 12, 1398 .bits = 12,
1399 .int_vref_mv = 4096, 1399 .int_vref_mv = 2048,
1400 .mode_list = max11644_mode_list, 1400 .mode_list = max11644_mode_list,
1401 .num_modes = ARRAY_SIZE(max11644_mode_list), 1401 .num_modes = ARRAY_SIZE(max11644_mode_list),
1402 .default_mode = s0to1, 1402 .default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1406 }, 1406 },
1407 [max11646] = { 1407 [max11646] = {
1408 .bits = 10, 1408 .bits = 10,
1409 .int_vref_mv = 2048, 1409 .int_vref_mv = 4096,
1410 .mode_list = max11644_mode_list, 1410 .mode_list = max11644_mode_list,
1411 .num_modes = ARRAY_SIZE(max11644_mode_list), 1411 .num_modes = ARRAY_SIZE(max11644_mode_list),
1412 .default_mode = s0to1, 1412 .default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
1416 }, 1416 },
1417 [max11647] = { 1417 [max11647] = {
1418 .bits = 10, 1418 .bits = 10,
1419 .int_vref_mv = 4096, 1419 .int_vref_mv = 2048,
1420 .mode_list = max11644_mode_list, 1420 .mode_list = max11644_mode_list,
1421 .num_modes = ARRAY_SIZE(max11644_mode_list), 1421 .num_modes = ARRAY_SIZE(max11644_mode_list),
1422 .default_mode = s0to1, 1422 .default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
1680 { "max11615", max11615 }, 1680 { "max11615", max11615 },
1681 { "max11616", max11616 }, 1681 { "max11616", max11616 },
1682 { "max11617", max11617 }, 1682 { "max11617", max11617 },
1683 { "max11644", max11644 },
1684 { "max11645", max11645 },
1685 { "max11646", max11646 },
1686 { "max11647", max11647 },
1683 {} 1687 {}
1684}; 1688};
1685 1689
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b09ac45..4dac567e75b4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) 452static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
453{ 453{
454 int ret; 454 int ret;
455 unsigned int raw_val; 455 __le16 raw_val;
456 456
457 mutex_lock(&data->mutex); 457 mutex_lock(&data->mutex);
458 ret = bmg160_set_power_state(data, true); 458 ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
462 } 462 }
463 463
464 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val, 464 ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
465 2); 465 sizeof(raw_val));
466 if (ret < 0) { 466 if (ret < 0) {
467 dev_err(data->dev, "Error reading axis %d\n", axis); 467 dev_err(data->dev, "Error reading axis %d\n", axis);
468 bmg160_set_power_state(data, false); 468 bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
470 return ret; 470 return ret;
471 } 471 }
472 472
473 *val = sign_extend32(raw_val, 15); 473 *val = sign_extend32(le16_to_cpu(raw_val), 15);
474 ret = bmg160_set_power_state(data, false); 474 ret = bmg160_set_power_state(data, false);
475 mutex_unlock(&data->mutex); 475 mutex_unlock(&data->mutex);
476 if (ret < 0) 476 if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
733 .sign = 's', \ 733 .sign = 's', \
734 .realbits = 16, \ 734 .realbits = 16, \
735 .storagebits = 16, \ 735 .storagebits = 16, \
736 .endianness = IIO_LE, \
736 }, \ 737 }, \
737 .event_spec = &bmg160_event, \ 738 .event_spec = &bmg160_event, \
738 .num_event_specs = 1 \ 739 .num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
780 mutex_unlock(&data->mutex); 781 mutex_unlock(&data->mutex);
781 goto err; 782 goto err;
782 } 783 }
783 data->buffer[i++] = ret; 784 data->buffer[i++] = val;
784 } 785 }
785 mutex_unlock(&data->mutex); 786 mutex_unlock(&data->mutex);
786 787
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 09db89359544..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) { 241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
245 245
246 iio_push_to_buffers(data->indio_dev, data->buffer); 246 iio_push_to_buffers(data->indio_dev, data->buffer);
247 cnt--;
247 } 248 }
248 249
249 mutex_unlock(&data->lock); 250 mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index a7f557af4389..847455a2d6bb 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -9,9 +9,8 @@ config INV_MPU6050_IIO
9 9
10config INV_MPU6050_I2C 10config INV_MPU6050_I2C
11 tristate "Invensense MPU6050 devices (I2C)" 11 tristate "Invensense MPU6050 devices (I2C)"
12 depends on I2C 12 depends on I2C_MUX
13 select INV_MPU6050_IIO 13 select INV_MPU6050_IIO
14 select I2C_MUX
15 select REGMAP_I2C 14 select REGMAP_I2C
16 help 15 help
17 This driver supports the Invensense MPU6050 devices. 16 This driver supports the Invensense MPU6050 devices.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index f581256d9d4c..5ee4e0dc093e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
104 return 0; 104 return 0;
105} 105}
106 106
107static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
108{
109 const struct acpi_device_id *id;
110
111 id = acpi_match_device(dev->driver->acpi_match_table, dev);
112 if (!id)
113 return NULL;
114
115 *chip_id = (int)id->driver_data;
116
117 return dev_name(dev);
118}
119
107/** 120/**
108 * inv_mpu_probe() - probe function. 121 * inv_mpu_probe() - probe function.
109 * @client: i2c client. 122 * @client: i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
115 const struct i2c_device_id *id) 128 const struct i2c_device_id *id)
116{ 129{
117 struct inv_mpu6050_state *st; 130 struct inv_mpu6050_state *st;
118 int result; 131 int result, chip_type;
119 const char *name = id ? id->name : NULL;
120 struct regmap *regmap; 132 struct regmap *regmap;
133 const char *name;
121 134
122 if (!i2c_check_functionality(client->adapter, 135 if (!i2c_check_functionality(client->adapter,
123 I2C_FUNC_SMBUS_I2C_BLOCK)) 136 I2C_FUNC_SMBUS_I2C_BLOCK))
124 return -EOPNOTSUPP; 137 return -EOPNOTSUPP;
125 138
139 if (id) {
140 chip_type = (int)id->driver_data;
141 name = id->name;
142 } else if (ACPI_HANDLE(&client->dev)) {
143 name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
144 if (!name)
145 return -ENODEV;
146 } else {
147 return -ENOSYS;
148 }
149
126 regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config); 150 regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
127 if (IS_ERR(regmap)) { 151 if (IS_ERR(regmap)) {
128 dev_err(&client->dev, "Failed to register i2c regmap %d\n", 152 dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
131 } 155 }
132 156
133 result = inv_mpu_core_probe(regmap, client->irq, name, 157 result = inv_mpu_core_probe(regmap, client->irq, name,
134 NULL, id->driver_data); 158 NULL, chip_type);
135 if (result < 0) 159 if (result < 0)
136 return result; 160 return result;
137 161
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index dea6c4361de0..7bcb8d839f05 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
46 struct regmap *regmap; 46 struct regmap *regmap;
47 const struct spi_device_id *id = spi_get_device_id(spi); 47 const struct spi_device_id *id = spi_get_device_id(spi);
48 const char *name = id ? id->name : NULL; 48 const char *name = id ? id->name : NULL;
49 const int chip_type = id ? id->driver_data : 0;
49 50
50 regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config); 51 regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
51 if (IS_ERR(regmap)) { 52 if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
55 } 56 }
56 57
57 return inv_mpu_core_probe(regmap, spi->irq, name, 58 return inv_mpu_core_probe(regmap, spi->irq, name,
58 inv_mpu_i2c_disable, id->driver_data); 59 inv_mpu_i2c_disable, chip_type);
59} 60}
60 61
61static int inv_mpu_remove(struct spi_device *spi) 62static int inv_mpu_remove(struct spi_device *spi)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b976332d45d3..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
653 unsigned int modes; 653 unsigned int modes;
654 654
655 memset(config, 0, sizeof(*config)); 655 memset(config, 0, sizeof(*config));
656 config->watermark = ~0;
656 657
657 /* 658 /*
658 * If there is just one buffer and we are removing it there is nothing 659 * If there is just one buffer and we are removing it there is nothing
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc32ae4..a6af56ad10e1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
769 mutex_lock(&data->lock); 769 mutex_lock(&data->lock);
770 data->gesture_mode_running = 1; 770 data->gesture_mode_running = 1;
771 771
772 while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) { 772 while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
773 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE, 773 ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
774 &data->buffer, 4); 774 &data->buffer, 4);
775 775
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
777 goto err_read; 777 goto err_read;
778 778
779 iio_push_to_buffers(data->indio_dev, data->buffer); 779 iio_push_to_buffers(data->indio_dev, data->buffer);
780 cnt--;
780 } 781 }
781 782
782err_read: 783err_read:
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 9c5c9ef3f1da..0e931a9a1669 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
462 int rc; 462 int rc;
463 int irq; 463 int irq;
464 464
465 init_waitqueue_head(&data->data_ready_queue);
466 clear_bit(0, &data->flags);
465 if (client->irq) 467 if (client->irq)
466 irq = client->irq; 468 irq = client->irq;
467 else 469 else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
477 return rc; 479 return rc;
478 } 480 }
479 481
480 init_waitqueue_head(&data->data_ready_queue);
481 clear_bit(0, &data->flags);
482 data->eoc_irq = irq; 482 data->eoc_irq = irq;
483 483
484 return rc; 484 return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
732 int eoc_gpio; 732 int eoc_gpio;
733 int err; 733 int err;
734 const char *name = NULL; 734 const char *name = NULL;
735 enum asahi_compass_chipset chipset; 735 enum asahi_compass_chipset chipset = AK_MAX_TYPE;
736 736
737 /* Grab and set up the supplied GPIO. */ 737 /* Grab and set up the supplied GPIO. */
738 if (client->dev.platform_data) 738 if (client->dev.platform_data)
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev) 44static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
45{ 45{
46} 46}
47#define ST_MAGN_TRIGGER_SET_STATE NULL
47#endif /* CONFIG_IIO_BUFFER */ 48#endif /* CONFIG_IIO_BUFFER */
48 49
49#endif /* ST_MAGN_H */ 50#endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
691 NULL); 691 NULL);
692 692
693 /* Coudn't find default GID location */ 693 /* Coudn't find default GID location */
694 WARN_ON(ix < 0); 694 if (WARN_ON(ix < 0))
695 goto release;
695 696
696 zattr_type.gid_type = gid_type; 697 zattr_type.gid_type = gid_type;
697 698
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
51#include <rdma/ib_cm.h> 52#include <rdma/ib_cm.h>
52#include <rdma/ib_user_cm.h> 53#include <rdma/ib_user_cm.h>
53#include <rdma/ib_marshall.h> 54#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1103 struct ib_ucm_cmd_hdr hdr; 1104 struct ib_ucm_cmd_hdr hdr;
1104 ssize_t result; 1105 ssize_t result;
1105 1106
1107 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1108 return -EACCES;
1109
1106 if (len < sizeof(hdr)) 1110 if (len < sizeof(hdr))
1107 return -EINVAL; 1111 return -EINVAL;
1108 1112
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1574 struct rdma_ucm_cmd_hdr hdr; 1574 struct rdma_ucm_cmd_hdr hdr;
1575 ssize_t ret; 1575 ssize_t ret;
1576 1576
1577 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1578 return -EACCES;
1579
1577 if (len < sizeof(hdr)) 1580 if (len < sizeof(hdr))
1578 return -EINVAL; 1581 return -EINVAL;
1579 1582
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
52
51#include "uverbs.h" 53#include "uverbs.h"
52 54
53MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
709 int srcu_key; 711 int srcu_key;
710 ssize_t ret; 712 ssize_t ret;
711 713
714 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
715 return -EACCES;
716
712 if (count < sizeof hdr) 717 if (count < sizeof hdr)
713 return -EINVAL; 718 return -EINVAL;
714 719
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
1860void ib_drain_qp(struct ib_qp *qp) 1860void ib_drain_qp(struct ib_qp *qp)
1861{ 1861{
1862 ib_drain_sq(qp); 1862 ib_drain_sq(qp);
1863 ib_drain_rq(qp); 1863 if (!qp->srq)
1864 ib_drain_rq(qp);
1864} 1865}
1865EXPORT_SYMBOL(ib_drain_qp); 1866EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; 1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; 1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1392 dev->ibdev.iwcm->get_qp = iwch_get_qp; 1392 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1393 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1394 sizeof(dev->ibdev.iwcm->ifname));
1393 1395
1394 ret = ib_register_device(&dev->ibdev, NULL); 1396 ret = ib_register_device(&dev->ibdev, NULL);
1395 if (ret) 1397 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, 162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
163 &cq->bar2_qid, 163 &cq->bar2_qid,
164 user ? &cq->bar2_pa : NULL); 164 user ? &cq->bar2_pa : NULL);
165 if (user && !cq->bar2_va) { 165 if (user && !cq->bar2_pa) {
166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", 166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
167 pci_name(rdev->lldi.pdev), cq->cqid); 167 pci_name(rdev->lldi.pdev), cq->cqid);
168 ret = -EINVAL; 168 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; 580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; 581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
582 dev->ibdev.iwcm->get_qp = c4iw_get_qp; 582 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
583 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
584 sizeof(dev->ibdev.iwcm->ifname));
583 585
584 ret = ib_register_device(&dev->ibdev, NULL); 586 ret = ib_register_device(&dev->ibdev, NULL);
585 if (ret) 587 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
185 185
186 if (pbar2_pa) 186 if (pbar2_pa)
187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; 187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188
189 if (is_t4(rdev->lldi.adapter_type))
190 return NULL;
191
188 return rdev->bar2_kva + bar2_qoffset; 192 return rdev->bar2_kva + bar2_qoffset;
189} 193}
190 194
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
270 /* 274 /*
271 * User mode must have bar2 access. 275 * User mode must have bar2 access.
272 */ 276 */
273 if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { 277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
274 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", 278 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
275 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); 279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
276 goto free_dma; 280 goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1895void c4iw_drain_sq(struct ib_qp *ibqp) 1899void c4iw_drain_sq(struct ib_qp *ibqp)
1896{ 1900{
1897 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1901 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1902 unsigned long flag;
1903 bool need_to_wait;
1898 1904
1899 wait_for_completion(&qp->sq_drained); 1905 spin_lock_irqsave(&qp->lock, flag);
1906 need_to_wait = !t4_sq_empty(&qp->wq);
1907 spin_unlock_irqrestore(&qp->lock, flag);
1908
1909 if (need_to_wait)
1910 wait_for_completion(&qp->sq_drained);
1900} 1911}
1901 1912
1902void c4iw_drain_rq(struct ib_qp *ibqp) 1913void c4iw_drain_rq(struct ib_qp *ibqp)
1903{ 1914{
1904 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1915 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1916 unsigned long flag;
1917 bool need_to_wait;
1918
1919 spin_lock_irqsave(&qp->lock, flag);
1920 need_to_wait = !t4_rq_empty(&qp->wq);
1921 spin_unlock_irqrestore(&qp->lock, flag);
1905 1922
1906 wait_for_completion(&qp->rq_drained); 1923 if (need_to_wait)
1924 wait_for_completion(&qp->rq_drained);
1907} 1925}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d755272..38f917a6c778 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1992/** 1992/**
1993 * i40iw_get_dst_ipv6 1993 * i40iw_get_dst_ipv6
1994 */ 1994 */
1995#if IS_ENABLED(CONFIG_IPV6)
1996static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, 1995static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
1997 struct sockaddr_in6 *dst_addr) 1996 struct sockaddr_in6 *dst_addr)
1998{ 1997{
@@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2008 dst = ip6_route_output(&init_net, NULL, &fl6); 2007 dst = ip6_route_output(&init_net, NULL, &fl6);
2009 return dst; 2008 return dst;
2010} 2009}
2011#endif
2012 2010
2013/** 2011/**
2014 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address 2012 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2016 * @dst_ip: remote ip address 2014 * @dst_ip: remote ip address
2017 * @arpindex: if there is an arp entry 2015 * @arpindex: if there is an arp entry
2018 */ 2016 */
2019#if IS_ENABLED(CONFIG_IPV6)
2020static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, 2017static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2021 u32 *src, 2018 u32 *src,
2022 u32 *dest, 2019 u32 *dest,
@@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2089 dst_release(dst); 2086 dst_release(dst);
2090 return rc; 2087 return rc;
2091} 2088}
2092#endif
2093 2089
2094/** 2090/**
2095 * i40iw_ipv4_is_loopback - check if loopback 2091 * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
2190 cm_info->loc_addr[0], 2186 cm_info->loc_addr[0],
2191 cm_info->rem_addr[0], 2187 cm_info->rem_addr[0],
2192 oldarpindex); 2188 oldarpindex);
2193#if IS_ENABLED(CONFIG_IPV6) 2189 else if (IS_ENABLED(CONFIG_IPV6))
2194 else
2195 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, 2190 arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2196 cm_info->loc_addr, 2191 cm_info->loc_addr,
2197 cm_info->rem_addr, 2192 cm_info->rem_addr,
2198 oldarpindex); 2193 oldarpindex);
2199#endif 2194 else
2195 arpindex = -EINVAL;
2200 } 2196 }
2201 if (arpindex < 0) { 2197 if (arpindex < 0) {
2202 i40iw_pr_err("cm_node arpindex\n"); 2198 i40iw_pr_err("cm_node arpindex\n");
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346e048e..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
530 sizeof(struct mlx5_wqe_ctrl_seg)) / 530 sizeof(struct mlx5_wqe_ctrl_seg)) /
531 sizeof(struct mlx5_wqe_data_seg); 531 sizeof(struct mlx5_wqe_data_seg);
532 props->max_sge = min(max_rq_sg, max_sq_sg); 532 props->max_sge = min(max_rq_sg, max_sq_sg);
533 props->max_sge_rd = props->max_sge; 533 props->max_sge_rd = MLX5_MAX_SGE_RD;
534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
671 struct mlx5_ib_dev *dev = to_mdev(ibdev); 671 struct mlx5_ib_dev *dev = to_mdev(ibdev);
672 struct mlx5_core_dev *mdev = dev->mdev; 672 struct mlx5_core_dev *mdev = dev->mdev;
673 struct mlx5_hca_vport_context *rep; 673 struct mlx5_hca_vport_context *rep;
674 int max_mtu; 674 u16 max_mtu;
675 int oper_mtu; 675 u16 oper_mtu;
676 int err; 676 int err;
677 u8 ib_link_width_oper; 677 u8 ib_link_width_oper;
678 u8 vl_hw_cap; 678 u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818ad2e6..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); 776void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, 777void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
778 unsigned long end); 778 unsigned long end);
779int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
780 u8 port, struct ifla_vf_info *info);
781int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
782 u8 port, int state);
783int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
784 u8 port, struct ifla_vf_stats *stats);
785int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
786 u64 guid, int type);
787
788#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 779#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
789static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) 780static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
790{ 781{
@@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
801 792
802#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ 793#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
803 794
795int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
796 u8 port, struct ifla_vf_info *info);
797int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
798 u8 port, int state);
799int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
800 u8 port, struct ifla_vf_stats *stats);
801int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
802 u64 guid, int type);
803
804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 804__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
805 int index); 805 int index);
806 806
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e055fdd3..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
501 */ 501 */
502 502
503 if (!netif_carrier_ok(netdev))
504 return NETDEV_TX_OK;
505
506 if (netif_queue_stopped(netdev)) 503 if (netif_queue_stopped(netdev))
507 return NETDEV_TX_BUSY; 504 return NETDEV_TX_BUSY;
508 505
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
45#include <linux/export.h> 45#include <linux/export.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47 47
48#include <rdma/ib.h>
49
48#include "qib.h" 50#include "qib.h"
49#include "qib_common.h" 51#include "qib_common.h"
50#include "qib_user_sdma.h" 52#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2067 ssize_t ret = 0; 2069 ssize_t ret = 0;
2068 void *dest; 2070 void *dest;
2069 2071
2072 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2073 return -EACCES;
2074
2070 if (count < sizeof(cmd.type)) { 2075 if (count < sizeof(cmd.type)) {
2071 ret = -EINVAL; 2076 ret = -EINVAL;
2072 goto bail; 2077 goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
1637 spin_unlock_irqrestore(&qp->s_hlock, flags); 1637 spin_unlock_irqrestore(&qp->s_hlock, flags);
1638 if (nreq) { 1638 if (nreq) {
1639 if (call_send) 1639 if (call_send)
1640 rdi->driver_f.schedule_send_no_lock(qp);
1641 else
1642 rdi->driver_f.do_send(qp); 1640 rdi->driver_f.do_send(qp);
1641 else
1642 rdi->driver_f.schedule_send_no_lock(qp);
1643 } 1643 }
1644 return err; 1644 return err;
1645} 1645}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 80b6bedc172f..64b3d11dcf1e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
612 struct Scsi_Host *shost; 612 struct Scsi_Host *shost;
613 struct iser_conn *iser_conn = NULL; 613 struct iser_conn *iser_conn = NULL;
614 struct ib_conn *ib_conn; 614 struct ib_conn *ib_conn;
615 u32 max_fr_sectors;
615 u16 max_cmds; 616 u16 max_cmds;
616 617
617 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); 618 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
632 iser_conn = ep->dd_data; 633 iser_conn = ep->dd_data;
633 max_cmds = iser_conn->max_cmds; 634 max_cmds = iser_conn->max_cmds;
634 shost->sg_tablesize = iser_conn->scsi_sg_tablesize; 635 shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
635 shost->max_sectors = iser_conn->scsi_max_sectors;
636 636
637 mutex_lock(&iser_conn->state_mutex); 637 mutex_lock(&iser_conn->state_mutex);
638 if (iser_conn->state != ISER_CONN_UP) { 638 if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
657 */ 657 */
658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, 658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); 659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
660 shost->max_sectors = min_t(unsigned int,
661 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
662 660
663 if (iscsi_host_add(shost, 661 if (iscsi_host_add(shost,
664 ib_conn->device->ib_device->dma_device)) { 662 ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
672 goto free_host; 670 goto free_host;
673 } 671 }
674 672
673 /*
674 * FRs or FMRs can only map up to a (device) page per entry, but if the
675 * first entry is misaligned we'll end up using using two entries
676 * (head and tail) for a single page worth data, so we have to drop
677 * one segment from the calculation.
678 */
679 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
680 shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
681
675 if (cmds_max > max_cmds) { 682 if (cmds_max > max_cmds) {
676 iser_info("cmds_max changed from %u to %u\n", 683 iser_info("cmds_max changed from %u to %u\n",
677 cmds_max, max_cmds); 684 cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
989 .queuecommand = iscsi_queuecommand, 996 .queuecommand = iscsi_queuecommand,
990 .change_queue_depth = scsi_change_queue_depth, 997 .change_queue_depth = scsi_change_queue_depth,
991 .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, 998 .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
992 .max_sectors = ISER_DEF_MAX_SECTORS,
993 .cmd_per_lun = ISER_DEF_CMD_PER_LUN, 999 .cmd_per_lun = ISER_DEF_CMD_PER_LUN,
994 .eh_abort_handler = iscsi_eh_abort, 1000 .eh_abort_handler = iscsi_eh_abort,
995 .eh_device_reset_handler= iscsi_eh_device_reset, 1001 .eh_device_reset_handler= iscsi_eh_device_reset,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 60b30d338a81..411e4464ca23 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -63,7 +63,6 @@ isert_rdma_accept(struct isert_conn *isert_conn);
63struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); 63struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
64 64
65static void isert_release_work(struct work_struct *work); 65static void isert_release_work(struct work_struct *work);
66static void isert_wait4flush(struct isert_conn *isert_conn);
67static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc); 66static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
68static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc); 67static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
69static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc); 68static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
141 attr.qp_context = isert_conn; 140 attr.qp_context = isert_conn;
142 attr.send_cq = comp->cq; 141 attr.send_cq = comp->cq;
143 attr.recv_cq = comp->cq; 142 attr.recv_cq = comp->cq;
144 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 143 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
145 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 144 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
146 attr.cap.max_send_sge = device->ib_device->attrs.max_sge; 145 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
147 isert_conn->max_sge = min(device->ib_device->attrs.max_sge, 146 isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
887 break; 886 break;
888 case ISER_CONN_UP: 887 case ISER_CONN_UP:
889 isert_conn_terminate(isert_conn); 888 isert_conn_terminate(isert_conn);
890 isert_wait4flush(isert_conn); 889 ib_drain_qp(isert_conn->qp);
891 isert_handle_unbound_conn(isert_conn); 890 isert_handle_unbound_conn(isert_conn);
892 break; 891 break;
893 case ISER_CONN_BOUND: 892 case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
3213 } 3212 }
3214} 3213}
3215 3214
3216static void
3217isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
3218{
3219 struct isert_conn *isert_conn = wc->qp->qp_context;
3220
3221 isert_print_wc(wc, "beacon");
3222
3223 isert_info("conn %p completing wait_comp_err\n", isert_conn);
3224 complete(&isert_conn->wait_comp_err);
3225}
3226
3227static void
3228isert_wait4flush(struct isert_conn *isert_conn)
3229{
3230 struct ib_recv_wr *bad_wr;
3231 static struct ib_cqe cqe = { .done = isert_beacon_done };
3232
3233 isert_info("conn %p\n", isert_conn);
3234
3235 init_completion(&isert_conn->wait_comp_err);
3236 isert_conn->beacon.wr_cqe = &cqe;
3237 /* post an indication that all flush errors were consumed */
3238 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
3239 isert_err("conn %p failed to post beacon", isert_conn);
3240 return;
3241 }
3242
3243 wait_for_completion(&isert_conn->wait_comp_err);
3244}
3245
3246/** 3215/**
3247 * isert_put_unsol_pending_cmds() - Drop commands waiting for 3216 * isert_put_unsol_pending_cmds() - Drop commands waiting for
3248 * unsolicitate dataout 3217 * unsolicitate dataout
@@ -3288,7 +3257,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3288 isert_conn_terminate(isert_conn); 3257 isert_conn_terminate(isert_conn);
3289 mutex_unlock(&isert_conn->mutex); 3258 mutex_unlock(&isert_conn->mutex);
3290 3259
3291 isert_wait4flush(isert_conn); 3260 ib_drain_qp(isert_conn->qp);
3292 isert_put_unsol_pending_cmds(conn); 3261 isert_put_unsol_pending_cmds(conn);
3293 isert_wait4cmds(conn); 3262 isert_wait4cmds(conn);
3294 isert_wait4logout(isert_conn); 3263 isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
3300{ 3269{
3301 struct isert_conn *isert_conn = conn->context; 3270 struct isert_conn *isert_conn = conn->context;
3302 3271
3303 isert_wait4flush(isert_conn); 3272 ib_drain_qp(isert_conn->qp);
3304 isert_put_conn(isert_conn); 3273 isert_put_conn(isert_conn);
3305} 3274}
3306 3275
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 192788a4820c..147900cbb578 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -209,14 +209,12 @@ struct isert_conn {
209 struct ib_qp *qp; 209 struct ib_qp *qp;
210 struct isert_device *device; 210 struct isert_device *device;
211 struct mutex mutex; 211 struct mutex mutex;
212 struct completion wait_comp_err;
213 struct kref kref; 212 struct kref kref;
214 struct list_head fr_pool; 213 struct list_head fr_pool;
215 int fr_pool_size; 214 int fr_pool_size;
216 /* lock to protect fastreg pool */ 215 /* lock to protect fastreg pool */
217 spinlock_t pool_lock; 216 spinlock_t pool_lock;
218 struct work_struct release_work; 217 struct work_struct release_work;
219 struct ib_recv_wr beacon;
220 bool logout_posted; 218 bool logout_posted;
221 bool snd_w_inv; 219 bool snd_w_inv;
222}; 220};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0bd3cb2f3c67..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,26 +1264,40 @@ free_mem:
1264 */ 1264 */
1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) 1265static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1266{ 1266{
1267 struct se_session *se_sess;
1268 struct srpt_send_ioctx *ioctx; 1267 struct srpt_send_ioctx *ioctx;
1269 int tag; 1268 unsigned long flags;
1270 1269
1271 BUG_ON(!ch); 1270 BUG_ON(!ch);
1272 se_sess = ch->sess;
1273 1271
1274 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 1272 ioctx = NULL;
1275 if (tag < 0) { 1273 spin_lock_irqsave(&ch->spinlock, flags);
1276 pr_err("Unable to obtain tag for srpt_send_ioctx\n"); 1274 if (!list_empty(&ch->free_list)) {
1277 return NULL; 1275 ioctx = list_first_entry(&ch->free_list,
1276 struct srpt_send_ioctx, free_list);
1277 list_del(&ioctx->free_list);
1278 } 1278 }
1279 ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag]; 1279 spin_unlock_irqrestore(&ch->spinlock, flags);
1280 memset(ioctx, 0, sizeof(struct srpt_send_ioctx)); 1280
1281 ioctx->ch = ch; 1281 if (!ioctx)
1282 return ioctx;
1283
1284 BUG_ON(ioctx->ch != ch);
1282 spin_lock_init(&ioctx->spinlock); 1285 spin_lock_init(&ioctx->spinlock);
1283 ioctx->state = SRPT_STATE_NEW; 1286 ioctx->state = SRPT_STATE_NEW;
1287 ioctx->n_rbuf = 0;
1288 ioctx->rbufs = NULL;
1289 ioctx->n_rdma = 0;
1290 ioctx->n_rdma_wrs = 0;
1291 ioctx->rdma_wrs = NULL;
1292 ioctx->mapped_sg_count = 0;
1284 init_completion(&ioctx->tx_done); 1293 init_completion(&ioctx->tx_done);
1285 1294 ioctx->queue_status_only = false;
1286 ioctx->cmd.map_tag = tag; 1295 /*
1296 * transport_init_se_cmd() does not initialize all fields, so do it
1297 * here.
1298 */
1299 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1300 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1287 1301
1288 return ioctx; 1302 return ioctx;
1289} 1303}
@@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2021 struct ib_cm_rep_param *rep_param; 2035 struct ib_cm_rep_param *rep_param;
2022 struct srpt_rdma_ch *ch, *tmp_ch; 2036 struct srpt_rdma_ch *ch, *tmp_ch;
2023 u32 it_iu_len; 2037 u32 it_iu_len;
2024 int ret = 0; 2038 int i, ret = 0;
2025 unsigned char *p; 2039 unsigned char *p;
2026 2040
2027 WARN_ON_ONCE(irqs_disabled()); 2041 WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2143 if (!ch->ioctx_ring) 2157 if (!ch->ioctx_ring)
2144 goto free_ch; 2158 goto free_ch;
2145 2159
2160 INIT_LIST_HEAD(&ch->free_list);
2161 for (i = 0; i < ch->rq_size; i++) {
2162 ch->ioctx_ring[i]->ch = ch;
2163 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2164 }
2165
2146 ret = srpt_create_ch_ib(ch); 2166 ret = srpt_create_ch_ib(ch);
2147 if (ret) { 2167 if (ret) {
2148 rej->reason = cpu_to_be32( 2168 rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2173 p = &ch->sess_name[0]; 2193 p = &ch->sess_name[0];
2174 2194
2175try_again: 2195try_again:
2176 ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size, 2196 ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
2177 sizeof(struct srpt_send_ioctx),
2178 TARGET_PROT_NORMAL, p, ch, NULL); 2197 TARGET_PROT_NORMAL, p, ch, NULL);
2179 if (IS_ERR(ch->sess)) { 2198 if (IS_ERR(ch->sess)) {
2180 pr_info("Rejected login because no ACL has been" 2199 pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2881 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 2900 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
2882 struct srpt_send_ioctx, cmd); 2901 struct srpt_send_ioctx, cmd);
2883 struct srpt_rdma_ch *ch = ioctx->ch; 2902 struct srpt_rdma_ch *ch = ioctx->ch;
2884 struct se_session *se_sess = ch->sess; 2903 unsigned long flags;
2885 2904
2886 WARN_ON(ioctx->state != SRPT_STATE_DONE); 2905 WARN_ON(ioctx->state != SRPT_STATE_DONE);
2887 WARN_ON(ioctx->mapped_sg_count != 0); 2906 WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
2892 ioctx->n_rbuf = 0; 2911 ioctx->n_rbuf = 0;
2893 } 2912 }
2894 2913
2895 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 2914 spin_lock_irqsave(&ch->spinlock, flags);
2915 list_add(&ioctx->free_list, &ch->free_list);
2916 spin_unlock_irqrestore(&ch->spinlock, flags);
2896} 2917}
2897 2918
2898/** 2919/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ca288f019315..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,6 +179,7 @@ struct srpt_recv_ioctx {
179 * struct srpt_send_ioctx - SRPT send I/O context. 179 * struct srpt_send_ioctx - SRPT send I/O context.
180 * @ioctx: See above. 180 * @ioctx: See above.
181 * @ch: Channel pointer. 181 * @ch: Channel pointer.
182 * @free_list: Node in srpt_rdma_ch.free_list.
182 * @n_rbuf: Number of data buffers in the received SRP command. 183 * @n_rbuf: Number of data buffers in the received SRP command.
183 * @rbufs: Pointer to SRP data buffer array. 184 * @rbufs: Pointer to SRP data buffer array.
184 * @single_rbuf: SRP data buffer if the command has only a single buffer. 185 * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@ struct srpt_send_ioctx {
201 struct srp_direct_buf *rbufs; 202 struct srp_direct_buf *rbufs;
202 struct srp_direct_buf single_rbuf; 203 struct srp_direct_buf single_rbuf;
203 struct scatterlist *sg; 204 struct scatterlist *sg;
205 struct list_head free_list;
204 spinlock_t spinlock; 206 spinlock_t spinlock;
205 enum srpt_command_state state; 207 enum srpt_command_state state;
206 struct se_cmd cmd; 208 struct se_cmd cmd;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, 155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
156 { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
156 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 157 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
157 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, 158 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
158 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, 159 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
304 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 305 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
305 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 306 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
306 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 307 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
308 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
307 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 309 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
308 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 310 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
309 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 311 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
178 input_set_drvdata(haptics->input_dev, haptics); 178 input_set_drvdata(haptics->input_dev, haptics);
179 179
180 haptics->input_dev->name = "arizona:haptics"; 180 haptics->input_dev->name = "arizona:haptics";
181 haptics->input_dev->dev.parent = pdev->dev.parent;
182 haptics->input_dev->close = arizona_haptics_close; 181 haptics->input_dev->close = arizona_haptics_close;
183 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit); 182 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
184 183
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay)) 353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
354 kpd_delay = 15625; 354 kpd_delay = 15625;
355 355
356 if (kpd_delay > 62500 || kpd_delay == 0) { 356 /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
357 if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
357 dev_err(&pdev->dev, "invalid power key trigger delay\n"); 358 dev_err(&pdev->dev, "invalid power key trigger delay\n");
358 return -EINVAL; 359 return -EINVAL;
359 } 360 }
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
385 pwr->name = "pmic8xxx_pwrkey"; 386 pwr->name = "pmic8xxx_pwrkey";
386 pwr->phys = "pmic8xxx_pwrkey/input0"; 387 pwr->phys = "pmic8xxx_pwrkey/input0";
387 388
388 delay = (kpd_delay << 10) / USEC_PER_SEC; 389 delay = (kpd_delay << 6) / USEC_PER_SEC;
389 delay = 1 + ilog2(delay); 390 delay = ilog2(delay);
390 391
391 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl); 392 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
392 if (err < 0) { 393 if (err < 0) {
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
222 222
223 info->input_dev->name = "twl4030:vibrator"; 223 info->input_dev->name = "twl4030:vibrator";
224 info->input_dev->id.version = 1; 224 info->input_dev->id.version = 1;
225 info->input_dev->dev.parent = pdev->dev.parent;
226 info->input_dev->close = twl4030_vibra_close; 225 info->input_dev->close = twl4030_vibra_close;
227 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
228 227
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..df3581f60628 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
45struct vibra_info { 45struct vibra_info {
46 struct device *dev; 46 struct device *dev;
47 struct input_dev *input_dev; 47 struct input_dev *input_dev;
48 struct workqueue_struct *workqueue;
49 struct work_struct play_work; 48 struct work_struct play_work;
50 struct mutex mutex; 49 struct mutex mutex;
51 int irq; 50 int irq;
@@ -182,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
182{ 181{
183 struct vibra_info *info = container_of(work, 182 struct vibra_info *info = container_of(work,
184 struct vibra_info, play_work); 183 struct vibra_info, play_work);
184 int ret;
185
186 /* Do not allow effect, while the routing is set to use audio */
187 ret = twl6040_get_vibralr_status(info->twl6040);
188 if (ret & TWL6040_VIBSEL) {
189 dev_info(info->dev, "Vibra is configured for audio\n");
190 return;
191 }
185 192
186 mutex_lock(&info->mutex); 193 mutex_lock(&info->mutex);
187 194
@@ -200,24 +207,12 @@ static int vibra_play(struct input_dev *input, void *data,
200 struct ff_effect *effect) 207 struct ff_effect *effect)
201{ 208{
202 struct vibra_info *info = input_get_drvdata(input); 209 struct vibra_info *info = input_get_drvdata(input);
203 int ret;
204
205 /* Do not allow effect, while the routing is set to use audio */
206 ret = twl6040_get_vibralr_status(info->twl6040);
207 if (ret & TWL6040_VIBSEL) {
208 dev_info(&input->dev, "Vibra is configured for audio\n");
209 return -EBUSY;
210 }
211 210
212 info->weak_speed = effect->u.rumble.weak_magnitude; 211 info->weak_speed = effect->u.rumble.weak_magnitude;
213 info->strong_speed = effect->u.rumble.strong_magnitude; 212 info->strong_speed = effect->u.rumble.strong_magnitude;
214 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1; 213 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
215 214
216 ret = queue_work(info->workqueue, &info->play_work); 215 schedule_work(&info->play_work);
217 if (!ret) {
218 dev_info(&input->dev, "work is already on queue\n");
219 return ret;
220 }
221 216
222 return 0; 217 return 0;
223} 218}
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
362 357
363 info->input_dev->name = "twl6040:vibrator"; 358 info->input_dev->name = "twl6040:vibrator";
364 info->input_dev->id.version = 1; 359 info->input_dev->id.version = 1;
365 info->input_dev->dev.parent = pdev->dev.parent;
366 info->input_dev->close = twl6040_vibra_close; 360 info->input_dev->close = twl6040_vibra_close;
367 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 361 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
368 362
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
858 goto err_free_buf; 858 goto err_free_buf;
859 } 859 }
860 860
861 /* Sanity check that a device has an endpoint */
862 if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
863 dev_err(&usbinterface->dev,
864 "Invalid number of endpoints\n");
865 error = -EINVAL;
866 goto err_free_urb;
867 }
868
861 /* 869 /*
862 * The endpoint is always altsetting 0, we know this since we know 870 * The endpoint is always altsetting 0, we know this since we know
863 * this device only has one interrupt endpoint 871 * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
879 * HID report descriptor 887 * HID report descriptor
880 */ 888 */
881 if (usb_get_extra_descriptor(usbinterface->cur_altsetting, 889 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
882 HID_DEVICE_TYPE, &hid_desc) != 0){ 890 HID_DEVICE_TYPE, &hid_desc) != 0) {
883 dev_err(&usbinterface->dev, 891 dev_err(&usbinterface->dev,
884 "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); 892 "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
885 error = -EIO; 893 error = -EIO;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2160512e861a..5af7907d0af4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
1093 return 0; 1093 return 0;
1094} 1094}
1095 1095
1096static int mxt_acquire_irq(struct mxt_data *data)
1097{
1098 int error;
1099
1100 enable_irq(data->irq);
1101
1102 error = mxt_process_messages_until_invalid(data);
1103 if (error)
1104 return error;
1105
1106 return 0;
1107}
1108
1096static int mxt_soft_reset(struct mxt_data *data) 1109static int mxt_soft_reset(struct mxt_data *data)
1097{ 1110{
1098 struct device *dev = &data->client->dev; 1111 struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
1111 /* Ignore CHG line for 100ms after reset */ 1124 /* Ignore CHG line for 100ms after reset */
1112 msleep(100); 1125 msleep(100);
1113 1126
1114 enable_irq(data->irq); 1127 mxt_acquire_irq(data);
1115 1128
1116 ret = mxt_wait_for_completion(data, &data->reset_completion, 1129 ret = mxt_wait_for_completion(data, &data->reset_completion,
1117 MXT_RESET_TIMEOUT); 1130 MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
1466 return ret; 1479 return ret;
1467} 1480}
1468 1481
1469static int mxt_acquire_irq(struct mxt_data *data)
1470{
1471 int error;
1472
1473 enable_irq(data->irq);
1474
1475 error = mxt_process_messages_until_invalid(data);
1476 if (error)
1477 return error;
1478
1479 return 0;
1480}
1481
1482static int mxt_get_info(struct mxt_data *data) 1482static int mxt_get_info(struct mxt_data *data)
1483{ 1483{
1484 struct i2c_client *client = data->client; 1484 struct i2c_client *client = data->client;
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaaf6bc3..7b3845aa5983 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
370 point.coord_x = point.coord_y = 0; 370 point.coord_x = point.coord_y = 0;
371 } 371 }
372 372
373 point.state = payload[9 * i + 5] & 0x03; 373 point.state = payload[9 * i + 5] & 0x0f;
374 point.id = (payload[9 * i + 5] & 0xfc) >> 2; 374 point.id = (payload[9 * i + 5] & 0xf0) >> 4;
375 375
376 /* determine touch major, minor and orientation */ 376 /* determine touch major, minor and orientation */
377 point.area_major = max(payload[9 * i + 6], 377 point.area_major = max(payload[9 * i + 6],
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
92 struct list_head dev_data_list; /* For global dev_data_list */ 92 struct list_head dev_data_list; /* For global dev_data_list */
93 struct protection_domain *domain; /* Domain the device is bound to */ 93 struct protection_domain *domain; /* Domain the device is bound to */
94 u16 devid; /* PCI Device ID */ 94 u16 devid; /* PCI Device ID */
95 u16 alias; /* Alias Device ID */
95 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
96 bool passthrough; /* Device is identity mapped */ 97 bool passthrough; /* Device is identity mapped */
97 struct { 98 struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
166 return container_of(dom, struct protection_domain, domain); 167 return container_of(dom, struct protection_domain, domain);
167} 168}
168 169
170static inline u16 get_device_id(struct device *dev)
171{
172 struct pci_dev *pdev = to_pci_dev(dev);
173
174 return PCI_DEVID(pdev->bus->number, pdev->devfn);
175}
176
169static struct iommu_dev_data *alloc_dev_data(u16 devid) 177static struct iommu_dev_data *alloc_dev_data(u16 devid)
170{ 178{
171 struct iommu_dev_data *dev_data; 179 struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
203 return dev_data; 211 return dev_data;
204} 212}
205 213
214static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
215{
216 *(u16 *)data = alias;
217 return 0;
218}
219
220static u16 get_alias(struct device *dev)
221{
222 struct pci_dev *pdev = to_pci_dev(dev);
223 u16 devid, ivrs_alias, pci_alias;
224
225 devid = get_device_id(dev);
226 ivrs_alias = amd_iommu_alias_table[devid];
227 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
228
229 if (ivrs_alias == pci_alias)
230 return ivrs_alias;
231
232 /*
233 * DMA alias showdown
234 *
235 * The IVRS is fairly reliable in telling us about aliases, but it
236 * can't know about every screwy device. If we don't have an IVRS
237 * reported alias, use the PCI reported alias. In that case we may
238 * still need to initialize the rlookup and dev_table entries if the
239 * alias is to a non-existent device.
240 */
241 if (ivrs_alias == devid) {
242 if (!amd_iommu_rlookup_table[pci_alias]) {
243 amd_iommu_rlookup_table[pci_alias] =
244 amd_iommu_rlookup_table[devid];
245 memcpy(amd_iommu_dev_table[pci_alias].data,
246 amd_iommu_dev_table[devid].data,
247 sizeof(amd_iommu_dev_table[pci_alias].data));
248 }
249
250 return pci_alias;
251 }
252
253 pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
254 "for device %s[%04x:%04x], kernel reported alias "
255 "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
256 PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
257 PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
258 PCI_FUNC(pci_alias));
259
260 /*
261 * If we don't have a PCI DMA alias and the IVRS alias is on the same
262 * bus, then the IVRS table may know about a quirk that we don't.
263 */
264 if (pci_alias == devid &&
265 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
266 pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
267 pdev->dma_alias_devfn = ivrs_alias & 0xff;
268 pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
269 PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
270 dev_name(dev));
271 }
272
273 return ivrs_alias;
274}
275
206static struct iommu_dev_data *find_dev_data(u16 devid) 276static struct iommu_dev_data *find_dev_data(u16 devid)
207{ 277{
208 struct iommu_dev_data *dev_data; 278 struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
215 return dev_data; 285 return dev_data;
216} 286}
217 287
218static inline u16 get_device_id(struct device *dev)
219{
220 struct pci_dev *pdev = to_pci_dev(dev);
221
222 return PCI_DEVID(pdev->bus->number, pdev->devfn);
223}
224
225static struct iommu_dev_data *get_dev_data(struct device *dev) 288static struct iommu_dev_data *get_dev_data(struct device *dev)
226{ 289{
227 return dev->archdata.iommu; 290 return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
349 if (!dev_data) 412 if (!dev_data)
350 return -ENOMEM; 413 return -ENOMEM;
351 414
415 dev_data->alias = get_alias(dev);
416
352 if (pci_iommuv2_capable(pdev)) { 417 if (pci_iommuv2_capable(pdev)) {
353 struct amd_iommu *iommu; 418 struct amd_iommu *iommu;
354 419
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
369 u16 devid, alias; 434 u16 devid, alias;
370 435
371 devid = get_device_id(dev); 436 devid = get_device_id(dev);
372 alias = amd_iommu_alias_table[devid]; 437 alias = get_alias(dev);
373 438
374 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); 439 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
375 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); 440 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
1061 int ret; 1126 int ret;
1062 1127
1063 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1128 iommu = amd_iommu_rlookup_table[dev_data->devid];
1064 alias = amd_iommu_alias_table[dev_data->devid]; 1129 alias = dev_data->alias;
1065 1130
1066 ret = iommu_flush_dte(iommu, dev_data->devid); 1131 ret = iommu_flush_dte(iommu, dev_data->devid);
1067 if (!ret && alias != dev_data->devid) 1132 if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2039 bool ats; 2104 bool ats;
2040 2105
2041 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2106 iommu = amd_iommu_rlookup_table[dev_data->devid];
2042 alias = amd_iommu_alias_table[dev_data->devid]; 2107 alias = dev_data->alias;
2043 ats = dev_data->ats.enabled; 2108 ats = dev_data->ats.enabled;
2044 2109
2045 /* Update data structures */ 2110 /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
2073 return; 2138 return;
2074 2139
2075 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2140 iommu = amd_iommu_rlookup_table[dev_data->devid];
2076 alias = amd_iommu_alias_table[dev_data->devid]; 2141 alias = dev_data->alias;
2077 2142
2078 /* decrease reference counters */ 2143 /* decrease reference counters */
2079 dev_data->domain->dev_iommu[iommu->index] -= 1; 2144 dev_data->domain->dev_iommu[iommu->index] -= 1;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3bd3df2..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
826 if (smmu_domain->smmu) 826 if (smmu_domain->smmu)
827 goto out_unlock; 827 goto out_unlock;
828 828
829 /* We're bypassing these SIDs, so don't allocate an actual context */
830 if (domain->type == IOMMU_DOMAIN_DMA) {
831 smmu_domain->smmu = smmu;
832 goto out_unlock;
833 }
834
829 /* 835 /*
830 * Mapping the requested stage onto what we support is surprisingly 836 * Mapping the requested stage onto what we support is surprisingly
831 * complicated, mainly because the spec allows S1+S2 SMMUs without 837 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
948 void __iomem *cb_base; 954 void __iomem *cb_base;
949 int irq; 955 int irq;
950 956
951 if (!smmu) 957 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
952 return; 958 return;
953 959
954 /* 960 /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1089 struct arm_smmu_device *smmu = smmu_domain->smmu; 1095 struct arm_smmu_device *smmu = smmu_domain->smmu;
1090 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1096 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1091 1097
1092 /* Devices in an IOMMU group may already be configured */
1093 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1094 if (ret)
1095 return ret == -EEXIST ? 0 : ret;
1096
1097 /* 1098 /*
1098 * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1099 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1099 * for all devices behind the SMMU. 1100 * for all devices behind the SMMU. Note that we need to take
1101 * care configuring SMRs for devices both a platform_device and
1102 * and a PCI device (i.e. a PCI host controller)
1100 */ 1103 */
1101 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1104 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1102 return 0; 1105 return 0;
1103 1106
1107 /* Devices in an IOMMU group may already be configured */
1108 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1109 if (ret)
1110 return ret == -EEXIST ? 0 : ret;
1111
1104 for (i = 0; i < cfg->num_streamids; ++i) { 1112 for (i = 0; i < cfg->num_streamids; ++i) {
1105 u32 idx, s2cr; 1113 u32 idx, s2cr;
1106 1114
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
403 unsigned int s_length = sg_dma_len(s); 403 unsigned int s_length = sg_dma_len(s);
404 unsigned int s_dma_len = s->length; 404 unsigned int s_dma_len = s->length;
405 405
406 s->offset = s_offset; 406 s->offset += s_offset;
407 s->length = s_length; 407 s->length = s_length;
408 sg_dma_address(s) = dma_addr + s_offset; 408 sg_dma_address(s) = dma_addr + s_offset;
409 dma_addr += s_dma_len; 409 dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
422 422
423 for_each_sg(sg, s, nents, i) { 423 for_each_sg(sg, s, nents, i) {
424 if (sg_dma_address(s) != DMA_ERROR_CODE) 424 if (sg_dma_address(s) != DMA_ERROR_CODE)
425 s->offset = sg_dma_address(s); 425 s->offset += sg_dma_address(s);
426 if (sg_dma_len(s)) 426 if (sg_dma_len(s))
427 s->length = sg_dma_len(s); 427 s->length = sg_dma_len(s);
428 sg_dma_address(s) = DMA_ERROR_CODE; 428 sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2458 } 2458 }
2459 2459
2460 /* register PCI DMA alias device */ 2460 /* register PCI DMA alias device */
2461 if (req_id != dma_alias && dev_is_pci(dev)) { 2461 if (dev_is_pci(dev) && req_id != dma_alias) {
2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), 2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2463 dma_alias & 0xff, NULL, domain); 2463 dma_alias & 0xff, NULL, domain);
2464 2464
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfd4f7c3b1d8..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
848 if (!group->default_domain) { 848 if (!group->default_domain) {
849 group->default_domain = __iommu_domain_alloc(dev->bus, 849 group->default_domain = __iommu_domain_alloc(dev->bus,
850 IOMMU_DOMAIN_DMA); 850 IOMMU_DOMAIN_DMA);
851 group->domain = group->default_domain; 851 if (!group->domain)
852 group->domain = group->default_domain;
852 } 853 }
853 854
854 ret = iommu_group_add_device(group, dev); 855 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a6f593a0a29e..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
315 int i; 315 int i;
316 316
317 for (i = 0; i < iommu->num_mmu; i++) 317 for (i = 0; i < iommu->num_mmu; i++)
318 active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 318 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
319 RK_MMU_STATUS_STALL_ACTIVE; 319 RK_MMU_STATUS_STALL_ACTIVE);
320 320
321 return active; 321 return active;
322} 322}
@@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
327 int i; 327 int i;
328 328
329 for (i = 0; i < iommu->num_mmu; i++) 329 for (i = 0; i < iommu->num_mmu; i++)
330 enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & 330 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
331 RK_MMU_STATUS_PAGING_ENABLED; 331 RK_MMU_STATUS_PAGING_ENABLED);
332 332
333 return enable; 333 return enable;
334} 334}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da0cfac..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
468 468
469 /* Update the pcpu_masks */ 469 /* Update the pcpu_masks */
470 for (i = 0; i < gic_vpes; i++) 470 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
471 clear_bit(irq, pcpu_masks[i].pcpu_mask); 471 clear_bit(irq, pcpu_masks[i].pcpu_mask);
472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
473 473
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
707 spin_lock_irqsave(&gic_lock, flags); 707 spin_lock_irqsave(&gic_lock, flags);
708 gic_map_to_pin(intr, gic_cpu_pin); 708 gic_map_to_pin(intr, gic_cpu_pin);
709 gic_map_to_vpe(intr, vpe); 709 gic_map_to_vpe(intr, vpe);
710 for (i = 0; i < gic_vpes; i++) 710 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
711 clear_bit(intr, pcpu_masks[i].pcpu_mask); 711 clear_bit(intr, pcpu_masks[i].pcpu_mask);
712 set_bit(intr, pcpu_masks[vpe].pcpu_mask); 712 set_bit(intr, pcpu_masks[vpe].pcpu_mask);
713 spin_unlock_irqrestore(&gic_lock, flags); 713 spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f46433..df7e05ca8f9c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
215 if (count == 0) 215 if (count == 0)
216 count = 32; 216 count = 32;
217 isac_empty_fifo(cs, count); 217 isac_empty_fifo(cs, count);
218 if ((count = cs->rcvidx) > 0) { 218 count = cs->rcvidx;
219 if (count > 0) {
219 cs->rcvidx = 0; 220 cs->rcvidx = 0;
220 if (!(skb = alloc_skb(count, GFP_ATOMIC))) 221 skb = alloc_skb(count, GFP_ATOMIC);
222 if (!skb)
221 printk(KERN_WARNING "HiSax: D receive out of memory\n"); 223 printk(KERN_WARNING "HiSax: D receive out of memory\n");
222 else { 224 else {
223 memcpy(skb_put(skb, count), cs->rcvbuf, count); 225 memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
251 cs->tx_skb = NULL; 253 cs->tx_skb = NULL;
252 } 254 }
253 } 255 }
254 if ((cs->tx_skb = skb_dequeue(&cs->sq))) { 256 cs->tx_skb = skb_dequeue(&cs->sq);
257 if (cs->tx_skb) {
255 cs->tx_cnt = 0; 258 cs->tx_cnt = 0;
256 isac_fill_fifo(cs); 259 isac_fill_fifo(cs);
257 } else 260 } else
@@ -313,7 +316,8 @@ afterXPR:
313#if ARCOFI_USE 316#if ARCOFI_USE
314 if (v1 & 0x08) { 317 if (v1 & 0x08) {
315 if (!cs->dc.isac.mon_rx) { 318 if (!cs->dc.isac.mon_rx) {
316 if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { 319 cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
320 if (!cs->dc.isac.mon_rx) {
317 if (cs->debug & L1_DEB_WARN) 321 if (cs->debug & L1_DEB_WARN)
318 debugl1(cs, "ISAC MON RX out of memory!"); 322 debugl1(cs, "ISAC MON RX out of memory!");
319 cs->dc.isac.mocr &= 0xf0; 323 cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
343 afterMONR0: 347 afterMONR0:
344 if (v1 & 0x80) { 348 if (v1 & 0x80) {
345 if (!cs->dc.isac.mon_rx) { 349 if (!cs->dc.isac.mon_rx) {
346 if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) { 350 cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
351 if (!cs->dc.isac.mon_rx) {
347 if (cs->debug & L1_DEB_WARN) 352 if (cs->debug & L1_DEB_WARN)
348 debugl1(cs, "ISAC MON RX out of memory!"); 353 debugl1(cs, "ISAC MON RX out of memory!");
349 cs->dc.isac.mocr &= 0x0f; 354 cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
715 if (!maddr || maddr->family != AF_ISDN) 715 if (!maddr || maddr->family != AF_ISDN)
716 return -EINVAL; 716 return -EINVAL;
717 717
718 if (addr_len < sizeof(struct sockaddr_mISDN))
719 return -EINVAL;
720
718 lock_sock(sk); 721 lock_sock(sk);
719 722
720 if (_pms(sk)->dev) { 723 if (_pms(sk)->dev) {
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
331 * Actually now I think of it, it's possible that Ron *is* half the Plan 9 331 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
332 * userbase. Oh well. 332 * userbase. Oh well.
333 */ 333 */
334static bool could_be_syscall(unsigned int num) 334bool could_be_syscall(unsigned int num)
335{ 335{
336 /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */ 336 /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
337 return num == IA32_SYSCALL_VECTOR || num == syscall_vector; 337 return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
416 * 416 *
417 * This routine indicates if a particular trap number could be delivered 417 * This routine indicates if a particular trap number could be delivered
418 * directly. 418 * directly.
419 *
420 * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
421 * trap gate for syscalls, so this trick is ineffective. See Mastery for
422 * how we could do this anyway...
419 */ 423 */
420static bool direct_trap(unsigned int num) 424static bool direct_trap(unsigned int num)
421{ 425{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
167bool send_notify_to_eventfd(struct lg_cpu *cpu); 167bool send_notify_to_eventfd(struct lg_cpu *cpu);
168void init_clockdev(struct lg_cpu *cpu); 168void init_clockdev(struct lg_cpu *cpu);
169bool check_syscall_vector(struct lguest *lg); 169bool check_syscall_vector(struct lguest *lg);
170bool could_be_syscall(unsigned int num);
170int init_interrupts(void); 171int init_interrupts(void);
171void free_interrupts(void); 172void free_interrupts(void);
172 173
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
429 return; 429 return;
430 break; 430 break;
431 case 32 ... 255: 431 case 32 ... 255:
432 /* This might be a syscall. */
433 if (could_be_syscall(cpu->regs->trapnum))
434 break;
435
432 /* 436 /*
433 * These values mean a real interrupt occurred, in which case 437 * Other values mean a real interrupt occurred, in which case
434 * the Host handler has already been run. We just do a 438 * the Host handler has already been run. We just do a
435 * friendly check if another process should now be run, then 439 * friendly check if another process should now be run, then
436 * return to run the Guest again. 440 * return to run the Guest again.
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf27274..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
46 size_t count, loff_t *ppos) 46 size_t count, loff_t *ppos)
47{ 47{
48 struct mbox_test_device *tdev = filp->private_data; 48 struct mbox_test_device *tdev = filp->private_data;
49 int ret;
50 49
51 if (!tdev->tx_channel) { 50 if (!tdev->tx_channel) {
52 dev_err(tdev->dev, "Channel cannot do Tx\n"); 51 dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
60 return -EINVAL; 59 return -EINVAL;
61 } 60 }
62 61
63 tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL); 62 /* Only allocate memory if we need to */
64 if (!tdev->signal) 63 if (!tdev->signal) {
65 return -ENOMEM; 64 tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
65 if (!tdev->signal)
66 return -ENOMEM;
67 }
66 68
67 ret = copy_from_user(tdev->signal, userbuf, count); 69 if (copy_from_user(tdev->signal, userbuf, count)) {
68 if (ret) {
69 kfree(tdev->signal); 70 kfree(tdev->signal);
71 tdev->signal = NULL;
70 return -EFAULT; 72 return -EFAULT;
71 } 73 }
72 74
73 return ret < 0 ? ret : count; 75 return count;
74} 76}
75 77
76static const struct file_operations mbox_test_signal_ops = { 78static const struct file_operations mbox_test_signal_ops = {
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39f0692..dd2afbca51c9 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
189 int i; 189 int i;
190 190
191 ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL); 191 ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
192 if (IS_ERR(ctx)) 192 if (!ctx)
193 return PTR_ERR(ctx); 193 return -ENOMEM;
194 194
195 platform_set_drvdata(pdev, ctx); 195 platform_set_drvdata(pdev, ctx);
196 196
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
375 375
376 if (!np) { 376 if (!np) {
377 dev_err(cl->dev, "%s() currently only supports DT\n", __func__); 377 dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
378 return ERR_PTR(-ENOSYS); 378 return ERR_PTR(-EINVAL);
379 } 379 }
380 380
381 if (!of_get_property(np, "mbox-names", NULL)) { 381 if (!of_get_property(np, "mbox-names", NULL)) {
382 dev_err(cl->dev, 382 dev_err(cl->dev,
383 "%s() requires an \"mbox-names\" property\n", __func__); 383 "%s() requires an \"mbox-names\" property\n", __func__);
384 return ERR_PTR(-ENOSYS); 384 return ERR_PTR(-EINVAL);
385 } 385 }
386 386
387 of_property_for_each_string(np, "mbox-names", prop, mbox_name) { 387 of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0ddf638d60f3..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void)
361 struct acpi_generic_address *db_reg; 361 struct acpi_generic_address *db_reg;
362 struct acpi_pcct_hw_reduced *pcct_ss; 362 struct acpi_pcct_hw_reduced *pcct_ss;
363 pcc_mbox_channels[i].con_priv = pcct_entry; 363 pcc_mbox_channels[i].con_priv = pcct_entry;
364 pcct_entry = (struct acpi_subtable_header *)
365 ((unsigned long) pcct_entry + pcct_entry->length);
366 364
367 /* If doorbell is in system memory cache the virt address */ 365 /* If doorbell is in system memory cache the virt address */
368 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; 366 pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void)
370 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 368 if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
371 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, 369 pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
372 db_reg->bit_width/8); 370 db_reg->bit_width/8);
371 pcct_entry = (struct acpi_subtable_header *)
372 ((unsigned long) pcct_entry + pcct_entry->length);
373 } 373 }
374 374
375 pcc_mbox_ctrl.num_chans = count; 375 pcc_mbox_ctrl.num_chans = count;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f1548a..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
322{ 322{
323 ClearPagePrivate(page); 323 ClearPagePrivate(page);
324 set_page_private(page, 0); 324 set_page_private(page, 0);
325 page_cache_release(page); 325 put_page(page);
326} 326}
327static void free_buffers(struct page *page) 327static void free_buffers(struct page *page)
328{ 328{
@@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
1673 if (!bitmap) /* there was no bitmap */ 1673 if (!bitmap) /* there was no bitmap */
1674 return; 1674 return;
1675 1675
1676 if (bitmap->sysfs_can_clear)
1677 sysfs_put(bitmap->sysfs_can_clear);
1678
1676 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && 1679 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1677 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) 1680 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1678 md_cluster_stop(bitmap->mddev); 1681 md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
1712 if (mddev->thread) 1715 if (mddev->thread)
1713 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; 1716 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1714 1717
1715 if (bitmap->sysfs_can_clear)
1716 sysfs_put(bitmap->sysfs_can_clear);
1717
1718 bitmap_free(bitmap); 1718 bitmap_free(bitmap);
1719} 1719}
1720 1720
1721/* 1721/*
1722 * initialize the bitmap structure 1722 * initialize the bitmap structure
1723 * if this returns an error, bitmap_destroy must be called to do clean up 1723 * if this returns an error, bitmap_destroy must be called to do clean up
1724 * once mddev->bitmap is set
1724 */ 1725 */
1725struct bitmap *bitmap_create(struct mddev *mddev, int slot) 1726struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1726{ 1727{
@@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1865 struct bitmap_counts *counts; 1866 struct bitmap_counts *counts;
1866 struct bitmap *bitmap = bitmap_create(mddev, slot); 1867 struct bitmap *bitmap = bitmap_create(mddev, slot);
1867 1868
1868 if (IS_ERR(bitmap)) 1869 if (IS_ERR(bitmap)) {
1870 bitmap_free(bitmap);
1869 return PTR_ERR(bitmap); 1871 return PTR_ERR(bitmap);
1872 }
1870 1873
1871 rv = bitmap_init_from_disk(bitmap, 0); 1874 rv = bitmap_init_from_disk(bitmap, 0);
1872 if (rv) 1875 if (rv)
@@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
2170 else { 2173 else {
2171 mddev->bitmap = bitmap; 2174 mddev->bitmap = bitmap;
2172 rv = bitmap_load(mddev); 2175 rv = bitmap_load(mddev);
2173 if (rv) { 2176 if (rv)
2174 bitmap_destroy(mddev);
2175 mddev->bitmap_info.offset = 0; 2177 mddev->bitmap_info.offset = 0;
2176 }
2177 } 2178 }
2178 mddev->pers->quiesce(mddev, 0); 2179 mddev->pers->quiesce(mddev, 0);
2179 if (rv) 2180 if (rv) {
2181 bitmap_destroy(mddev);
2180 return rv; 2182 return rv;
2183 }
2181 } 2184 }
2182 } 2185 }
2183 } 2186 }
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef300f8b..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
867 return 0; 867 return 0;
868} 868}
869 869
870#define WRITE_LOCK(cmd) \ 870static bool cmd_write_lock(struct dm_cache_metadata *cmd)
871 down_write(&cmd->root_lock); \ 871{
872 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 872 down_write(&cmd->root_lock);
873 up_write(&cmd->root_lock); \ 873 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
874 return -EINVAL; \ 874 up_write(&cmd->root_lock);
875 return false;
875 } 876 }
877 return true;
878}
876 879
877#define WRITE_LOCK_VOID(cmd) \ 880#define WRITE_LOCK(cmd) \
878 down_write(&cmd->root_lock); \ 881 do { \
879 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 882 if (!cmd_write_lock((cmd))) \
880 up_write(&cmd->root_lock); \ 883 return -EINVAL; \
881 return; \ 884 } while(0)
882 } 885
886#define WRITE_LOCK_VOID(cmd) \
887 do { \
888 if (!cmd_write_lock((cmd))) \
889 return; \
890 } while(0)
883 891
884#define WRITE_UNLOCK(cmd) \ 892#define WRITE_UNLOCK(cmd) \
885 up_write(&cmd->root_lock) 893 up_write(&(cmd)->root_lock)
886 894
887#define READ_LOCK(cmd) \ 895static bool cmd_read_lock(struct dm_cache_metadata *cmd)
888 down_read(&cmd->root_lock); \ 896{
889 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 897 down_read(&cmd->root_lock);
890 up_read(&cmd->root_lock); \ 898 if (cmd->fail_io) {
891 return -EINVAL; \ 899 up_read(&cmd->root_lock);
900 return false;
892 } 901 }
902 return true;
903}
893 904
894#define READ_LOCK_VOID(cmd) \ 905#define READ_LOCK(cmd) \
895 down_read(&cmd->root_lock); \ 906 do { \
896 if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \ 907 if (!cmd_read_lock((cmd))) \
897 up_read(&cmd->root_lock); \ 908 return -EINVAL; \
898 return; \ 909 } while(0)
899 } 910
911#define READ_LOCK_VOID(cmd) \
912 do { \
913 if (!cmd_read_lock((cmd))) \
914 return; \
915 } while(0)
900 916
901#define READ_UNLOCK(cmd) \ 917#define READ_UNLOCK(cmd) \
902 up_read(&cmd->root_lock) 918 up_read(&(cmd)->root_lock)
903 919
904int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size) 920int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
905{ 921{
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be4905769a45..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1662,8 +1662,10 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1662 tio = alloc_tio(ci, ti, target_bio_nr); 1662 tio = alloc_tio(ci, ti, target_bio_nr);
1663 tio->len_ptr = len; 1663 tio->len_ptr = len;
1664 r = clone_bio(tio, bio, sector, *len); 1664 r = clone_bio(tio, bio, sector, *len);
1665 if (r < 0) 1665 if (r < 0) {
1666 free_tio(ci->md, tio);
1666 break; 1667 break;
1668 }
1667 __map_bio(tio); 1669 __map_bio(tio);
1668 } 1670 }
1669 1671
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c068f171b4eb..14d3b37944df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
284 * go away inside make_request 284 * go away inside make_request
285 */ 285 */
286 sectors = bio_sectors(bio); 286 sectors = bio_sectors(bio);
287 /* bio could be mergeable after passing to underlayer */
288 bio->bi_rw &= ~REQ_NOMERGE;
287 mddev->pers->make_request(mddev, bio); 289 mddev->pers->make_request(mddev, bio);
288 290
289 cpu = part_stat_lock(); 291 cpu = part_stat_lock();
@@ -718,6 +720,7 @@ static void super_written(struct bio *bio)
718 720
719 if (atomic_dec_and_test(&mddev->pending_writes)) 721 if (atomic_dec_and_test(&mddev->pending_writes))
720 wake_up(&mddev->sb_wait); 722 wake_up(&mddev->sb_wait);
723 rdev_dec_pending(rdev, mddev);
721 bio_put(bio); 724 bio_put(bio);
722} 725}
723 726
@@ -732,6 +735,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
732 */ 735 */
733 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 736 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
734 737
738 atomic_inc(&rdev->nr_pending);
739
735 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 740 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
736 bio->bi_iter.bi_sector = sector; 741 bio->bi_iter.bi_sector = sector;
737 bio_add_page(bio, page, size, 0); 742 bio_add_page(bio, page, size, 0);
@@ -6883,7 +6888,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
6883 6888
6884 case ADD_NEW_DISK: 6889 case ADD_NEW_DISK:
6885 /* We can support ADD_NEW_DISK on read-only arrays 6890 /* We can support ADD_NEW_DISK on read-only arrays
6886 * on if we are re-adding a preexisting device. 6891 * only if we are re-adding a preexisting device.
6887 * So require mddev->pers and MD_DISK_SYNC. 6892 * So require mddev->pers and MD_DISK_SYNC.
6888 */ 6893 */
6889 if (mddev->pers) { 6894 if (mddev->pers) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6bf659..34783a3c8b3c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
70 (unsigned long long)zone_size>>1); 70 (unsigned long long)zone_size>>1);
71 zone_start = conf->strip_zone[j].zone_end; 71 zone_start = conf->strip_zone[j].zone_end;
72 } 72 }
73 printk(KERN_INFO "\n");
74} 73}
75 74
76static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) 75static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
85 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); 84 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
86 unsigned short blksize = 512; 85 unsigned short blksize = 512;
87 86
87 *private_conf = ERR_PTR(-ENOMEM);
88 if (!conf) 88 if (!conf)
89 return -ENOMEM; 89 return -ENOMEM;
90 rdev_for_each(rdev1, mddev) { 90 rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 39fb21e048e6..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
570 if (best_dist_disk < 0) { 570 if (best_dist_disk < 0) {
571 if (is_badblock(rdev, this_sector, sectors, 571 if (is_badblock(rdev, this_sector, sectors,
572 &first_bad, &bad_sectors)) { 572 &first_bad, &bad_sectors)) {
573 if (first_bad < this_sector) 573 if (first_bad <= this_sector)
574 /* Cannot use this */ 574 /* Cannot use this */
575 continue; 575 continue;
576 best_good_sectors = first_bad - this_sector; 576 best_good_sectors = first_bad - this_sector;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65e1741..e48c262ce032 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3502,8 +3502,6 @@ returnbi:
3502 dev = &sh->dev[i]; 3502 dev = &sh->dev[i];
3503 } else if (test_bit(R5_Discard, &dev->flags)) 3503 } else if (test_bit(R5_Discard, &dev->flags))
3504 discard_pending = 1; 3504 discard_pending = 1;
3505 WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
3506 WARN_ON(dev->page != dev->orig_page);
3507 } 3505 }
3508 3506
3509 r5l_stripe_write_finished(sh); 3507 r5l_stripe_write_finished(sh);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6e43c95629ea..3cfd7af8c5ca 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
846} 846}
847EXPORT_SYMBOL_GPL(media_device_find_devres); 847EXPORT_SYMBOL_GPL(media_device_find_devres);
848 848
849#if IS_ENABLED(CONFIG_PCI)
849void media_device_pci_init(struct media_device *mdev, 850void media_device_pci_init(struct media_device *mdev,
850 struct pci_dev *pci_dev, 851 struct pci_dev *pci_dev,
851 const char *name) 852 const char *name)
852{ 853{
853#ifdef CONFIG_PCI
854 mdev->dev = &pci_dev->dev; 854 mdev->dev = &pci_dev->dev;
855 855
856 if (name) 856 if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
866 mdev->driver_version = LINUX_VERSION_CODE; 866 mdev->driver_version = LINUX_VERSION_CODE;
867 867
868 media_device_init(mdev); 868 media_device_init(mdev);
869#endif
870} 869}
871EXPORT_SYMBOL_GPL(media_device_pci_init); 870EXPORT_SYMBOL_GPL(media_device_pci_init);
871#endif
872 872
873#if IS_ENABLED(CONFIG_USB)
873void __media_device_usb_init(struct media_device *mdev, 874void __media_device_usb_init(struct media_device *mdev,
874 struct usb_device *udev, 875 struct usb_device *udev,
875 const char *board_name, 876 const char *board_name,
876 const char *driver_name) 877 const char *driver_name)
877{ 878{
878#ifdef CONFIG_USB
879 mdev->dev = &udev->dev; 879 mdev->dev = &udev->dev;
880 880
881 if (driver_name) 881 if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
895 mdev->driver_version = LINUX_VERSION_CODE; 895 mdev->driver_version = LINUX_VERSION_CODE;
896 896
897 media_device_init(mdev); 897 media_device_init(mdev);
898#endif
899} 898}
900EXPORT_SYMBOL_GPL(__media_device_usb_init); 899EXPORT_SYMBOL_GPL(__media_device_usb_init);
900#endif
901 901
902 902
903#endif /* CONFIG_MEDIA_CONTROLLER */ 903#endif /* CONFIG_MEDIA_CONTROLLER */
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index feb521f28e14..4f494acd8150 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
1446 1446
1447 platform_set_drvdata(pdev, fmd); 1447 platform_set_drvdata(pdev, fmd);
1448 1448
1449 /* Protect the media graph while we're registering entities */
1450 mutex_lock(&fmd->media_dev.graph_mutex);
1451
1452 ret = fimc_md_register_platform_entities(fmd, dev->of_node); 1449 ret = fimc_md_register_platform_entities(fmd, dev->of_node);
1453 if (ret) { 1450 if (ret)
1454 mutex_unlock(&fmd->media_dev.graph_mutex);
1455 goto err_clk; 1451 goto err_clk;
1456 }
1457 1452
1458 ret = fimc_md_register_sensor_entities(fmd); 1453 ret = fimc_md_register_sensor_entities(fmd);
1459 if (ret) { 1454 if (ret)
1460 mutex_unlock(&fmd->media_dev.graph_mutex);
1461 goto err_m_ent; 1455 goto err_m_ent;
1462 }
1463
1464 mutex_unlock(&fmd->media_dev.graph_mutex);
1465 1456
1466 ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode); 1457 ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
1467 if (ret) 1458 if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 0b44b9accf50..af237af204e2 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
493 if (ret < 0) 493 if (ret < 0)
494 goto err_sens; 494 goto err_sens;
495 495
496 mutex_lock(&camif->media_dev.graph_mutex);
497
498 ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); 496 ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
499 if (ret < 0) 497 if (ret < 0)
500 goto err_unlock; 498 goto err_sens;
501 499
502 ret = camif_register_video_nodes(camif); 500 ret = camif_register_video_nodes(camif);
503 if (ret < 0) 501 if (ret < 0)
504 goto err_unlock; 502 goto err_sens;
505 503
506 ret = camif_create_media_links(camif); 504 ret = camif_create_media_links(camif);
507 if (ret < 0) 505 if (ret < 0)
508 goto err_unlock; 506 goto err_sens;
509
510 mutex_unlock(&camif->media_dev.graph_mutex);
511 507
512 ret = media_device_register(&camif->media_dev); 508 ret = media_device_register(&camif->media_dev);
513 if (ret < 0) 509 if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
516 pm_runtime_put(dev); 512 pm_runtime_put(dev);
517 return 0; 513 return 0;
518 514
519err_unlock:
520 mutex_unlock(&camif->media_dev.graph_mutex);
521err_sens: 515err_sens:
522 v4l2_device_unregister(&camif->v4l2_dev); 516 v4l2_device_unregister(&camif->v4l2_dev);
523 media_device_unregister(&camif->media_dev); 517 media_device_unregister(&camif->media_dev);
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
228 "au8522", 0x8e >> 1, NULL); 228 "au8522", 0x8e >> 1, NULL);
229 if (sd == NULL) 229 if (sd == NULL)
230 pr_err("analog subdev registration failed\n"); 230 pr_err("analog subdev registration failed\n");
231#ifdef CONFIG_MEDIA_CONTROLLER
232 if (sd)
233 dev->decoder = &sd->entity;
234#endif
235 } 231 }
236 232
237 /* Setup tuners */ 233 /* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8c8670..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
137#ifdef CONFIG_MEDIA_CONTROLLER 137#ifdef CONFIG_MEDIA_CONTROLLER
138 if (dev->media_dev && 138 if (dev->media_dev &&
139 media_devnode_is_registered(&dev->media_dev->devnode)) { 139 media_devnode_is_registered(&dev->media_dev->devnode)) {
140 /* clear enable_source, disable_source */
141 dev->media_dev->source_priv = NULL;
142 dev->media_dev->enable_source = NULL;
143 dev->media_dev->disable_source = NULL;
144
140 media_device_unregister(dev->media_dev); 145 media_device_unregister(dev->media_dev);
141 media_device_cleanup(dev->media_dev); 146 media_device_cleanup(dev->media_dev);
147 kfree(dev->media_dev);
142 dev->media_dev = NULL; 148 dev->media_dev = NULL;
143 } 149 }
144#endif 150#endif
@@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
166 Set the status so poll routines can check and avoid 172 Set the status so poll routines can check and avoid
167 access after disconnect. 173 access after disconnect.
168 */ 174 */
169 dev->dev_state = DEV_DISCONNECTED; 175 set_bit(DEV_DISCONNECTED, &dev->dev_state);
170 176
171 au0828_rc_unregister(dev); 177 au0828_rc_unregister(dev);
172 /* Digital TV */ 178 /* Digital TV */
@@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev,
192#ifdef CONFIG_MEDIA_CONTROLLER 198#ifdef CONFIG_MEDIA_CONTROLLER
193 struct media_device *mdev; 199 struct media_device *mdev;
194 200
195 mdev = media_device_get_devres(&udev->dev); 201 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
196 if (!mdev) 202 if (!mdev)
197 return -ENOMEM; 203 return -ENOMEM;
198 204
@@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev,
456{ 462{
457#ifdef CONFIG_MEDIA_CONTROLLER 463#ifdef CONFIG_MEDIA_CONTROLLER
458 int ret; 464 int ret;
459 struct media_entity *entity, *demod = NULL, *tuner = NULL; 465 struct media_entity *entity, *demod = NULL;
466 struct media_link *link;
460 467
461 if (!dev->media_dev) 468 if (!dev->media_dev)
462 return 0; 469 return 0;
@@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev,
482 } 489 }
483 490
484 /* 491 /*
485 * Find tuner and demod to disable the link between 492 * Find tuner, decoder and demod.
486 * the two to avoid disable step when tuner is requested 493 *
487 * by video or audio. Note that this step can't be done 494 * The tuner and decoder should be cached, as they'll be used by
488 * until dvb graph is created during dvb register. 495 * au0828_enable_source.
496 *
497 * It also needs to disable the link between tuner and
498 * decoder/demod, to avoid disable step when tuner is requested
499 * by video or audio. Note that this step can't be done until dvb
500 * graph is created during dvb register.
489 */ 501 */
490 media_device_for_each_entity(entity, dev->media_dev) { 502 media_device_for_each_entity(entity, dev->media_dev) {
491 if (entity->function == MEDIA_ENT_F_DTV_DEMOD) 503 switch (entity->function) {
504 case MEDIA_ENT_F_TUNER:
505 dev->tuner = entity;
506 break;
507 case MEDIA_ENT_F_ATV_DECODER:
508 dev->decoder = entity;
509 break;
510 case MEDIA_ENT_F_DTV_DEMOD:
492 demod = entity; 511 demod = entity;
493 else if (entity->function == MEDIA_ENT_F_TUNER) 512 break;
494 tuner = entity; 513 }
495 } 514 }
496 /* Disable link between tuner and demod */
497 if (tuner && demod) {
498 struct media_link *link;
499 515
500 list_for_each_entry(link, &demod->links, list) { 516 /* Disable link between tuner->demod and/or tuner->decoder */
501 if (link->sink->entity == demod && 517 if (dev->tuner) {
502 link->source->entity == tuner) { 518 list_for_each_entry(link, &dev->tuner->links, list) {
519 if (demod && link->sink->entity == demod)
520 media_entity_setup_link(link, 0);
521 if (dev->decoder && link->sink->entity == dev->decoder)
503 media_entity_setup_link(link, 0); 522 media_entity_setup_link(link, 0);
504 }
505 } 523 }
506 } 524 }
507 525
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
130 bool first = true; 130 bool first = true;
131 131
132 /* do nothing if device is disconnected */ 132 /* do nothing if device is disconnected */
133 if (ir->dev->dev_state == DEV_DISCONNECTED) 133 if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
134 return 0; 134 return 0;
135 135
136 /* Check IR int */ 136 /* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
260 cancel_delayed_work_sync(&ir->work); 260 cancel_delayed_work_sync(&ir->work);
261 261
262 /* do nothing if device is disconnected */ 262 /* do nothing if device is disconnected */
263 if (ir->dev->dev_state != DEV_DISCONNECTED) { 263 if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
264 /* Disable IR */ 264 /* Disable IR */
265 au8522_rc_clear(ir, 0xe0, 1 << 4); 265 au8522_rc_clear(ir, 0xe0, 1 << 4);
266 } 266 }
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab9ccc2..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
106 106
107static int check_dev(struct au0828_dev *dev) 107static int check_dev(struct au0828_dev *dev)
108{ 108{
109 if (dev->dev_state & DEV_DISCONNECTED) { 109 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
110 pr_info("v4l2 ioctl: device not present\n"); 110 pr_info("v4l2 ioctl: device not present\n");
111 return -ENODEV; 111 return -ENODEV;
112 } 112 }
113 113
114 if (dev->dev_state & DEV_MISCONFIGURED) { 114 if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
115 pr_info("v4l2 ioctl: device is misconfigured; " 115 pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
116 "close and open it again\n");
117 return -EIO; 116 return -EIO;
118 } 117 }
119 return 0; 118 return 0;
@@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
521 if (!dev) 520 if (!dev)
522 return 0; 521 return 0;
523 522
524 if ((dev->dev_state & DEV_DISCONNECTED) || 523 if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
525 (dev->dev_state & DEV_MISCONFIGURED)) 524 test_bit(DEV_MISCONFIGURED, &dev->dev_state))
526 return 0; 525 return 0;
527 526
528 if (urb->status < 0) { 527 if (urb->status < 0) {
@@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
824 int ret = 0; 823 int ret = 0;
825 824
826 dev->stream_state = STREAM_INTERRUPT; 825 dev->stream_state = STREAM_INTERRUPT;
827 if (dev->dev_state == DEV_DISCONNECTED) 826 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
828 return -ENODEV; 827 return -ENODEV;
829 else if (ret) { 828 else if (ret) {
830 dev->dev_state = DEV_MISCONFIGURED; 829 set_bit(DEV_MISCONFIGURED, &dev->dev_state);
831 dprintk(1, "%s device is misconfigured!\n", __func__); 830 dprintk(1, "%s device is misconfigured!\n", __func__);
832 return ret; 831 return ret;
833 } 832 }
@@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
1026 int ret; 1025 int ret;
1027 1026
1028 dprintk(1, 1027 dprintk(1,
1029 "%s called std_set %d dev_state %d stream users %d users %d\n", 1028 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1030 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1029 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1031 dev->streaming_users, dev->users); 1030 dev->streaming_users, dev->users);
1032 1031
@@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
1045 au0828_analog_stream_enable(dev); 1044 au0828_analog_stream_enable(dev);
1046 au0828_analog_stream_reset(dev); 1045 au0828_analog_stream_reset(dev);
1047 dev->stream_state = STREAM_OFF; 1046 dev->stream_state = STREAM_OFF;
1048 dev->dev_state |= DEV_INITIALIZED; 1047 set_bit(DEV_INITIALIZED, &dev->dev_state);
1049 } 1048 }
1050 dev->users++; 1049 dev->users++;
1051 mutex_unlock(&dev->lock); 1050 mutex_unlock(&dev->lock);
@@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
1059 struct video_device *vdev = video_devdata(filp); 1058 struct video_device *vdev = video_devdata(filp);
1060 1059
1061 dprintk(1, 1060 dprintk(1,
1062 "%s called std_set %d dev_state %d stream users %d users %d\n", 1061 "%s called std_set %d dev_state %ld stream users %d users %d\n",
1063 __func__, dev->std_set_in_tuner_core, dev->dev_state, 1062 __func__, dev->std_set_in_tuner_core, dev->dev_state,
1064 dev->streaming_users, dev->users); 1063 dev->streaming_users, dev->users);
1065 1064
@@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp)
1075 del_timer_sync(&dev->vbi_timeout); 1074 del_timer_sync(&dev->vbi_timeout);
1076 } 1075 }
1077 1076
1078 if (dev->dev_state == DEV_DISCONNECTED) 1077 if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
1079 goto end; 1078 goto end;
1080 1079
1081 if (dev->users == 1) { 1080 if (dev->users == 1) {
@@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
1135 .type = V4L2_TUNER_ANALOG_TV, 1134 .type = V4L2_TUNER_ANALOG_TV,
1136 }; 1135 };
1137 1136
1138 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1137 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1139 dev->std_set_in_tuner_core, dev->dev_state); 1138 dev->std_set_in_tuner_core, dev->dev_state);
1140 1139
1141 if (dev->std_set_in_tuner_core) 1140 if (dev->std_set_in_tuner_core)
@@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
1207 struct video_device *vdev = video_devdata(file); 1206 struct video_device *vdev = video_devdata(file);
1208 struct au0828_dev *dev = video_drvdata(file); 1207 struct au0828_dev *dev = video_drvdata(file);
1209 1208
1210 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1209 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1211 dev->std_set_in_tuner_core, dev->dev_state); 1210 dev->std_set_in_tuner_core, dev->dev_state);
1212 1211
1213 strlcpy(cap->driver, "au0828", sizeof(cap->driver)); 1212 strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
1250{ 1249{
1251 struct au0828_dev *dev = video_drvdata(file); 1250 struct au0828_dev *dev = video_drvdata(file);
1252 1251
1253 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1252 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1254 dev->std_set_in_tuner_core, dev->dev_state); 1253 dev->std_set_in_tuner_core, dev->dev_state);
1255 1254
1256 f->fmt.pix.width = dev->width; 1255 f->fmt.pix.width = dev->width;
@@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
1269{ 1268{
1270 struct au0828_dev *dev = video_drvdata(file); 1269 struct au0828_dev *dev = video_drvdata(file);
1271 1270
1272 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1271 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1273 dev->std_set_in_tuner_core, dev->dev_state); 1272 dev->std_set_in_tuner_core, dev->dev_state);
1274 1273
1275 return au0828_set_format(dev, VIDIOC_TRY_FMT, f); 1274 return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1281 struct au0828_dev *dev = video_drvdata(file); 1280 struct au0828_dev *dev = video_drvdata(file);
1282 int rc; 1281 int rc;
1283 1282
1284 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1283 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1285 dev->std_set_in_tuner_core, dev->dev_state); 1284 dev->std_set_in_tuner_core, dev->dev_state);
1286 1285
1287 rc = check_dev(dev); 1286 rc = check_dev(dev);
@@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
1303{ 1302{
1304 struct au0828_dev *dev = video_drvdata(file); 1303 struct au0828_dev *dev = video_drvdata(file);
1305 1304
1306 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1305 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1307 dev->std_set_in_tuner_core, dev->dev_state); 1306 dev->std_set_in_tuner_core, dev->dev_state);
1308 1307
1309 if (norm == dev->std) 1308 if (norm == dev->std)
@@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
1335{ 1334{
1336 struct au0828_dev *dev = video_drvdata(file); 1335 struct au0828_dev *dev = video_drvdata(file);
1337 1336
1338 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1337 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1339 dev->std_set_in_tuner_core, dev->dev_state); 1338 dev->std_set_in_tuner_core, dev->dev_state);
1340 1339
1341 *norm = dev->std; 1340 *norm = dev->std;
@@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
1357 [AU0828_VMUX_DVB] = "DVB", 1356 [AU0828_VMUX_DVB] = "DVB",
1358 }; 1357 };
1359 1358
1360 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1359 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1361 dev->std_set_in_tuner_core, dev->dev_state); 1360 dev->std_set_in_tuner_core, dev->dev_state);
1362 1361
1363 tmp = input->index; 1362 tmp = input->index;
@@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
1387{ 1386{
1388 struct au0828_dev *dev = video_drvdata(file); 1387 struct au0828_dev *dev = video_drvdata(file);
1389 1388
1390 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1389 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1391 dev->std_set_in_tuner_core, dev->dev_state); 1390 dev->std_set_in_tuner_core, dev->dev_state);
1392 1391
1393 *i = dev->ctrl_input; 1392 *i = dev->ctrl_input;
@@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
1398{ 1397{
1399 int i; 1398 int i;
1400 1399
1401 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1400 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1402 dev->std_set_in_tuner_core, dev->dev_state); 1401 dev->std_set_in_tuner_core, dev->dev_state);
1403 1402
1404 switch (AUVI_INPUT(index).type) { 1403 switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
1496{ 1495{
1497 struct au0828_dev *dev = video_drvdata(file); 1496 struct au0828_dev *dev = video_drvdata(file);
1498 1497
1499 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1498 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1500 dev->std_set_in_tuner_core, dev->dev_state); 1499 dev->std_set_in_tuner_core, dev->dev_state);
1501 1500
1502 a->index = dev->ctrl_ainput; 1501 a->index = dev->ctrl_ainput;
@@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
1516 if (a->index != dev->ctrl_ainput) 1515 if (a->index != dev->ctrl_ainput)
1517 return -EINVAL; 1516 return -EINVAL;
1518 1517
1519 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1518 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1520 dev->std_set_in_tuner_core, dev->dev_state); 1519 dev->std_set_in_tuner_core, dev->dev_state);
1521 return 0; 1520 return 0;
1522} 1521}
@@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
1534 if (ret) 1533 if (ret)
1535 return ret; 1534 return ret;
1536 1535
1537 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1536 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1538 dev->std_set_in_tuner_core, dev->dev_state); 1537 dev->std_set_in_tuner_core, dev->dev_state);
1539 1538
1540 strcpy(t->name, "Auvitek tuner"); 1539 strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
1554 if (t->index != 0) 1553 if (t->index != 0)
1555 return -EINVAL; 1554 return -EINVAL;
1556 1555
1557 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1556 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1558 dev->std_set_in_tuner_core, dev->dev_state); 1557 dev->std_set_in_tuner_core, dev->dev_state);
1559 1558
1560 au0828_init_tuner(dev); 1559 au0828_init_tuner(dev);
@@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
1576 1575
1577 if (freq->tuner != 0) 1576 if (freq->tuner != 0)
1578 return -EINVAL; 1577 return -EINVAL;
1579 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1578 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1580 dev->std_set_in_tuner_core, dev->dev_state); 1579 dev->std_set_in_tuner_core, dev->dev_state);
1581 freq->frequency = dev->ctrl_freq; 1580 freq->frequency = dev->ctrl_freq;
1582 return 0; 1581 return 0;
@@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
1591 if (freq->tuner != 0) 1590 if (freq->tuner != 0)
1592 return -EINVAL; 1591 return -EINVAL;
1593 1592
1594 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1593 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1595 dev->std_set_in_tuner_core, dev->dev_state); 1594 dev->std_set_in_tuner_core, dev->dev_state);
1596 1595
1597 au0828_init_tuner(dev); 1596 au0828_init_tuner(dev);
@@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
1617{ 1616{
1618 struct au0828_dev *dev = video_drvdata(file); 1617 struct au0828_dev *dev = video_drvdata(file);
1619 1618
1620 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1619 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1621 dev->std_set_in_tuner_core, dev->dev_state); 1620 dev->std_set_in_tuner_core, dev->dev_state);
1622 1621
1623 format->fmt.vbi.samples_per_line = dev->vbi_width; 1622 format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
1643 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1642 if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1644 return -EINVAL; 1643 return -EINVAL;
1645 1644
1646 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1645 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1647 dev->std_set_in_tuner_core, dev->dev_state); 1646 dev->std_set_in_tuner_core, dev->dev_state);
1648 1647
1649 cc->bounds.left = 0; 1648 cc->bounds.left = 0;
@@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
1665{ 1664{
1666 struct au0828_dev *dev = video_drvdata(file); 1665 struct au0828_dev *dev = video_drvdata(file);
1667 1666
1668 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1667 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1669 dev->std_set_in_tuner_core, dev->dev_state); 1668 dev->std_set_in_tuner_core, dev->dev_state);
1670 1669
1671 reg->val = au0828_read(dev, reg->reg); 1670 reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
1678{ 1677{
1679 struct au0828_dev *dev = video_drvdata(file); 1678 struct au0828_dev *dev = video_drvdata(file);
1680 1679
1681 dprintk(1, "%s called std_set %d dev_state %d\n", __func__, 1680 dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
1682 dev->std_set_in_tuner_core, dev->dev_state); 1681 dev->std_set_in_tuner_core, dev->dev_state);
1683 1682
1684 return au0828_writereg(dev, reg->reg, reg->val); 1683 return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f8510fb77..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 23
24#include <linux/bitops.h>
24#include <linux/usb.h> 25#include <linux/usb.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
26#include <linux/i2c-algo-bit.h> 27#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
121 122
122/* device state */ 123/* device state */
123enum au0828_dev_state { 124enum au0828_dev_state {
124 DEV_INITIALIZED = 0x01, 125 DEV_INITIALIZED = 0,
125 DEV_DISCONNECTED = 0x02, 126 DEV_DISCONNECTED = 1,
126 DEV_MISCONFIGURED = 0x04 127 DEV_MISCONFIGURED = 2
127}; 128};
128 129
129struct au0828_dev; 130struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
247 int input_type; 248 int input_type;
248 int std_set_in_tuner_core; 249 int std_set_in_tuner_core;
249 unsigned int ctrl_input; 250 unsigned int ctrl_input;
250 enum au0828_dev_state dev_state; 251 long unsigned int dev_state; /* defined at enum au0828_dev_state */;
251 enum au0828_stream_state stream_state; 252 enum au0828_stream_state stream_state;
252 wait_queue_head_t open; 253 wait_queue_head_t open;
253 254
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
1452 printk(KERN_INFO "%s: %s found\n", __func__, 1452 printk(KERN_INFO "%s: %s found\n", __func__,
1453 usbvision_device_data[model].model_string); 1453 usbvision_device_data[model].model_string);
1454 1454
1455 /*
1456 * this is a security check.
1457 * an exploit using an incorrect bInterfaceNumber is known
1458 */
1459 if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
1460 return -ENODEV;
1461
1462 if (usbvision_device_data[model].interface >= 0) 1455 if (usbvision_device_data[model].interface >= 0)
1463 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; 1456 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
1464 else if (ifnum < dev->actconfig->desc.bNumInterfaces) 1457 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 2a7b79bc90fd..2228cd3a846e 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -34,7 +34,7 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
34{ 34{
35 struct media_entity *entity; 35 struct media_entity *entity;
36 struct media_entity *if_vid = NULL, *if_aud = NULL; 36 struct media_entity *if_vid = NULL, *if_aud = NULL;
37 struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL; 37 struct media_entity *tuner = NULL, *decoder = NULL;
38 struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL; 38 struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
39 bool is_webcam = false; 39 bool is_webcam = false;
40 u32 flags; 40 u32 flags;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c6bd6..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
349 349
350 if (dma->pages) { 350 if (dma->pages) {
351 for (i = 0; i < dma->nr_pages; i++) 351 for (i = 0; i < dma->nr_pages; i++)
352 page_cache_release(dma->pages[i]); 352 put_page(dma->pages[i]);
353 kfree(dma->pages); 353 kfree(dma->pages);
354 dma->pages = NULL; 354 dma->pages = NULL;
355 } 355 }
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1645 * Will sleep if required for nonblocking == false. 1645 * Will sleep if required for nonblocking == false.
1646 */ 1646 */
1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1648 int nonblocking) 1648 void *pb, int nonblocking)
1649{ 1649{
1650 unsigned long flags; 1650 unsigned long flags;
1651 int ret; 1651 int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1666 /* 1666 /*
1667 * Only remove the buffer from done_list if v4l2_buffer can handle all 1667 * Only remove the buffer from done_list if v4l2_buffer can handle all
1668 * the planes. 1668 * the planes.
1669 * Verifying planes is NOT necessary since it already has been checked
1670 * before the buffer is queued/prepared. So it can never fail.
1671 */ 1669 */
1672 list_del(&(*vb)->done_entry); 1670 ret = call_bufop(q, verify_planes_array, *vb, pb);
1671 if (!ret)
1672 list_del(&(*vb)->done_entry);
1673 spin_unlock_irqrestore(&q->done_lock, flags); 1673 spin_unlock_irqrestore(&q->done_lock, flags);
1674 1674
1675 return ret; 1675 return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
1748 struct vb2_buffer *vb = NULL; 1748 struct vb2_buffer *vb = NULL;
1749 int ret; 1749 int ret;
1750 1750
1751 ret = __vb2_get_done_vb(q, &vb, nonblocking); 1751 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
1752 if (ret < 0) 1752 if (ret < 0)
1753 return ret; 1753 return ret;
1754 1754
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
2298 return POLLERR; 2298 return POLLERR;
2299 2299
2300 /* 2300 /*
2301 * If this quirk is set and QBUF hasn't been called yet then
2302 * return POLLERR as well. This only affects capture queues, output
2303 * queues will always initialize waiting_for_buffers to false.
2304 * This quirk is set by V4L2 for backwards compatibility reasons.
2305 */
2306 if (q->quirk_poll_must_check_waiting_for_buffers &&
2307 q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
2308 return POLLERR;
2309
2310 /*
2301 * For output streams you can call write() as long as there are fewer 2311 * For output streams you can call write() as long as there are fewer
2302 * buffers queued than there are buffers available. 2312 * buffers queued than there are buffers available.
2303 */ 2313 */
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
49 vec = frame_vector_create(nr); 49 vec = frame_vector_create(nr);
50 if (!vec) 50 if (!vec)
51 return ERR_PTR(-ENOMEM); 51 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start, nr, write, 1, vec); 52 ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
53 if (ret < 0) 53 if (ret < 0)
54 goto out_destroy; 54 goto out_destroy;
55 /* We accept only complete set of PFNs */ 55 /* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
74 return 0; 74 return 0;
75} 75}
76 76
77static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
78{
79 return __verify_planes_array(vb, pb);
80}
81
77/** 82/**
78 * __verify_length() - Verify that the bytesused value for each plane fits in 83 * __verify_length() - Verify that the bytesused value for each plane fits in
79 * the plane length and that the data offset doesn't exceed the bytesused value. 84 * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
437} 442}
438 443
439static const struct vb2_buf_ops v4l2_buf_ops = { 444static const struct vb2_buf_ops v4l2_buf_ops = {
445 .verify_planes_array = __verify_planes_array_core,
440 .fill_user_buffer = __fill_v4l2_buffer, 446 .fill_user_buffer = __fill_v4l2_buffer,
441 .fill_vb2_buffer = __fill_vb2_buffer, 447 .fill_vb2_buffer = __fill_vb2_buffer,
442 .copy_timestamp = __copy_timestamp, 448 .copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
765 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); 771 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
766 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) 772 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
767 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 773 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
774 /*
775 * For compatibility with vb1: if QBUF hasn't been called yet, then
776 * return POLLERR as well. This only affects capture queues, output
777 * queues will always initialize waiting_for_buffers to false.
778 */
779 q->quirk_poll_must_check_waiting_for_buffers = true;
768 780
769 return vb2_core_queue_init(q); 781 return vb2_core_queue_init(q);
770} 782}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
818 poll_wait(file, &fh->wait, wait); 830 poll_wait(file, &fh->wait, wait);
819 } 831 }
820 832
821 /*
822 * For compatibility with vb1: if QBUF hasn't been called yet, then
823 * return POLLERR as well. This only affects capture queues, output
824 * queues will always initialize waiting_for_buffers to false.
825 */
826 if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
827 return POLLERR;
828
829 return res | vb2_core_poll(q, file, wait); 833 return res | vb2_core_poll(q, file, wait);
830} 834}
831EXPORT_SYMBOL_GPL(vb2_poll); 835EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); 223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
225 225
226 /*
227 * Wait until no further interrupts are presented by the PSL
228 * for this context.
229 */
230 if (cxl_ops->irq_wait)
231 cxl_ops->irq_wait(ctx);
232
226 /* release the reference to the group leader and mm handling pid */ 233 /* release the reference to the group leader and mm handling pid */
227 put_pid(ctx->pid); 234 put_pid(ctx->pid);
228 put_pid(ctx->glpid); 235 put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ 274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ 275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ 276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
277#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
277/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ 278/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
278#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ 279#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
279#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ 280#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
855 u64 dsisr, u64 errstat); 856 u64 dsisr, u64 errstat);
856 irqreturn_t (*psl_interrupt)(int irq, void *data); 857 irqreturn_t (*psl_interrupt)(int irq, void *data);
857 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); 858 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
859 void (*irq_wait)(struct cxl_context *ctx);
858 int (*attach_process)(struct cxl_context *ctx, bool kernel, 860 int (*attach_process)(struct cxl_context *ctx, bool kernel,
859 u64 wed, u64 amr); 861 u64 wed, u64 amr);
860 int (*detach_process)(struct cxl_context *ctx); 862 int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
203void cxl_unmap_irq(unsigned int virq, void *cookie) 203void cxl_unmap_irq(unsigned int virq, void *cookie)
204{ 204{
205 free_irq(virq, cookie); 205 free_irq(virq, cookie);
206 irq_dispose_mapping(virq);
207} 206}
208 207
209int cxl_register_one_irq(struct cxl *adapter, 208int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/delay.h>
17#include <asm/synch.h> 18#include <asm/synch.h>
18#include <misc/cxl-base.h> 19#include <misc/cxl-base.h>
19 20
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
797 return fail_psl_irq(afu, &irq_info); 798 return fail_psl_irq(afu, &irq_info);
798} 799}
799 800
801void native_irq_wait(struct cxl_context *ctx)
802{
803 u64 dsisr;
804 int timeout = 1000;
805 int ph;
806
807 /*
808 * Wait until no further interrupts are presented by the PSL
809 * for this context.
810 */
811 while (timeout--) {
812 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
813 if (ph != ctx->pe)
814 return;
815 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
816 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
817 return;
818 /*
819 * We are waiting for the workqueue to process our
820 * irq, so need to let that run here.
821 */
822 msleep(1);
823 }
824
825 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
826 " DSISR %016llx!\n", ph, dsisr);
827 return;
828}
829
800static irqreturn_t native_slice_irq_err(int irq, void *data) 830static irqreturn_t native_slice_irq_err(int irq, void *data)
801{ 831{
802 struct cxl_afu *afu = data; 832 struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
1076 .handle_psl_slice_error = native_handle_psl_slice_error, 1106 .handle_psl_slice_error = native_handle_psl_slice_error,
1077 .psl_interrupt = NULL, 1107 .psl_interrupt = NULL,
1078 .ack_irq = native_ack_irq, 1108 .ack_irq = native_ack_irq,
1109 .irq_wait = native_irq_wait,
1079 .attach_process = native_attach_process, 1110 .attach_process = native_attach_process,
1080 .detach_process = native_detach_process, 1111 .detach_process = native_detach_process,
1081 .support_attributes = native_support_attributes, 1112 .support_attributes = native_support_attributes,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
116{ 116{
117 struct inode *root; 117 struct inode *root;
118 118
119 sb->s_blocksize = PAGE_CACHE_SIZE; 119 sb->s_blocksize = PAGE_SIZE;
120 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 120 sb->s_blocksize_bits = PAGE_SHIFT;
121 sb->s_magic = IBMASMFS_MAGIC; 121 sb->s_magic = IBMASMFS_MAGIC;
122 sb->s_op = &ibmasmfs_s_ops; 122 sb->s_op = &ibmasmfs_s_ops;
123 sb->s_time_gran = 1; 123 sb->s_time_gran = 1;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b8fbb0..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@ static void lkdtm_do_action(enum ctype which)
458 break; 458 break;
459 459
460 val = kmalloc(len, GFP_KERNEL); 460 val = kmalloc(len, GFP_KERNEL);
461 if (!val) 461 if (!val) {
462 kfree(base);
462 break; 463 break;
464 }
463 465
464 *val = 0x12345678; 466 *val = 0x12345678;
465 base[offset] = *val; 467 base[offset] = *val;
@@ -498,14 +500,17 @@ static void lkdtm_do_action(enum ctype which)
498 } 500 }
499 case CT_READ_BUDDY_AFTER_FREE: { 501 case CT_READ_BUDDY_AFTER_FREE: {
500 unsigned long p = __get_free_page(GFP_KERNEL); 502 unsigned long p = __get_free_page(GFP_KERNEL);
501 int saw, *val = kmalloc(1024, GFP_KERNEL); 503 int saw, *val;
502 int *base; 504 int *base;
503 505
504 if (!p) 506 if (!p)
505 break; 507 break;
506 508
507 if (!val) 509 val = kmalloc(1024, GFP_KERNEL);
510 if (!val) {
511 free_page(p);
508 break; 512 break;
513 }
509 514
510 base = (int *)p; 515 base = (int *)p;
511 516
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index e94c7fb6712a..88e45234d527 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
945 ret = -EFAULT; 945 ret = -EFAULT;
946 goto free_ret; 946 goto free_ret;
947 } 947 }
948 /* Ensure desc has not changed between the two reads */
949 if (memcmp(&dd, dd_config, sizeof(dd))) {
950 ret = -EINVAL;
951 goto free_ret;
952 }
948 mutex_lock(&vdev->vdev_mutex); 953 mutex_lock(&vdev->vdev_mutex);
949 mutex_lock(&vi->vop_mutex); 954 mutex_lock(&vi->vop_mutex);
950 ret = vop_virtio_add_device(vdev, dd_config); 955 ret = vop_virtio_add_device(vdev, dd_config);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
728 if (dirty) 728 if (dirty)
729 set_page_dirty(pages[i]); 729 set_page_dirty(pages[i]);
730 730
731 page_cache_release(pages[i]); 731 put_page(pages[i]);
732 pages[i] = NULL; 732 pages[i] = NULL;
733 } 733 }
734} 734}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50a363f..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
86 86
87/* TODO: Replace these with struct ida */ 87/* TODO: Replace these with struct ida */
88static DECLARE_BITMAP(dev_use, MAX_DEVICES); 88static DECLARE_BITMAP(dev_use, MAX_DEVICES);
89static DECLARE_BITMAP(name_use, MAX_DEVICES);
90 89
91/* 90/*
92 * There is one mmc_blk_data per slot. 91 * There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
105 unsigned int usage; 104 unsigned int usage;
106 unsigned int read_only; 105 unsigned int read_only;
107 unsigned int part_type; 106 unsigned int part_type;
108 unsigned int name_idx;
109 unsigned int reset_done; 107 unsigned int reset_done;
110#define MMC_BLK_READ BIT(0) 108#define MMC_BLK_READ BIT(0)
111#define MMC_BLK_WRITE BIT(1) 109#define MMC_BLK_WRITE BIT(1)
@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2202 goto out; 2200 goto out;
2203 } 2201 }
2204 2202
2205 /*
2206 * !subname implies we are creating main mmc_blk_data that will be
2207 * associated with mmc_card with dev_set_drvdata. Due to device
2208 * partitions, devidx will not coincide with a per-physical card
2209 * index anymore so we keep track of a name index.
2210 */
2211 if (!subname) {
2212 md->name_idx = find_first_zero_bit(name_use, max_devices);
2213 __set_bit(md->name_idx, name_use);
2214 } else
2215 md->name_idx = ((struct mmc_blk_data *)
2216 dev_to_disk(parent)->private_data)->name_idx;
2217
2218 md->area_type = area_type; 2203 md->area_type = area_type;
2219 2204
2220 /* 2205 /*
@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2264 */ 2249 */
2265 2250
2266 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 2251 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2267 "mmcblk%u%s", md->name_idx, subname ? subname : ""); 2252 "mmcblk%u%s", card->host->index, subname ? subname : "");
2268 2253
2269 if (mmc_card_mmc(card)) 2254 if (mmc_card_mmc(card))
2270 blk_queue_logical_block_size(md->queue.queue, 2255 blk_queue_logical_block_size(md->queue.queue,
@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
2418 struct list_head *pos, *q; 2403 struct list_head *pos, *q;
2419 struct mmc_blk_data *part_md; 2404 struct mmc_blk_data *part_md;
2420 2405
2421 __clear_bit(md->name_idx, name_use);
2422 list_for_each_safe(pos, q, &md->part) { 2406 list_for_each_safe(pos, q, &md->part) {
2423 part_md = list_entry(pos, struct mmc_blk_data, part); 2407 part_md = list_entry(pos, struct mmc_blk_data, part);
2424 list_del(pos); 2408 list_del(pos);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607611d8..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
356 * They have to set these according to their abilities. 356 * They have to set these according to their abilities.
357 */ 357 */
358 host->max_segs = 1; 358 host->max_segs = 1;
359 host->max_seg_size = PAGE_CACHE_SIZE; 359 host->max_seg_size = PAGE_SIZE;
360 360
361 host->max_req_size = PAGE_CACHE_SIZE; 361 host->max_req_size = PAGE_SIZE;
362 host->max_blk_size = 512; 362 host->max_blk_size = 512;
363 host->max_blk_count = PAGE_CACHE_SIZE / 512; 363 host->max_blk_count = PAGE_SIZE / 512;
364 364
365 return host; 365 return host;
366} 366}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
97config MMC_SDHCI_ACPI 97config MMC_SDHCI_ACPI
98 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 98 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
99 depends on MMC_SDHCI && ACPI 99 depends on MMC_SDHCI && ACPI
100 select IOSF_MBI if X86
100 help 101 help
101 This selects support for ACPI enumerated SDHCI controllers, 102 This selects support for ACPI enumerated SDHCI controllers,
102 identified by ACPI Compatibility ID PNP0D40 or specific 103 identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
41#include <linux/mmc/pm.h> 41#include <linux/mmc/pm.h>
42#include <linux/mmc/slot-gpio.h> 42#include <linux/mmc/slot-gpio.h>
43 43
44#ifdef CONFIG_X86
45#include <asm/cpu_device_id.h>
46#include <asm/iosf_mbi.h>
47#endif
48
44#include "sdhci.h" 49#include "sdhci.h"
45 50
46enum { 51enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
116 .ops = &sdhci_acpi_ops_int, 121 .ops = &sdhci_acpi_ops_int,
117}; 122};
118 123
124#ifdef CONFIG_X86
125
126static bool sdhci_acpi_byt(void)
127{
128 static const struct x86_cpu_id byt[] = {
129 { X86_VENDOR_INTEL, 6, 0x37 },
130 {}
131 };
132
133 return x86_match_cpu(byt);
134}
135
136#define BYT_IOSF_SCCEP 0x63
137#define BYT_IOSF_OCP_NETCTRL0 0x1078
138#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
139
140static void sdhci_acpi_byt_setting(struct device *dev)
141{
142 u32 val = 0;
143
144 if (!sdhci_acpi_byt())
145 return;
146
147 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
148 &val)) {
149 dev_err(dev, "%s read error\n", __func__);
150 return;
151 }
152
153 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
154 return;
155
156 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
157
158 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
159 val)) {
160 dev_err(dev, "%s write error\n", __func__);
161 return;
162 }
163
164 dev_dbg(dev, "%s completed\n", __func__);
165}
166
167static bool sdhci_acpi_byt_defer(struct device *dev)
168{
169 if (!sdhci_acpi_byt())
170 return false;
171
172 if (!iosf_mbi_available())
173 return true;
174
175 sdhci_acpi_byt_setting(dev);
176
177 return false;
178}
179
180#else
181
182static inline void sdhci_acpi_byt_setting(struct device *dev)
183{
184}
185
186static inline bool sdhci_acpi_byt_defer(struct device *dev)
187{
188 return false;
189}
190
191#endif
192
119static int bxt_get_cd(struct mmc_host *mmc) 193static int bxt_get_cd(struct mmc_host *mmc)
120{ 194{
121 int gpio_cd = mmc_gpio_get_cd(mmc); 195 int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
322 if (acpi_bus_get_status(device) || !device->status.present) 396 if (acpi_bus_get_status(device) || !device->status.present)
323 return -ENODEV; 397 return -ENODEV;
324 398
399 if (sdhci_acpi_byt_defer(dev))
400 return -EPROBE_DEFER;
401
325 hid = acpi_device_hid(device); 402 hid = acpi_device_hid(device);
326 uid = device->pnp.unique_id; 403 uid = device->pnp.unique_id;
327 404
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
447{ 524{
448 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 525 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
449 526
527 sdhci_acpi_byt_setting(&c->pdev->dev);
528
450 return sdhci_resume_host(c->host); 529 return sdhci_resume_host(c->host);
451} 530}
452 531
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
470{ 549{
471 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 550 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
472 551
552 sdhci_acpi_byt_setting(&c->pdev->dev);
553
473 return sdhci_runtime_resume_host(c->host); 554 return sdhci_runtime_resume_host(c->host);
474} 555}
475 556
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 62aa5d0efcee..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
390 slot->cd_idx = 0; 390 slot->cd_idx = 0;
391 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) 394 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 395 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395 396
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
1173 1174
1174 { 1175 {
1175 .vendor = PCI_VENDOR_ID_INTEL, 1176 .vendor = PCI_VENDOR_ID_INTEL,
1177 .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
1178 .subvendor = PCI_ANY_ID,
1179 .subdevice = PCI_ANY_ID,
1180 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
1181 },
1182
1183 {
1184 .vendor = PCI_VENDOR_ID_INTEL,
1185 .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
1186 .subvendor = PCI_ANY_ID,
1187 .subdevice = PCI_ANY_ID,
1188 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
1189 },
1190
1191 {
1192 .vendor = PCI_VENDOR_ID_INTEL,
1193 .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
1194 .subvendor = PCI_ANY_ID,
1195 .subdevice = PCI_ANY_ID,
1196 .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
1197 },
1198
1199 {
1200 .vendor = PCI_VENDOR_ID_INTEL,
1176 .device = PCI_DEVICE_ID_INTEL_APL_EMMC, 1201 .device = PCI_DEVICE_ID_INTEL_APL_EMMC,
1177 .subvendor = PCI_ANY_ID, 1202 .subvendor = PCI_ANY_ID,
1178 .subdevice = PCI_ANY_ID, 1203 .subdevice = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca 28#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc 29#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 30#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
31#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
32#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
33#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
31#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca 34#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
32#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc 35#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
33#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0 36#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index aca439d3ca83..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
309 __func__, uhs, ctrl_2); 309 __func__, uhs, ctrl_2);
310} 310}
311 311
312static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
313 unsigned short vdd)
314{
315 struct mmc_host *mmc = host->mmc;
316 u8 pwr = host->pwr;
317
318 sdhci_set_power(host, mode, vdd);
319
320 if (host->pwr == pwr)
321 return;
322
323 if (host->pwr == 0)
324 vdd = 0;
325
326 if (!IS_ERR(mmc->supply.vmmc)) {
327 spin_unlock_irq(&host->lock);
328 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
329 spin_lock_irq(&host->lock);
330 }
331}
332
312static const struct sdhci_ops pxav3_sdhci_ops = { 333static const struct sdhci_ops pxav3_sdhci_ops = {
313 .set_clock = sdhci_set_clock, 334 .set_clock = sdhci_set_clock,
335 .set_power = pxav3_set_power,
314 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, 336 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
315 .get_max_clock = sdhci_pltfm_clk_get_max_clock, 337 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
316 .set_bus_width = sdhci_set_bus_width, 338 .set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762bb48d..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
382 .pdata = &sdhci_tegra114_pdata, 382 .pdata = &sdhci_tegra114_pdata,
383}; 383};
384 384
385static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
386 .pdata = &sdhci_tegra114_pdata,
387 .nvquirks = NVQUIRK_ENABLE_SDR50 |
388 NVQUIRK_ENABLE_DDR50 |
389 NVQUIRK_ENABLE_SDR104 |
390 NVQUIRK_HAS_PADCALIB,
391};
392
393static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { 385static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
394 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | 386 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
395 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | 387 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
407 399
408static const struct of_device_id sdhci_tegra_dt_match[] = { 400static const struct of_device_id sdhci_tegra_dt_match[] = {
409 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 }, 401 { .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
410 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 }, 402 { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
411 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, 403 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
412 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, 404 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
413 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, 405 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8670f162dec7..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1210,10 +1210,24 @@ clock_set:
1210} 1210}
1211EXPORT_SYMBOL_GPL(sdhci_set_clock); 1211EXPORT_SYMBOL_GPL(sdhci_set_clock);
1212 1212
1213static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 1213static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1214 unsigned short vdd) 1214 unsigned short vdd)
1215{ 1215{
1216 struct mmc_host *mmc = host->mmc; 1216 struct mmc_host *mmc = host->mmc;
1217
1218 spin_unlock_irq(&host->lock);
1219 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1220 spin_lock_irq(&host->lock);
1221
1222 if (mode != MMC_POWER_OFF)
1223 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1224 else
1225 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1226}
1227
1228void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1229 unsigned short vdd)
1230{
1217 u8 pwr = 0; 1231 u8 pwr = 0;
1218 1232
1219 if (mode != MMC_POWER_OFF) { 1233 if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1245 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1259 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1246 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 1260 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1247 sdhci_runtime_pm_bus_off(host); 1261 sdhci_runtime_pm_bus_off(host);
1248 vdd = 0;
1249 } else { 1262 } else {
1250 /* 1263 /*
1251 * Spec says that we should clear the power reg before setting 1264 * Spec says that we should clear the power reg before setting
@@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1276 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 1289 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1277 mdelay(10); 1290 mdelay(10);
1278 } 1291 }
1292}
1293EXPORT_SYMBOL_GPL(sdhci_set_power);
1279 1294
1280 if (!IS_ERR(mmc->supply.vmmc)) { 1295static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1281 spin_unlock_irq(&host->lock); 1296 unsigned short vdd)
1282 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1297{
1283 spin_lock_irq(&host->lock); 1298 struct mmc_host *mmc = host->mmc;
1284 } 1299
1300 if (host->ops->set_power)
1301 host->ops->set_power(host, mode, vdd);
1302 else if (!IS_ERR(mmc->supply.vmmc))
1303 sdhci_set_power_reg(host, mode, vdd);
1304 else
1305 sdhci_set_power(host, mode, vdd);
1285} 1306}
1286 1307
1287/*****************************************************************************\ 1308/*****************************************************************************\
@@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1431 } 1452 }
1432 } 1453 }
1433 1454
1434 sdhci_set_power(host, ios->power_mode, ios->vdd); 1455 __sdhci_set_power(host, ios->power_mode, ios->vdd);
1435 1456
1436 if (host->ops->platform_send_init_74_clocks) 1457 if (host->ops->platform_send_init_74_clocks)
1437 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1458 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bd28033dbd9..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -529,6 +529,8 @@ struct sdhci_ops {
529#endif 529#endif
530 530
531 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 531 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
532 void (*set_power)(struct sdhci_host *host, unsigned char mode,
533 unsigned short vdd);
532 534
533 int (*enable_dma)(struct sdhci_host *host); 535 int (*enable_dma)(struct sdhci_host *host);
534 unsigned int (*get_max_clock)(struct sdhci_host *host); 536 unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
660} 662}
661 663
662void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); 664void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
665void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
666 unsigned short vdd);
663void sdhci_set_bus_width(struct sdhci_host *host, int width); 667void sdhci_set_bus_width(struct sdhci_host *host, int width);
664void sdhci_reset(struct sdhci_host *host, u8 mask); 668void sdhci_reset(struct sdhci_host *host, u8 mask);
665void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); 669void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce9f944..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
1513 mmc->caps |= pd->caps; 1513 mmc->caps |= pd->caps;
1514 mmc->max_segs = 32; 1514 mmc->max_segs = 32;
1515 mmc->max_blk_size = 512; 1515 mmc->max_blk_size = 512;
1516 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1516 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1517 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1518 mmc->max_seg_size = mmc->max_req_size; 1518 mmc->max_seg_size = mmc->max_req_size;
1519 1519
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1129 MMC_CAP_1_8V_DDR | 1129 MMC_CAP_1_8V_DDR |
1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1131 1131
1132 /* TODO MMC DDR is not working on A80 */
1133 if (of_device_is_compatible(pdev->dev.of_node,
1134 "allwinner,sun9i-a80-mmc"))
1135 mmc->caps &= ~MMC_CAP_1_8V_DDR;
1136
1132 ret = mmc_of_parse(mmc); 1137 ret = mmc_of_parse(mmc);
1133 if (ret) 1138 if (ret)
1134 goto error_free_dma; 1139 goto error_free_dma;
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 675435873823..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
63 } 63 }
64 } 64 }
65 65
66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
67 (align & PAGE_MASK))) || !multiple) { 67 (align & PAGE_MASK))) || !multiple) {
68 ret = -EINVAL; 68 ret = -EINVAL;
69 goto pio; 69 goto pio;
@@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
133 } 133 }
134 } 134 }
135 135
136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || 136 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
137 (align & PAGE_MASK))) || !multiple) { 137 (align & PAGE_MASK))) || !multiple) {
138 ret = -EINVAL; 138 ret = -EINVAL;
139 goto pio; 139 goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74c1906..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1125 mmc->caps2 |= pdata->capabilities2; 1125 mmc->caps2 |= pdata->capabilities2;
1126 mmc->max_segs = 32; 1126 mmc->max_segs = 32;
1127 mmc->max_blk_size = 512; 1127 mmc->max_blk_size = 512;
1128 mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * 1128 mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
1129 mmc->max_segs; 1129 mmc->max_segs;
1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1130 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1131 mmc->max_seg_size = mmc->max_req_size; 1131 mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe711f2..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
1789 /* Set .max_segs to some random number. Feel free to adjust. */ 1789 /* Set .max_segs to some random number. Feel free to adjust. */
1790 mmc->max_segs = 32; 1790 mmc->max_segs = 32;
1791 mmc->max_blk_size = 512; 1791 mmc->max_blk_size = 512;
1792 mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; 1792 mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1793 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1794 /* 1794 /*
1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code, 1795 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
75 break; 75 break;
76 } 76 }
77 77
78 page_cache_release(page); 78 put_page(page);
79 pages--; 79 pages--;
80 index++; 80 index++;
81 } 81 }
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
124 return PTR_ERR(page); 124 return PTR_ERR(page);
125 125
126 memcpy(buf, page_address(page) + offset, cpylen); 126 memcpy(buf, page_address(page) + offset, cpylen);
127 page_cache_release(page); 127 put_page(page);
128 128
129 if (retlen) 129 if (retlen)
130 *retlen += cpylen; 130 *retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
164 unlock_page(page); 164 unlock_page(page);
165 balance_dirty_pages_ratelimited(mapping); 165 balance_dirty_pages_ratelimited(mapping);
166 } 166 }
167 page_cache_release(page); 167 put_page(page);
168 168
169 if (retlen) 169 if (retlen)
170 *retlen += cpylen; 170 *retlen += cpylen;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac54fc0..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4009,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
4009 * This is the first phase of the normal nand_scan() function. It reads the 4009 * This is the first phase of the normal nand_scan() function. It reads the
4010 * flash ID and sets up MTD fields accordingly. 4010 * flash ID and sets up MTD fields accordingly.
4011 * 4011 *
4012 * The mtd->owner field must be set to the module of the caller.
4013 */ 4012 */
4014int nand_scan_ident(struct mtd_info *mtd, int maxchips, 4013int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4015 struct nand_flash_dev *table) 4014 struct nand_flash_dev *table)
@@ -4429,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
4429 * 4428 *
4430 * This fills out all the uninitialized function pointers with the defaults. 4429 * This fills out all the uninitialized function pointers with the defaults.
4431 * The flash ID is read and the mtd/chip structures are filled with the 4430 * The flash ID is read and the mtd/chip structures are filled with the
4432 * appropriate values. The mtd->owner field must be set to the module of the 4431 * appropriate values.
4433 * caller.
4434 */ 4432 */
4435int nand_scan(struct mtd_info *mtd, int maxchips) 4433int nand_scan(struct mtd_info *mtd, int maxchips)
4436{ 4434{
4437 int ret; 4435 int ret;
4438 4436
4439 /* Many callers got this wrong, so check for it for a while... */
4440 if (!mtd->owner && caller_is_module()) {
4441 pr_crit("%s called with NULL mtd->owner!\n", __func__);
4442 BUG();
4443 }
4444
4445 ret = nand_scan_ident(mtd, maxchips, NULL); 4437 ret = nand_scan_ident(mtd, maxchips, NULL);
4446 if (!ret) 4438 if (!ret)
4447 ret = nand_scan_tail(mtd); 4439 ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
1339 int i; 1339 int i;
1340 1340
1341 for (i = 0; i < ns->held_cnt; i++) 1341 for (i = 0; i < ns->held_cnt; i++)
1342 page_cache_release(ns->held_pages[i]); 1342 put_page(ns->held_pages[i]);
1343} 1343}
1344 1344
1345/* Get page cache pages in advance to provide NOFS memory allocation */ 1345/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
1349 struct page *page; 1349 struct page *page;
1350 struct address_space *mapping = file->f_mapping; 1350 struct address_space *mapping = file->f_mapping;
1351 1351
1352 start_index = pos >> PAGE_CACHE_SHIFT; 1352 start_index = pos >> PAGE_SHIFT;
1353 end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; 1353 end_index = (pos + count - 1) >> PAGE_SHIFT;
1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) 1354 if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
1355 return -EINVAL; 1355 return -EINVAL;
1356 ns->held_cnt = 0; 1356 ns->held_cnt = 0;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..befd67df08e1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
62 this device is consigned into oblivion) with a configurable IP 62 this device is consigned into oblivion) with a configurable IP
63 address. It is most commonly used in order to make your currently 63 address. It is most commonly used in order to make your currently
64 inactive SLIP address seem like a real address for local programs. 64 inactive SLIP address seem like a real address for local programs.
65 If you use SLIP or PPP, you might want to say Y here. Since this 65 If you use SLIP or PPP, you might want to say Y here. It won't
66 thing often comes in handy, the default is Y. It won't enlarge your 66 enlarge your kernel. What a deal. Read about it in the Network
67 kernel either. What a deal. Read about it in the Network
68 Administrator's Guide, available from 67 Administrator's Guide, available from
69 <http://www.tldp.org/docs.html#guide>. 68 <http://www.tldp.org/docs.html#guide>.
70 69
@@ -195,6 +194,7 @@ config GENEVE
195 194
196config MACSEC 195config MACSEC
197 tristate "IEEE 802.1AE MAC-level encryption (MACsec)" 196 tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
197 select CRYPTO
198 select CRYPTO_AES 198 select CRYPTO_AES
199 select CRYPTO_GCM 199 select CRYPTO_GCM
200 ---help--- 200 ---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fa086e09d6b7..5e572b3510b9 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2181 struct net_device *bridge) 2181 struct net_device *bridge)
2182{ 2182{
2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2184 u16 fid; 2184 int i, err = 0;
2185 int i, err;
2186 2185
2187 mutex_lock(&ps->smi_mutex); 2186 mutex_lock(&ps->smi_mutex);
2188 2187
2189 /* Get or create the bridge FID and assign it to the port */
2190 for (i = 0; i < ps->num_ports; ++i)
2191 if (ps->ports[i].bridge_dev == bridge)
2192 break;
2193
2194 if (i < ps->num_ports)
2195 err = _mv88e6xxx_port_fid_get(ds, i, &fid);
2196 else
2197 err = _mv88e6xxx_fid_new(ds, &fid);
2198 if (err)
2199 goto unlock;
2200
2201 err = _mv88e6xxx_port_fid_set(ds, port, fid);
2202 if (err)
2203 goto unlock;
2204
2205 /* Assign the bridge and remap each port's VLANTable */ 2188 /* Assign the bridge and remap each port's VLANTable */
2206 ps->ports[port].bridge_dev = bridge; 2189 ps->ports[port].bridge_dev = bridge;
2207 2190
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2213 } 2196 }
2214 } 2197 }
2215 2198
2216unlock:
2217 mutex_unlock(&ps->smi_mutex); 2199 mutex_unlock(&ps->smi_mutex);
2218 2200
2219 return err; 2201 return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2223{ 2205{
2224 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2206 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2225 struct net_device *bridge = ps->ports[port].bridge_dev; 2207 struct net_device *bridge = ps->ports[port].bridge_dev;
2226 u16 fid;
2227 int i; 2208 int i;
2228 2209
2229 mutex_lock(&ps->smi_mutex); 2210 mutex_lock(&ps->smi_mutex);
2230 2211
2231 /* Give the port a fresh Filtering Information Database */
2232 if (_mv88e6xxx_fid_new(ds, &fid) ||
2233 _mv88e6xxx_port_fid_set(ds, port, fid))
2234 netdev_warn(ds->ports[port], "failed to assign a new FID\n");
2235
2236 /* Unassign the bridge and remap each port's VLANTable */ 2212 /* Unassign the bridge and remap each port's VLANTable */
2237 ps->ports[port].bridge_dev = NULL; 2213 ps->ports[port].bridge_dev = NULL;
2238 2214
@@ -2264,6 +2240,57 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
2264 mutex_unlock(&ps->smi_mutex); 2240 mutex_unlock(&ps->smi_mutex);
2265} 2241}
2266 2242
2243static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2244 int reg, int val)
2245{
2246 int ret;
2247
2248 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2249 if (ret < 0)
2250 goto restore_page_0;
2251
2252 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2253restore_page_0:
2254 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2255
2256 return ret;
2257}
2258
2259static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
2260 int reg)
2261{
2262 int ret;
2263
2264 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
2265 if (ret < 0)
2266 goto restore_page_0;
2267
2268 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2269restore_page_0:
2270 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2271
2272 return ret;
2273}
2274
2275static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
2276{
2277 int ret;
2278
2279 ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
2280 MII_BMCR);
2281 if (ret < 0)
2282 return ret;
2283
2284 if (ret & BMCR_PDOWN) {
2285 ret &= ~BMCR_PDOWN;
2286 ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
2287 PAGE_FIBER_SERDES, MII_BMCR,
2288 ret);
2289 }
2290
2291 return ret;
2292}
2293
2267static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) 2294static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2268{ 2295{
2269 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2296 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -2367,6 +2394,23 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2367 goto abort; 2394 goto abort;
2368 } 2395 }
2369 2396
2397 /* If this port is connected to a SerDes, make sure the SerDes is not
2398 * powered down.
2399 */
2400 if (mv88e6xxx_6352_family(ds)) {
2401 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
2402 if (ret < 0)
2403 goto abort;
2404 ret &= PORT_STATUS_CMODE_MASK;
2405 if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
2406 (ret == PORT_STATUS_CMODE_1000BASE_X) ||
2407 (ret == PORT_STATUS_CMODE_SGMII)) {
2408 ret = mv88e6xxx_power_on_serdes(ds);
2409 if (ret < 0)
2410 goto abort;
2411 }
2412 }
2413
2370 /* Port Control 2: don't force a good FCS, set the maximum frame size to 2414 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2371 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or 2415 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
2372 * untagged frames on this port, do a destination address lookup on all 2416 * untagged frames on this port, do a destination address lookup on all
@@ -2408,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2408 * the other bits clear. 2452 * the other bits clear.
2409 */ 2453 */
2410 reg = 1 << port; 2454 reg = 1 << port;
2411 /* Disable learning for DSA and CPU ports */ 2455 /* Disable learning for CPU port */
2412 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2456 if (dsa_is_cpu_port(ds, port))
2413 reg = PORT_ASSOC_VECTOR_LOCKED_PORT; 2457 reg = 0;
2414 2458
2415 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2459 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2416 if (ret) 2460 if (ret)
@@ -2490,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2490 if (ret) 2534 if (ret)
2491 goto abort; 2535 goto abort;
2492 2536
2493 /* Port based VLAN map: give each port its own address 2537 /* Port based VLAN map: give each port the same default address
2494 * database, and allow bidirectional communication between the 2538 * database, and allow bidirectional communication between the
2495 * CPU and DSA port(s), and the other ports. 2539 * CPU and DSA port(s), and the other ports.
2496 */ 2540 */
2497 ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); 2541 ret = _mv88e6xxx_port_fid_set(ds, port, 0);
2498 if (ret) 2542 if (ret)
2499 goto abort; 2543 goto abort;
2500 2544
@@ -2714,13 +2758,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2714 int ret; 2758 int ret;
2715 2759
2716 mutex_lock(&ps->smi_mutex); 2760 mutex_lock(&ps->smi_mutex);
2717 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); 2761 ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
2718 if (ret < 0)
2719 goto error;
2720 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
2721error:
2722 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2723 mutex_unlock(&ps->smi_mutex); 2762 mutex_unlock(&ps->smi_mutex);
2763
2724 return ret; 2764 return ret;
2725} 2765}
2726 2766
@@ -2731,14 +2771,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2731 int ret; 2771 int ret;
2732 2772
2733 mutex_lock(&ps->smi_mutex); 2773 mutex_lock(&ps->smi_mutex);
2734 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); 2774 ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
2735 if (ret < 0)
2736 goto error;
2737
2738 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
2739error:
2740 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
2741 mutex_unlock(&ps->smi_mutex); 2775 mutex_unlock(&ps->smi_mutex);
2776
2742 return ret; 2777 return ret;
2743} 2778}
2744 2779
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 9a038aba48fb..26a424acd10f 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -28,6 +28,10 @@
28#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) 28#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
29#define SMI_DATA 0x01 29#define SMI_DATA 0x01
30 30
31/* Fiber/SERDES Registers are located at SMI address F, page 1 */
32#define REG_FIBER_SERDES 0x0f
33#define PAGE_FIBER_SERDES 0x01
34
31#define REG_PORT(p) (0x10 + (p)) 35#define REG_PORT(p) (0x10 + (p))
32#define PORT_STATUS 0x00 36#define PORT_STATUS 0x00
33#define PORT_STATUS_PAUSE_EN BIT(15) 37#define PORT_STATUS_PAUSE_EN BIT(15)
@@ -45,6 +49,10 @@
45#define PORT_STATUS_MGMII BIT(6) /* 6185 */ 49#define PORT_STATUS_MGMII BIT(6) /* 6185 */
46#define PORT_STATUS_TX_PAUSED BIT(5) 50#define PORT_STATUS_TX_PAUSED BIT(5)
47#define PORT_STATUS_FLOW_CTRL BIT(4) 51#define PORT_STATUS_FLOW_CTRL BIT(4)
52#define PORT_STATUS_CMODE_MASK 0x0f
53#define PORT_STATUS_CMODE_100BASE_X 0x8
54#define PORT_STATUS_CMODE_1000BASE_X 0x9
55#define PORT_STATUS_CMODE_SGMII 0xa
48#define PORT_PCS_CTRL 0x01 56#define PORT_PCS_CTRL 0x01
49#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) 57#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
50#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) 58#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1412 1412
1413 err = -EIO; 1413 err = -EIO;
1414 1414
1415 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; 1415 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1417 1417
1418 /* Init PHY as early as possible due to power saving issue */ 1418 /* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac); 1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1573 } 1573 }
1574 1574
1575 /* This (reset &) enable is not preset in specs or reference driver but
1576 * Broadcom does it in arch PCI code when enabling fake PCI device.
1577 */
1578 bcma_core_enable(core, 0);
1579
1575 /* Allocation and references */ 1580 /* Allocation and references */
1576 net_dev = alloc_etherdev(sizeof(*bgmac)); 1581 net_dev = alloc_etherdev(sizeof(*bgmac));
1577 if (!net_dev) 1582 if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
199#define BGMAC_CMDCFG_TAI 0x00000200 199#define BGMAC_CMDCFG_TAI 0x00000200
200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ 200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
201#define BGMAC_CMDCFG_HD_SHIFT 10 201#define BGMAC_CMDCFG_HD_SHIFT 10
202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ 202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ 203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
204#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 204#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ 205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
206#define BGMAC_CMDCFG_AE 0x00400000 206#define BGMAC_CMDCFG_AE 0x00400000
207#define BGMAC_CMDCFG_CFE 0x00800000 207#define BGMAC_CMDCFG_CFE 0x00800000
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51db981..72eb29ed0359 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
581 struct page *page; 581 struct page *page;
582 dma_addr_t mapping; 582 dma_addr_t mapping;
583 u16 sw_prod = rxr->rx_sw_agg_prod; 583 u16 sw_prod = rxr->rx_sw_agg_prod;
584 unsigned int offset = 0;
584 585
585 page = alloc_page(gfp); 586 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
586 if (!page) 587 page = rxr->rx_page;
587 return -ENOMEM; 588 if (!page) {
589 page = alloc_page(gfp);
590 if (!page)
591 return -ENOMEM;
592 rxr->rx_page = page;
593 rxr->rx_page_offset = 0;
594 }
595 offset = rxr->rx_page_offset;
596 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
597 if (rxr->rx_page_offset == PAGE_SIZE)
598 rxr->rx_page = NULL;
599 else
600 get_page(page);
601 } else {
602 page = alloc_page(gfp);
603 if (!page)
604 return -ENOMEM;
605 }
588 606
589 mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, 607 mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
590 PCI_DMA_FROMDEVICE); 608 PCI_DMA_FROMDEVICE);
591 if (dma_mapping_error(&pdev->dev, mapping)) { 609 if (dma_mapping_error(&pdev->dev, mapping)) {
592 __free_page(page); 610 __free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
601 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); 619 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
602 620
603 rx_agg_buf->page = page; 621 rx_agg_buf->page = page;
622 rx_agg_buf->offset = offset;
604 rx_agg_buf->mapping = mapping; 623 rx_agg_buf->mapping = mapping;
605 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 624 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
606 rxbd->rx_bd_opaque = sw_prod; 625 rxbd->rx_bd_opaque = sw_prod;
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
642 page = cons_rx_buf->page; 661 page = cons_rx_buf->page;
643 cons_rx_buf->page = NULL; 662 cons_rx_buf->page = NULL;
644 prod_rx_buf->page = page; 663 prod_rx_buf->page = page;
664 prod_rx_buf->offset = cons_rx_buf->offset;
645 665
646 prod_rx_buf->mapping = cons_rx_buf->mapping; 666 prod_rx_buf->mapping = cons_rx_buf->mapping;
647 667
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
709 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 729 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
710 730
711 cons_rx_buf = &rxr->rx_agg_ring[cons]; 731 cons_rx_buf = &rxr->rx_agg_ring[cons];
712 skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); 732 skb_fill_page_desc(skb, i, cons_rx_buf->page,
733 cons_rx_buf->offset, frag_len);
713 __clear_bit(cons, rxr->rx_agg_bmap); 734 __clear_bit(cons, rxr->rx_agg_bmap);
714 735
715 /* It is possible for bnxt_alloc_rx_page() to allocate 736 /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
740 return NULL; 761 return NULL;
741 } 762 }
742 763
743 dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, 764 dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
744 PCI_DMA_FROMDEVICE); 765 PCI_DMA_FROMDEVICE);
745 766
746 skb->data_len += frag_len; 767 skb->data_len += frag_len;
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
1584 1605
1585 dma_unmap_page(&pdev->dev, 1606 dma_unmap_page(&pdev->dev,
1586 dma_unmap_addr(rx_agg_buf, mapping), 1607 dma_unmap_addr(rx_agg_buf, mapping),
1587 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1608 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1588 1609
1589 rx_agg_buf->page = NULL; 1610 rx_agg_buf->page = NULL;
1590 __clear_bit(j, rxr->rx_agg_bmap); 1611 __clear_bit(j, rxr->rx_agg_bmap);
1591 1612
1592 __free_page(page); 1613 __free_page(page);
1593 } 1614 }
1615 if (rxr->rx_page) {
1616 __free_page(rxr->rx_page);
1617 rxr->rx_page = NULL;
1618 }
1594 } 1619 }
1595} 1620}
1596 1621
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1973 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 1998 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1974 return 0; 1999 return 0;
1975 2000
1976 type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | 2001 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
1977 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 2002 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1978 2003
1979 bnxt_init_rxbd_pages(ring, type); 2004 bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
2164 bp->rx_agg_nr_pages = 0; 2189 bp->rx_agg_nr_pages = 0;
2165 2190
2166 if (bp->flags & BNXT_FLAG_TPA) 2191 if (bp->flags & BNXT_FLAG_TPA)
2167 agg_factor = 4; 2192 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2168 2193
2169 bp->flags &= ~BNXT_FLAG_JUMBO; 2194 bp->flags &= ~BNXT_FLAG_JUMBO;
2170 if (rx_space > PAGE_SIZE) { 2195 if (rx_space > PAGE_SIZE) {
@@ -2653,7 +2678,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2653 /* Write request msg to hwrm channel */ 2678 /* Write request msg to hwrm channel */
2654 __iowrite32_copy(bp->bar0, data, msg_len / 4); 2679 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2655 2680
2656 for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4) 2681 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2657 writel(0, bp->bar0 + i); 2682 writel(0, bp->bar0 + i);
2658 2683
2659 /* currently supports only one outstanding message */ 2684 /* currently supports only one outstanding message */
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3020 /* Number of segs are log2 units, and first packet is not 3045 /* Number of segs are log2 units, and first packet is not
3021 * included as part of this units. 3046 * included as part of this units.
3022 */ 3047 */
3023 if (mss <= PAGE_SIZE) { 3048 if (mss <= BNXT_RX_PAGE_SIZE) {
3024 n = PAGE_SIZE / mss; 3049 n = BNXT_RX_PAGE_SIZE / mss;
3025 nsegs = (MAX_SKB_FRAGS - 1) * n; 3050 nsegs = (MAX_SKB_FRAGS - 1) * n;
3026 } else { 3051 } else {
3027 n = mss / PAGE_SIZE; 3052 n = mss / BNXT_RX_PAGE_SIZE;
3028 if (mss & (PAGE_SIZE - 1)) 3053 if (mss & (BNXT_RX_PAGE_SIZE - 1))
3029 n++; 3054 n++;
3030 nsegs = (MAX_SKB_FRAGS - n) / n; 3055 nsegs = (MAX_SKB_FRAGS - n) / n;
3031 } 3056 }
@@ -3391,11 +3416,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3391 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3416 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3392 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3417 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3393 3418
3419 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3394 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, 3420 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3395 INVALID_STATS_CTX_ID); 3421 INVALID_STATS_CTX_ID);
3396 if (rc) 3422 if (rc)
3397 goto err_out; 3423 goto err_out;
3398 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3399 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); 3424 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3400 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 3425 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3401 } 3426 }
@@ -3830,6 +3855,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3830 struct hwrm_ver_get_input req = {0}; 3855 struct hwrm_ver_get_input req = {0};
3831 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; 3856 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3832 3857
3858 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3833 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); 3859 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3834 req.hwrm_intf_maj = HWRM_VERSION_MAJOR; 3860 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3835 req.hwrm_intf_min = HWRM_VERSION_MINOR; 3861 req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3855,6 +3881,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
3855 if (!bp->hwrm_cmd_timeout) 3881 if (!bp->hwrm_cmd_timeout)
3856 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 3882 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3857 3883
3884 if (resp->hwrm_intf_maj >= 1)
3885 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
3886
3858hwrm_ver_get_exit: 3887hwrm_ver_get_exit:
3859 mutex_unlock(&bp->hwrm_cmd_lock); 3888 mutex_unlock(&bp->hwrm_cmd_lock);
3860 return rc; 3889 return rc;
@@ -4305,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
4305 if (bp->flags & BNXT_FLAG_MSIX_CAP) 4334 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4306 rc = bnxt_setup_msix(bp); 4335 rc = bnxt_setup_msix(bp);
4307 4336
4308 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { 4337 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
4309 /* fallback to INTA */ 4338 /* fallback to INTA */
4310 rc = bnxt_setup_inta(bp); 4339 rc = bnxt_setup_inta(bp);
4311 } 4340 }
@@ -4555,7 +4584,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4555 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 4584 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4556 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4585 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 4586 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4558 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 4587 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4559 req->enables |= 4588 req->enables |=
4560 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 4589 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4561 } else { 4590 } else {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47172b7..8b823ff558ff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
407 407
408#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) 408#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
409 409
410/* The RXBD length is 16-bit so we can only support page sizes < 64K */
411#if (PAGE_SHIFT > 15)
412#define BNXT_RX_PAGE_SHIFT 15
413#else
414#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
415#endif
416
417#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
418
410#define BNXT_MIN_PKT_SIZE 45 419#define BNXT_MIN_PKT_SIZE 45
411 420
412#define BNXT_NUM_TESTS(bp) 0 421#define BNXT_NUM_TESTS(bp) 0
@@ -477,6 +486,7 @@ struct rx_tpa_end_cmp_ext {
477#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) 486#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
478#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) 487#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
479 488
489#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
480#define DFLT_HWRM_CMD_TIMEOUT 500 490#define DFLT_HWRM_CMD_TIMEOUT 500
481#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 491#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
482#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 492#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -505,6 +515,7 @@ struct bnxt_sw_rx_bd {
505 515
506struct bnxt_sw_rx_agg_bd { 516struct bnxt_sw_rx_agg_bd {
507 struct page *page; 517 struct page *page;
518 unsigned int offset;
508 dma_addr_t mapping; 519 dma_addr_t mapping;
509}; 520};
510 521
@@ -585,6 +596,9 @@ struct bnxt_rx_ring_info {
585 unsigned long *rx_agg_bmap; 596 unsigned long *rx_agg_bmap;
586 u16 rx_agg_bmap_size; 597 u16 rx_agg_bmap_size;
587 598
599 struct page *rx_page;
600 unsigned int rx_page_offset;
601
588 dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; 602 dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
589 dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; 603 dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
590 604
@@ -953,6 +967,7 @@ struct bnxt {
953 dma_addr_t hw_tx_port_stats_map; 967 dma_addr_t hw_tx_port_stats_map;
954 int hw_port_stats_size; 968 int hw_port_stats_size;
955 969
970 u16 hwrm_max_req_len;
956 int hwrm_cmd_timeout; 971 int hwrm_cmd_timeout;
957 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ 972 struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
958 struct hwrm_ver_get_output ver_resp; 973 struct hwrm_ver_get_output ver_resp;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada1662b651..2e472f6dbf2d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -855,10 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
855 if (BNXT_VF(bp)) 855 if (BNXT_VF(bp))
856 return; 856 return;
857 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 857 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
858 epause->rx_pause = 858 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
859 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); 859 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
860 epause->tx_pause =
861 ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
862} 860}
863 861
864static int bnxt_set_pauseparam(struct net_device *dev, 862static int bnxt_set_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd03cb3a..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
878 else 878 else
879 p = (char *)priv; 879 p = (char *)priv;
880 p += s->stat_offset; 880 p += s->stat_offset;
881 data[i] = *(u32 *)p; 881 if (sizeof(unsigned long) != sizeof(u32) &&
882 s->stat_sizeof == sizeof(unsigned long))
883 data[i] = *(unsigned long *)p;
884 else
885 data[i] = *(u32 *)p;
882 } 886 }
883} 887}
884 888
@@ -1171,6 +1175,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1171 struct enet_cb *tx_cb_ptr; 1175 struct enet_cb *tx_cb_ptr;
1172 struct netdev_queue *txq; 1176 struct netdev_queue *txq;
1173 unsigned int pkts_compl = 0; 1177 unsigned int pkts_compl = 0;
1178 unsigned int bytes_compl = 0;
1174 unsigned int c_index; 1179 unsigned int c_index;
1175 unsigned int txbds_ready; 1180 unsigned int txbds_ready;
1176 unsigned int txbds_processed = 0; 1181 unsigned int txbds_processed = 0;
@@ -1193,16 +1198,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1193 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; 1198 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
1194 if (tx_cb_ptr->skb) { 1199 if (tx_cb_ptr->skb) {
1195 pkts_compl++; 1200 pkts_compl++;
1196 dev->stats.tx_packets++; 1201 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
1197 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
1198 dma_unmap_single(&dev->dev, 1202 dma_unmap_single(&dev->dev,
1199 dma_unmap_addr(tx_cb_ptr, dma_addr), 1203 dma_unmap_addr(tx_cb_ptr, dma_addr),
1200 dma_unmap_len(tx_cb_ptr, dma_len), 1204 dma_unmap_len(tx_cb_ptr, dma_len),
1201 DMA_TO_DEVICE); 1205 DMA_TO_DEVICE);
1202 bcmgenet_free_cb(tx_cb_ptr); 1206 bcmgenet_free_cb(tx_cb_ptr);
1203 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 1207 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1204 dev->stats.tx_bytes +=
1205 dma_unmap_len(tx_cb_ptr, dma_len);
1206 dma_unmap_page(&dev->dev, 1208 dma_unmap_page(&dev->dev,
1207 dma_unmap_addr(tx_cb_ptr, dma_addr), 1209 dma_unmap_addr(tx_cb_ptr, dma_addr),
1208 dma_unmap_len(tx_cb_ptr, dma_len), 1210 dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1222,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1220 ring->free_bds += txbds_processed; 1222 ring->free_bds += txbds_processed;
1221 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; 1223 ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
1222 1224
1225 dev->stats.tx_packets += pkts_compl;
1226 dev->stats.tx_bytes += bytes_compl;
1227
1223 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { 1228 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1224 txq = netdev_get_tx_queue(dev, ring->queue); 1229 txq = netdev_get_tx_queue(dev, ring->queue);
1225 if (netif_tx_queue_stopped(txq)) 1230 if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1301,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
1296 1301
1297 tx_cb_ptr->skb = skb; 1302 tx_cb_ptr->skb = skb;
1298 1303
1299 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); 1304 skb_len = skb_headlen(skb);
1300 1305
1301 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1306 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1302 ret = dma_mapping_error(kdev, mapping); 1307 ret = dma_mapping_error(kdev, mapping);
@@ -1464,6 +1469,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1464 goto out; 1469 goto out;
1465 } 1470 }
1466 1471
1472 /* Retain how many bytes will be sent on the wire, without TSB inserted
1473 * by transmit checksum offload
1474 */
1475 GENET_CB(skb)->bytes_sent = skb->len;
1476
1467 /* set the SKB transmit checksum */ 1477 /* set the SKB transmit checksum */
1468 if (priv->desc_64b_en) { 1478 if (priv->desc_64b_en) {
1469 skb = bcmgenet_put_tx_csum(dev, skb); 1479 skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
531 u32 flags; 531 u32 flags;
532}; 532};
533 533
534struct bcmgenet_skb_cb {
535 unsigned int bytes_sent; /* bytes on the wire (no TSB) */
536};
537
538#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
539
534struct bcmgenet_tx_ring { 540struct bcmgenet_tx_ring {
535 spinlock_t lock; /* ring lock */ 541 spinlock_t lock; /* ring lock */
536 struct napi_struct napi; /* NAPI per tx queue */ 542 struct napi_struct napi; /* NAPI per tx queue */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6619178ed77b..a63551d0a18a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
441 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 441 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
442 bp->pdev->name, bp->pdev->id); 442 bp->pdev->name, bp->pdev->id);
443 bp->mii_bus->priv = bp; 443 bp->mii_bus->priv = bp;
444 bp->mii_bus->parent = &bp->dev->dev; 444 bp->mii_bus->parent = &bp->pdev->dev;
445 pdata = dev_get_platdata(&bp->pdev->dev); 445 pdata = dev_get_platdata(&bp->pdev->dev);
446 446
447 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 447 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
458 struct phy_device *phydev; 458 struct phy_device *phydev;
459 459
460 phydev = mdiobus_scan(bp->mii_bus, i); 460 phydev = mdiobus_scan(bp->mii_bus, i);
461 if (IS_ERR(phydev)) { 461 if (IS_ERR(phydev) &&
462 PTR_ERR(phydev) != -ENODEV) {
462 err = PTR_ERR(phydev); 463 err = PTR_ERR(phydev);
463 break; 464 break;
464 } 465 }
@@ -917,7 +918,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
917 unsigned int frag_len = bp->rx_buffer_size; 918 unsigned int frag_len = bp->rx_buffer_size;
918 919
919 if (offset + frag_len > len) { 920 if (offset + frag_len > len) {
920 BUG_ON(frag != last_frag); 921 if (unlikely(frag != last_frag)) {
922 dev_kfree_skb_any(skb);
923 return -1;
924 }
921 frag_len = len - offset; 925 frag_len = len - offset;
922 } 926 }
923 skb_copy_to_linear_data_offset(skb, offset, 927 skb_copy_to_linear_data_offset(skb, offset,
@@ -945,8 +949,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
945 return 0; 949 return 0;
946} 950}
947 951
952static inline void macb_init_rx_ring(struct macb *bp)
953{
954 dma_addr_t addr;
955 int i;
956
957 addr = bp->rx_buffers_dma;
958 for (i = 0; i < RX_RING_SIZE; i++) {
959 bp->rx_ring[i].addr = addr;
960 bp->rx_ring[i].ctrl = 0;
961 addr += bp->rx_buffer_size;
962 }
963 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
964}
965
948static int macb_rx(struct macb *bp, int budget) 966static int macb_rx(struct macb *bp, int budget)
949{ 967{
968 bool reset_rx_queue = false;
950 int received = 0; 969 int received = 0;
951 unsigned int tail; 970 unsigned int tail;
952 int first_frag = -1; 971 int first_frag = -1;
@@ -972,10 +991,18 @@ static int macb_rx(struct macb *bp, int budget)
972 991
973 if (ctrl & MACB_BIT(RX_EOF)) { 992 if (ctrl & MACB_BIT(RX_EOF)) {
974 int dropped; 993 int dropped;
975 BUG_ON(first_frag == -1); 994
995 if (unlikely(first_frag == -1)) {
996 reset_rx_queue = true;
997 continue;
998 }
976 999
977 dropped = macb_rx_frame(bp, first_frag, tail); 1000 dropped = macb_rx_frame(bp, first_frag, tail);
978 first_frag = -1; 1001 first_frag = -1;
1002 if (unlikely(dropped < 0)) {
1003 reset_rx_queue = true;
1004 continue;
1005 }
979 if (!dropped) { 1006 if (!dropped) {
980 received++; 1007 received++;
981 budget--; 1008 budget--;
@@ -983,6 +1010,26 @@ static int macb_rx(struct macb *bp, int budget)
983 } 1010 }
984 } 1011 }
985 1012
1013 if (unlikely(reset_rx_queue)) {
1014 unsigned long flags;
1015 u32 ctrl;
1016
1017 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1018
1019 spin_lock_irqsave(&bp->lock, flags);
1020
1021 ctrl = macb_readl(bp, NCR);
1022 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1023
1024 macb_init_rx_ring(bp);
1025 macb_writel(bp, RBQP, bp->rx_ring_dma);
1026
1027 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1028
1029 spin_unlock_irqrestore(&bp->lock, flags);
1030 return received;
1031 }
1032
986 if (first_frag != -1) 1033 if (first_frag != -1)
987 bp->rx_tail = first_frag; 1034 bp->rx_tail = first_frag;
988 else 1035 else
@@ -1100,7 +1147,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1100 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1147 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1101 1148
1102 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1149 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1103 macb_writel(bp, ISR, MACB_BIT(RXUBR)); 1150 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1104 } 1151 }
1105 1152
1106 if (status & MACB_BIT(ISR_ROVR)) { 1153 if (status & MACB_BIT(ISR_ROVR)) {
@@ -1523,15 +1570,8 @@ static void gem_init_rings(struct macb *bp)
1523static void macb_init_rings(struct macb *bp) 1570static void macb_init_rings(struct macb *bp)
1524{ 1571{
1525 int i; 1572 int i;
1526 dma_addr_t addr;
1527 1573
1528 addr = bp->rx_buffers_dma; 1574 macb_init_rx_ring(bp);
1529 for (i = 0; i < RX_RING_SIZE; i++) {
1530 bp->rx_ring[i].addr = addr;
1531 bp->rx_ring[i].ctrl = 0;
1532 addr += bp->rx_buffer_size;
1533 }
1534 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1535 1575
1536 for (i = 0; i < TX_RING_SIZE; i++) { 1576 for (i = 0; i < TX_RING_SIZE; i++) {
1537 bp->queues[0].tx_ring[i].addr = 0; 1577 bp->queues[0].tx_ring[i].addr = 0;
@@ -2957,9 +2997,10 @@ static int macb_probe(struct platform_device *pdev)
2957 phy_node = of_get_next_available_child(np, NULL); 2997 phy_node = of_get_next_available_child(np, NULL);
2958 if (phy_node) { 2998 if (phy_node) {
2959 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); 2999 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
2960 if (gpio_is_valid(gpio)) 3000 if (gpio_is_valid(gpio)) {
2961 bp->reset_gpio = gpio_to_desc(gpio); 3001 bp->reset_gpio = gpio_to_desc(gpio);
2962 gpiod_direction_output(bp->reset_gpio, 1); 3002 gpiod_direction_output(bp->reset_gpio, 1);
3003 }
2963 } 3004 }
2964 of_node_put(phy_node); 3005 of_node_put(phy_node);
2965 3006
@@ -2979,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
2979 if (err) 3020 if (err)
2980 goto err_out_free_netdev; 3021 goto err_out_free_netdev;
2981 3022
3023 err = macb_mii_init(bp);
3024 if (err)
3025 goto err_out_free_netdev;
3026
3027 phydev = bp->phy_dev;
3028
3029 netif_carrier_off(dev);
3030
2982 err = register_netdev(dev); 3031 err = register_netdev(dev);
2983 if (err) { 3032 if (err) {
2984 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 3033 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2985 goto err_out_unregister_netdev; 3034 goto err_out_unregister_mdio;
2986 } 3035 }
2987 3036
2988 err = macb_mii_init(bp); 3037 phy_attached_info(phydev);
2989 if (err)
2990 goto err_out_unregister_netdev;
2991
2992 netif_carrier_off(dev);
2993 3038
2994 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", 3039 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
2995 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), 3040 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
2996 dev->base_addr, dev->irq, dev->dev_addr); 3041 dev->base_addr, dev->irq, dev->dev_addr);
2997 3042
2998 phydev = bp->phy_dev;
2999 phy_attached_info(phydev);
3000
3001 return 0; 3043 return 0;
3002 3044
3003err_out_unregister_netdev: 3045err_out_unregister_mdio:
3004 unregister_netdev(dev); 3046 phy_disconnect(bp->phy_dev);
3047 mdiobus_unregister(bp->mii_bus);
3048 mdiobus_free(bp->mii_bus);
3049
3050 /* Shutdown the PHY if there is a GPIO reset */
3051 if (bp->reset_gpio)
3052 gpiod_set_value(bp->reset_gpio, 0);
3005 3053
3006err_out_free_netdev: 3054err_out_free_netdev:
3007 free_netdev(dev); 3055 free_netdev(dev);
@@ -3029,7 +3077,8 @@ static int macb_remove(struct platform_device *pdev)
3029 mdiobus_free(bp->mii_bus); 3077 mdiobus_free(bp->mii_bus);
3030 3078
3031 /* Shutdown the PHY if there is a GPIO reset */ 3079 /* Shutdown the PHY if there is a GPIO reset */
3032 gpiod_set_value(bp->reset_gpio, 0); 3080 if (bp->reset_gpio)
3081 gpiod_set_value(bp->reset_gpio, 0);
3033 3082
3034 unregister_netdev(dev); 3083 unregister_netdev(dev);
3035 clk_disable_unprepare(bp->tx_clk); 3084 clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
1011 } 1011 }
1012 1012
1013 lmac++; 1013 lmac++;
1014 if (lmac == MAX_LMAC_PER_BGX) 1014 if (lmac == MAX_LMAC_PER_BGX) {
1015 of_node_put(node);
1015 break; 1016 break;
1017 }
1016 } 1018 }
1017 of_node_put(node);
1018 return 0; 1019 return 0;
1019 1020
1020defer: 1021defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 60908eab3b3a..43da891fab97 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
576 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; 576 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
577 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; 577 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
578 u8 cpus[SGE_QSETS + 1]; 578 u8 cpus[SGE_QSETS + 1];
579 u16 rspq_map[RSS_TABLE_SIZE]; 579 u16 rspq_map[RSS_TABLE_SIZE + 1];
580 580
581 for (i = 0; i < SGE_QSETS; ++i) 581 for (i = 0; i < SGE_QSETS; ++i)
582 cpus[i] = i; 582 cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
586 rspq_map[i] = i % nq0; 586 rspq_map[i] = i % nq0;
587 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; 587 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
588 } 588 }
589 rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
589 590
590 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | 591 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
591 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | 592 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1451 unsigned int mmd, unsigned int reg, u16 *valp); 1451 unsigned int mmd, unsigned int reg, u16 *valp);
1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1453 unsigned int mmd, unsigned int reg, u16 val); 1453 unsigned int mmd, unsigned int reg, u16 val);
1454int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id);
1454int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1457int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1458 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id); 1459 unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2981void t4_free_sge_resources(struct adapter *adap) 2981void t4_free_sge_resources(struct adapter *adap)
2982{ 2982{
2983 int i; 2983 int i;
2984 struct sge_eth_rxq *eq = adap->sge.ethrxq; 2984 struct sge_eth_rxq *eq;
2985 struct sge_eth_txq *etq = adap->sge.ethtxq; 2985 struct sge_eth_txq *etq;
2986
2987 /* stop all Rx queues in order to start them draining */
2988 for (i = 0; i < adap->sge.ethqsets; i++) {
2989 eq = &adap->sge.ethrxq[i];
2990 if (eq->rspq.desc)
2991 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
2992 FW_IQ_TYPE_FL_INT_CAP,
2993 eq->rspq.cntxt_id,
2994 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
2995 0xffff);
2996 }
2986 2997
2987 /* clean up Ethernet Tx/Rx queues */ 2998 /* clean up Ethernet Tx/Rx queues */
2988 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { 2999 for (i = 0; i < adap->sge.ethqsets; i++) {
3000 eq = &adap->sge.ethrxq[i];
2989 if (eq->rspq.desc) 3001 if (eq->rspq.desc)
2990 free_rspq_fl(adap, &eq->rspq, 3002 free_rspq_fl(adap, &eq->rspq,
2991 eq->fl.size ? &eq->fl : NULL); 3003 eq->fl.size ? &eq->fl : NULL);
3004
3005 etq = &adap->sge.ethtxq[i];
2992 if (etq->q.desc) { 3006 if (etq->q.desc) {
2993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 3007 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
2994 etq->q.cntxt_id); 3008 etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
2557} 2557}
2558 2558
2559#define EEPROM_STAT_ADDR 0x7bfc 2559#define EEPROM_STAT_ADDR 0x7bfc
2560#define VPD_SIZE 0x800
2560#define VPD_BASE 0x400 2561#define VPD_BASE 0x400
2561#define VPD_BASE_OLD 0 2562#define VPD_BASE_OLD 0
2562#define VPD_LEN 1024 2563#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2594 if (!vpd) 2595 if (!vpd)
2595 return -ENOMEM; 2596 return -ENOMEM;
2596 2597
2598 /* We have two VPD data structures stored in the adapter VPD area.
2599 * By default, Linux calculates the size of the VPD area by traversing
2600 * the first VPD area at offset 0x0, so we need to tell the OS what
2601 * our real VPD size is.
2602 */
2603 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2604 if (ret < 0)
2605 goto out;
2606
2597 /* Card information normally starts at VPD_BASE but early cards had 2607 /* Card information normally starts at VPD_BASE but early cards had
2598 * it at 0. 2608 * it at 0.
2599 */ 2609 */
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
6940} 6950}
6941 6951
6942/** 6952/**
6953 * t4_iq_stop - stop an ingress queue and its FLs
6954 * @adap: the adapter
6955 * @mbox: mailbox to use for the FW command
6956 * @pf: the PF owning the queues
6957 * @vf: the VF owning the queues
6958 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
6959 * @iqid: ingress queue id
6960 * @fl0id: FL0 queue id or 0xffff if no attached FL0
6961 * @fl1id: FL1 queue id or 0xffff if no attached FL1
6962 *
6963 * Stops an ingress queue and its associated FLs, if any. This causes
6964 * any current or future data/messages destined for these queues to be
6965 * tossed.
6966 */
6967int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
6968 unsigned int vf, unsigned int iqtype, unsigned int iqid,
6969 unsigned int fl0id, unsigned int fl1id)
6970{
6971 struct fw_iq_cmd c;
6972
6973 memset(&c, 0, sizeof(c));
6974 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
6975 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
6976 FW_IQ_CMD_VFN_V(vf));
6977 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
6978 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
6979 c.iqid = cpu_to_be16(iqid);
6980 c.fl0id = cpu_to_be16(fl0id);
6981 c.fl1id = cpu_to_be16(fl1id);
6982 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6983}
6984
6985/**
6943 * t4_iq_free - free an ingress queue and its FLs 6986 * t4_iq_free - free an ingress queue and its FLs
6944 * @adap: the adapter 6987 * @adap: the adapter
6945 * @mbox: mailbox to use for the FW command 6988 * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
169 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
169 170
170 /* T6 adapters: 171 /* T6 adapters:
171 */ 172 */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 37c081583084..08243c2ff4b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -943,8 +943,8 @@ fec_restart(struct net_device *ndev)
943 else 943 else
944 val &= ~FEC_RACC_OPTIONS; 944 val &= ~FEC_RACC_OPTIONS;
945 writel(val, fep->hwp + FEC_RACC); 945 writel(val, fep->hwp + FEC_RACC);
946 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
946 } 947 }
947 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
948#endif 948#endif
949 949
950 /* 950 /*
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 37d0cce392be..e8d36aaea223 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -469,7 +469,7 @@ struct hnae_ae_ops {
469 u32 *tx_usecs, u32 *rx_usecs); 469 u32 *tx_usecs, u32 *rx_usecs);
470 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle, 470 void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
471 u32 *tx_frames, u32 *rx_frames); 471 u32 *tx_frames, u32 *rx_frames);
472 void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); 472 int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
473 int (*set_coalesce_frames)(struct hnae_handle *handle, 473 int (*set_coalesce_frames)(struct hnae_handle *handle,
474 u32 coalesce_frames); 474 u32 coalesce_frames);
475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); 475 void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 285c893ab135..a1cb461ac45f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i]; 159 ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
160 160
161 ring_pair_cb->used_by_vf = 1; 161 ring_pair_cb->used_by_vf = 1;
162 if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
163 ring_pair_cb->port_id_in_dsa = port_idx;
164 else
165 ring_pair_cb->port_id_in_dsa = 0;
166
167 ring_pair_cb++; 162 ring_pair_cb++;
168 } 163 }
169 164
@@ -453,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
453static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle, 448static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
454 u32 *tx_usecs, u32 *rx_usecs) 449 u32 *tx_usecs, u32 *rx_usecs)
455{ 450{
456 int port; 451 struct ring_pair_cb *ring_pair =
457 452 container_of(handle->qs[0], struct ring_pair_cb, q);
458 port = hns_ae_map_eport_to_dport(handle->eport_id);
459 453
460 *tx_usecs = hns_rcb_get_coalesce_usecs( 454 *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
461 hns_ae_get_dsaf_dev(handle->dev), 455 ring_pair->port_id_in_comm);
462 hns_dsaf_get_comm_idx_by_port(port)); 456 *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
463 *rx_usecs = hns_rcb_get_coalesce_usecs( 457 ring_pair->port_id_in_comm);
464 hns_ae_get_dsaf_dev(handle->dev),
465 hns_dsaf_get_comm_idx_by_port(port));
466} 458}
467 459
468static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle, 460static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
469 u32 *tx_frames, u32 *rx_frames) 461 u32 *tx_frames, u32 *rx_frames)
470{ 462{
471 int port; 463 struct ring_pair_cb *ring_pair =
464 container_of(handle->qs[0], struct ring_pair_cb, q);
472 465
473 assert(handle); 466 *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
474 467 ring_pair->port_id_in_comm);
475 port = hns_ae_map_eport_to_dport(handle->eport_id); 468 *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
476 469 ring_pair->port_id_in_comm);
477 *tx_frames = hns_rcb_get_coalesced_frames(
478 hns_ae_get_dsaf_dev(handle->dev), port);
479 *rx_frames = hns_rcb_get_coalesced_frames(
480 hns_ae_get_dsaf_dev(handle->dev), port);
481} 470}
482 471
483static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle, 472static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
484 u32 timeout) 473 u32 timeout)
485{ 474{
486 int port; 475 struct ring_pair_cb *ring_pair =
476 container_of(handle->qs[0], struct ring_pair_cb, q);
487 477
488 assert(handle); 478 return hns_rcb_set_coalesce_usecs(
489 479 ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
490 port = hns_ae_map_eport_to_dport(handle->eport_id);
491
492 hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
493 port, timeout);
494} 480}
495 481
496static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, 482static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
497 u32 coalesce_frames) 483 u32 coalesce_frames)
498{ 484{
499 int port; 485 struct ring_pair_cb *ring_pair =
500 int ret; 486 container_of(handle->qs[0], struct ring_pair_cb, q);
501 487
502 assert(handle); 488 return hns_rcb_set_coalesced_frames(
503 489 ring_pair->rcb_common,
504 port = hns_ae_map_eport_to_dport(handle->eport_id); 490 ring_pair->port_id_in_comm, coalesce_frames);
505
506 ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
507 port, coalesce_frames);
508 return ret;
509} 491}
510 492
511void hns_ae_update_stats(struct hnae_handle *handle, 493void hns_ae_update_stats(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 6e2b76ede075..44abb08de155 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -664,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
664 return; 664 return;
665 665
666 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) { 666 for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
667 snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc); 667 snprintf(buff, ETH_GSTRING_LEN, "%s",
668 g_gmac_stats_string[i].desc);
668 buff = buff + ETH_GSTRING_LEN; 669 buff = buff + ETH_GSTRING_LEN;
669 } 670 }
670} 671}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5c1ac9ba1bf2..5978a5c8ef35 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2219,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
2219 /* dsaf onode registers */ 2219 /* dsaf onode registers */
2220 for (i = 0; i < DSAF_XOD_NUM; i++) { 2220 for (i = 0; i < DSAF_XOD_NUM; i++) {
2221 p[311 + i] = dsaf_read_dev(ddev, 2221 p[311 + i] = dsaf_read_dev(ddev,
2222 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90); 2222 DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
2223 p[319 + i] = dsaf_read_dev(ddev, 2223 p[319 + i] = dsaf_read_dev(ddev,
2224 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90); 2224 DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
2225 p[327 + i] = dsaf_read_dev(ddev, 2225 p[327 + i] = dsaf_read_dev(ddev,
2226 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90); 2226 DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
2227 p[335 + i] = dsaf_read_dev(ddev, 2227 p[335 + i] = dsaf_read_dev(ddev,
2228 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90); 2228 DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
2229 p[343 + i] = dsaf_read_dev(ddev, 2229 p[343 + i] = dsaf_read_dev(ddev,
2230 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90); 2230 DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
2231 p[351 + i] = dsaf_read_dev(ddev, 2231 p[351 + i] = dsaf_read_dev(ddev,
2232 DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90); 2232 DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
2233 } 2233 }
2234 2234
2235 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90); 2235 p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be42241..e69b02287c44 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
244 */ 244 */
245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) 245phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
246{ 246{
247 u32 hilink3_mode; 247 u32 mode;
248 u32 hilink4_mode; 248 u32 reg;
249 u32 shift;
250 bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
249 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; 251 void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
250 int dev_id = mac_cb->mac_id; 252 int mac_id = mac_cb->mac_id;
251 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; 253 phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
252 254
253 hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG); 255 if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
254 hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG); 256 phy_if = PHY_INTERFACE_MODE_SGMII;
255 if (dev_id >= 0 && dev_id <= 3) { 257 } else if (mac_id >= 0 && mac_id <= 3) {
256 if (hilink4_mode == 0) 258 reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
257 phy_if = PHY_INTERFACE_MODE_SGMII; 259 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
258 else 260 /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
261 shift = is_ver1 ? 0 : mac_id;
262 if (dsaf_get_bit(mode, shift))
259 phy_if = PHY_INTERFACE_MODE_XGMII; 263 phy_if = PHY_INTERFACE_MODE_XGMII;
260 } else if (dev_id >= 4 && dev_id <= 5) {
261 if (hilink3_mode == 0)
262 phy_if = PHY_INTERFACE_MODE_SGMII;
263 else 264 else
265 phy_if = PHY_INTERFACE_MODE_SGMII;
266 } else if (mac_id >= 4 && mac_id <= 7) {
267 reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
268 mode = dsaf_read_reg(sys_ctl_vaddr, reg);
269 /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
270 shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
271 if (dsaf_get_bit(mode, shift))
264 phy_if = PHY_INTERFACE_MODE_XGMII; 272 phy_if = PHY_INTERFACE_MODE_XGMII;
265 } else { 273 else
266 phy_if = PHY_INTERFACE_MODE_SGMII; 274 phy_if = PHY_INTERFACE_MODE_SGMII;
267 } 275 }
268
269 dev_dbg(mac_cb->dev,
270 "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
271 hilink3_mode, hilink4_mode, dev_id, phy_if);
272 return phy_if; 276 return phy_if;
273} 277}
274 278
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 12188807468c..28ee26e5c478 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, 215 dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
216 bd_size_type); 216 bd_size_type);
217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, 217 dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
218 ring_pair->port_id_in_dsa); 218 ring_pair->port_id_in_comm);
219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG, 219 dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
220 ring_pair->port_id_in_dsa); 220 ring_pair->port_id_in_comm);
221 } else { 221 } else {
222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG, 222 dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
223 (u32)dma); 223 (u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, 227 dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
228 bd_size_type); 228 bd_size_type);
229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, 229 dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
230 ring_pair->port_id_in_dsa); 230 ring_pair->port_id_in_comm);
231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG, 231 dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
232 ring_pair->port_id_in_dsa); 232 ring_pair->port_id_in_comm);
233 } 233 }
234} 234}
235 235
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
256 desc_cnt); 256 desc_cnt);
257} 257}
258 258
259/** 259static void hns_rcb_set_port_timeout(
260 *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames 260 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
261 *@rcb_common: rcb_common device
262 *@port_idx:port index
263 *@coalesced_frames:BD num for coalesced frames
264 */
265static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
266 u32 port_idx,
267 u32 coalesced_frames)
268{
269 if (coalesced_frames >= rcb_common->desc_num ||
270 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
271 return -EINVAL;
272
273 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
274 coalesced_frames);
275 return 0;
276}
277
278/**
279 *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
280 *@rcb_common: rcb_common device
281 *@port_idx:port index
282 * return coaleseced frames value
283 */
284static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
285 u32 port_idx)
286{ 261{
287 if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) 262 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
288 port_idx = 0; 263 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
289 264 timeout * HNS_RCB_CLK_FREQ_MHZ);
290 return dsaf_read_dev(rcb_common, 265 else
291 RCB_CFG_PKTLINE_REG + port_idx * 4); 266 dsaf_write_dev(rcb_common,
292} 267 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
293 268 timeout);
294/**
295 *hns_rcb_set_timeout - set rcb port coalesced time_out
296 *@rcb_common: rcb_common device
297 *@time_out:time for coalesced time_out
298 */
299static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
300 u32 timeout)
301{
302 dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
303} 269}
304 270
305static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) 271static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
361 327
362 for (i = 0; i < port_num; i++) { 328 for (i = 0; i < port_num; i++) {
363 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num); 329 hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
364 (void)hns_rcb_set_port_coalesced_frames( 330 (void)hns_rcb_set_coalesced_frames(
365 rcb_common, i, rcb_common->coalesced_frames); 331 rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
332 hns_rcb_set_port_timeout(
333 rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
366 } 334 }
367 hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
368 335
369 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, 336 dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
370 HNS_RCB_COMMON_ENDIAN); 337 HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
460 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING); 427 hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
461} 428}
462 429
463static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) 430static int hns_rcb_get_port_in_comm(
431 struct rcb_common_cb *rcb_common, int ring_idx)
464{ 432{
465 int comm_index = rcb_common->comm_index; 433 int comm_index = rcb_common->comm_index;
466 int port; 434 int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
470 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; 438 q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
471 port = ring_idx / q_num; 439 port = ring_idx / q_num;
472 } else { 440 } else {
473 port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1; 441 port = 0; /* config debug-ports port_id_in_comm to 0*/
474 } 442 }
475 443
476 return port; 444 return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
518 ring_pair_cb->index = i; 486 ring_pair_cb->index = i;
519 ring_pair_cb->q.io_base = 487 ring_pair_cb->q.io_base =
520 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); 488 RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
521 ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); 489 ring_pair_cb->port_id_in_comm =
490 hns_rcb_get_port_in_comm(rcb_common, i);
522 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = 491 ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
523 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : 492 is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
524 platform_get_irq(pdev, base_irq_idx + i * 3 + 1); 493 platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
534/** 503/**
535 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames 504 *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
536 *@rcb_common: rcb_common device 505 *@rcb_common: rcb_common device
537 *@comm_index:port index 506 *@port_idx:port id in comm
538 *return coalesced_frames 507 *
508 *Returns: coalesced_frames
539 */ 509 */
540u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port) 510u32 hns_rcb_get_coalesced_frames(
511 struct rcb_common_cb *rcb_common, u32 port_idx)
541{ 512{
542 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 513 return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
543 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
544
545 return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
546} 514}
547 515
548/** 516/**
549 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out 517 *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
550 *@rcb_common: rcb_common device 518 *@rcb_common: rcb_common device
551 *@comm_index:port index 519 *@port_idx:port id in comm
552 *return time_out 520 *
521 *Returns: time_out
553 */ 522 */
554u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index) 523u32 hns_rcb_get_coalesce_usecs(
524 struct rcb_common_cb *rcb_common, u32 port_idx)
555{ 525{
556 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index]; 526 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
557 527 return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
558 return rcb_comm->timeout; 528 HNS_RCB_CLK_FREQ_MHZ;
529 else
530 return dsaf_read_dev(rcb_common,
531 RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
559} 532}
560 533
561/** 534/**
562 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out 535 *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
563 *@rcb_common: rcb_common device 536 *@rcb_common: rcb_common device
564 *@comm_index: comm :index 537 *@port_idx:port id in comm
565 *@etx_usecs:tx time for coalesced time_out 538 *@timeout:tx/rx time for coalesced time_out
566 *@rx_usecs:rx time for coalesced time_out 539 *
540 * Returns:
541 * Zero for success, or an error code in case of failure
567 */ 542 */
568void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 543int hns_rcb_set_coalesce_usecs(
569 int port, u32 timeout) 544 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
570{ 545{
571 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 546 u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
572 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
573 547
574 if (rcb_comm->timeout == timeout) 548 if (timeout == old_timeout)
575 return; 549 return 0;
576 550
577 if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { 551 if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
578 dev_err(dsaf_dev->dev, 552 if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
579 "error: not support coalesce_usecs setting!\n"); 553 dev_err(rcb_common->dsaf_dev->dev,
580 return; 554 "error: not support coalesce_usecs setting!\n");
555 return -EINVAL;
556 }
581 } 557 }
582 rcb_comm->timeout = timeout; 558 if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
583 hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout); 559 dev_err(rcb_common->dsaf_dev->dev,
560 "error: not support coalesce %dus!\n", timeout);
561 return -EINVAL;
562 }
563 hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
564 return 0;
584} 565}
585 566
586/** 567/**
587 *hns_rcb_set_coalesced_frames - set rcb coalesced frames 568 *hns_rcb_set_coalesced_frames - set rcb coalesced frames
588 *@rcb_common: rcb_common device 569 *@rcb_common: rcb_common device
589 *@tx_frames:tx BD num for coalesced frames 570 *@port_idx:port id in comm
590 *@rx_frames:rx BD num for coalesced frames 571 *@coalesced_frames:tx/rx BD num for coalesced frames
591 *Return 0 on success, negative on failure 572 *
573 * Returns:
574 * Zero for success, or an error code in case of failure
592 */ 575 */
593int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 576int hns_rcb_set_coalesced_frames(
594 int port, u32 coalesced_frames) 577 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
595{ 578{
596 int comm_index = hns_dsaf_get_comm_idx_by_port(port); 579 u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
597 struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
598 u32 coalesced_reg_val;
599 int ret;
600 580
601 coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port); 581 if (coalesced_frames == old_waterline)
602
603 if (coalesced_reg_val == coalesced_frames)
604 return 0; 582 return 0;
605 583
606 if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) { 584 if (coalesced_frames >= rcb_common->desc_num ||
607 ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port, 585 coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
608 coalesced_frames); 586 coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
609 return ret; 587 dev_err(rcb_common->dsaf_dev->dev,
610 } else { 588 "error: not support coalesce_frames setting!\n");
611 return -EINVAL; 589 return -EINVAL;
612 } 590 }
591
592 dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
593 coalesced_frames);
594 return 0;
613} 595}
614 596
615/** 597/**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
749 rcb_common->dsaf_dev = dsaf_dev; 731 rcb_common->dsaf_dev = dsaf_dev;
750 732
751 rcb_common->desc_num = dsaf_dev->desc_num; 733 rcb_common->desc_num = dsaf_dev->desc_num;
752 rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
753 rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
754 734
755 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); 735 hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
756 rcb_common->max_vfn = max_vfn; 736 rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
951void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) 931void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
952{ 932{
953 u32 *regs = data; 933 u32 *regs = data;
934 bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
935 bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
936 u32 reg_tmp;
937 u32 reg_num_tmp;
954 u32 i = 0; 938 u32 i = 0;
955 939
956 /*rcb common registers */ 940 /*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
1004 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); 988 = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
1005 } 989 }
1006 990
1007 regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG); 991 reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
1008 regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); 992 reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
1009 regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); 993 for (i = 0; i < reg_num_tmp; i++)
994 regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
995
996 regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
997 regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
1010 998
1011 /* mark end of rcb common regs */ 999 /* mark end of rcb common regs */
1012 for (i = 73; i < 80; i++) 1000 for (i = 78; i < 80; i++)
1013 regs[i] = 0xcccccccc; 1001 regs[i] = 0xcccccccc;
1014} 1002}
1015 1003
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f849973..eb61014ad615 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@ struct rcb_common_cb;
38#define HNS_RCB_MAX_COALESCED_FRAMES 1023 38#define HNS_RCB_MAX_COALESCED_FRAMES 1023
39#define HNS_RCB_MIN_COALESCED_FRAMES 1 39#define HNS_RCB_MIN_COALESCED_FRAMES 1
40#define HNS_RCB_DEF_COALESCED_FRAMES 50 40#define HNS_RCB_DEF_COALESCED_FRAMES 50
41#define HNS_RCB_MAX_TIME_OUT 0x500 41#define HNS_RCB_CLK_FREQ_MHZ 350
42#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
43#define HNS_RCB_DEF_COALESCED_USECS 3
42 44
43#define HNS_RCB_COMMON_ENDIAN 1 45#define HNS_RCB_COMMON_ENDIAN 1
44 46
@@ -82,7 +84,7 @@ struct ring_pair_cb {
82 84
83 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE]; 85 int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
84 86
85 u8 port_id_in_dsa; 87 u8 port_id_in_comm;
86 u8 used_by_vf; 88 u8 used_by_vf;
87 89
88 struct hns_ring_hw_stats hw_stats; 90 struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
97 99
98 u8 comm_index; 100 u8 comm_index;
99 u32 ring_num; 101 u32 ring_num;
100 u32 coalesced_frames; /* frames threshold of rx interrupt */
101 u32 timeout; /* time threshold of rx interrupt */
102 u32 desc_num; /* desc num per queue*/ 102 u32 desc_num; /* desc num per queue*/
103 103
104 struct ring_pair_cb ring_pair_cb[0]; 104 struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
125void hns_rcb_init_hw(struct ring_pair_cb *ring); 125void hns_rcb_init_hw(struct ring_pair_cb *ring);
126void hns_rcb_reset_ring_hw(struct hnae_queue *q); 126void hns_rcb_reset_ring_hw(struct hnae_queue *q);
127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); 127void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
128 128u32 hns_rcb_get_coalesced_frames(
129u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index); 129 struct rcb_common_cb *rcb_common, u32 port_idx);
130u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index); 130u32 hns_rcb_get_coalesce_usecs(
131void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev, 131 struct rcb_common_cb *rcb_common, u32 port_idx);
132 int comm_index, u32 timeout); 132int hns_rcb_set_coalesce_usecs(
133int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev, 133 struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
134 int comm_index, u32 coalesce_frames); 134int hns_rcb_set_coalesced_frames(
135 struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
135void hns_rcb_update_stats(struct hnae_queue *queue); 136void hns_rcb_update_stats(struct hnae_queue *queue);
136 137
137void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data); 138void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index bf62687e5ea7..7d7204f45e78 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -103,6 +103,8 @@
103/*serdes offset**/ 103/*serdes offset**/
104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 104#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 105#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
106#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
107#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
106#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL 108#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
107#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL 109#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
108#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL 110#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -404,6 +406,7 @@
404#define RCB_CFG_OVERTIME_REG 0x9300 406#define RCB_CFG_OVERTIME_REG 0x9300
405#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304 407#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
406#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308 408#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
409#define RCB_PORT_CFG_OVERTIME_REG 0x9430
407 410
408#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000 411#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
409#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004 412#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 71aa37b4b338..687204b780b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -913,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data) 913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
914{ 914{
915 struct hnae_ring *ring = ring_data->ring; 915 struct hnae_ring *ring = ring_data->ring;
916 int head = ring->next_to_clean; 916 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
917
918 /* for hardware bug fixed */
919 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
920 917
921 if (head != ring->next_to_clean) { 918 if (head != ring->next_to_clean) {
922 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 919 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -959,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
959 napi_complete(napi); 956 napi_complete(napi);
960 ring_data->ring->q->handle->dev->ops->toggle_ring_irq( 957 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
961 ring_data->ring, 0); 958 ring_data->ring, 0);
962 959 if (ring_data->fini_process)
963 ring_data->fini_process(ring_data); 960 ring_data->fini_process(ring_data);
964 return 0; 961 return 0;
965 } 962 }
966 963
@@ -1723,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1723{ 1720{
1724 struct hnae_handle *h = priv->ae_handle; 1721 struct hnae_handle *h = priv->ae_handle;
1725 struct hns_nic_ring_data *rd; 1722 struct hns_nic_ring_data *rd;
1723 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
1726 int i; 1724 int i;
1727 1725
1728 if (h->q_num > NIC_MAX_Q_PER_VF) { 1726 if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1740,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1740 rd->queue_index = i; 1738 rd->queue_index = i;
1741 rd->ring = &h->qs[i]->tx_ring; 1739 rd->ring = &h->qs[i]->tx_ring;
1742 rd->poll_one = hns_nic_tx_poll_one; 1740 rd->poll_one = hns_nic_tx_poll_one;
1743 rd->fini_process = hns_nic_tx_fini_pro; 1741 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
1744 1742
1745 netif_napi_add(priv->netdev, &rd->napi, 1743 netif_napi_add(priv->netdev, &rd->napi,
1746 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 1744 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1752,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1752 rd->ring = &h->qs[i - h->q_num]->rx_ring; 1750 rd->ring = &h->qs[i - h->q_num]->rx_ring;
1753 rd->poll_one = hns_nic_rx_poll_one; 1751 rd->poll_one = hns_nic_rx_poll_one;
1754 rd->ex_process = hns_nic_rx_up_pro; 1752 rd->ex_process = hns_nic_rx_up_pro;
1755 rd->fini_process = hns_nic_rx_fini_pro; 1753 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
1756 1754
1757 netif_napi_add(priv->netdev, &rd->napi, 1755 netif_napi_add(priv->netdev, &rd->napi,
1758 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 1756 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1816,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
1816 h = hnae_get_handle(&priv->netdev->dev, 1814 h = hnae_get_handle(&priv->netdev->dev,
1817 priv->ae_node, priv->port_id, NULL); 1815 priv->ae_node, priv->port_id, NULL);
1818 if (IS_ERR_OR_NULL(h)) { 1816 if (IS_ERR_OR_NULL(h)) {
1819 ret = PTR_ERR(h); 1817 ret = -ENODEV;
1820 dev_dbg(priv->dev, "has not handle, register notifier!\n"); 1818 dev_dbg(priv->dev, "has not handle, register notifier!\n");
1821 goto out; 1819 goto out;
1822 } 1820 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 9c3ba65988e1..3d746c887873 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
794 (!ops->set_coalesce_frames)) 794 (!ops->set_coalesce_frames))
795 return -ESRCH; 795 return -ESRCH;
796 796
797 ops->set_coalesce_usecs(priv->ae_handle, 797 ret = ops->set_coalesce_usecs(priv->ae_handle,
798 ec->rx_coalesce_usecs); 798 ec->rx_coalesce_usecs);
799 if (ret)
800 return ret;
799 801
800 ret = ops->set_coalesce_frames( 802 ret = ops->set_coalesce_frames(
801 priv->ae_handle, 803 priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
1013 struct phy_device *phy_dev = priv->phy; 1015 struct phy_device *phy_dev = priv->phy;
1014 1016
1015 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); 1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
1016 retval = phy_write(phy_dev, HNS_LED_FC_REG, value); 1018 retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
1017 retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); 1019 retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
1018 if (retval) { 1020 if (retval) {
1019 netdev_err(netdev, "mdiobus_write fail !\n"); 1021 netdev_err(netdev, "mdiobus_write fail !\n");
1020 return retval; 1022 return retval;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..ae90d4f12b70 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
3106 return __e1000_maybe_stop_tx(netdev, size); 3106 return __e1000_maybe_stop_tx(netdev, size);
3107} 3107}
3108 3108
3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) 3109#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev) 3111 struct net_device *netdev)
3112{ 3112{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3256 nr_frags, mss); 3256 nr_frags, mss);
3257 3257
3258 if (count) { 3258 if (count) {
3259 /* The descriptors needed is higher than other Intel drivers
3260 * due to a number of workarounds. The breakdown is below:
3261 * Data descriptors: MAX_SKB_FRAGS + 1
3262 * Context Descriptor: 1
3263 * Keep head from touching tail: 2
3264 * Workarounds: 3
3265 */
3266 int desc_needed = MAX_SKB_FRAGS + 7;
3267
3259 netdev_sent_queue(netdev, skb->len); 3268 netdev_sent_queue(netdev, skb->len);
3260 skb_tx_timestamp(skb); 3269 skb_tx_timestamp(skb);
3261 3270
3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3271 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3272
3273 /* 82544 potentially requires twice as many data descriptors
3274 * in order to guarantee buffers don't end on evenly-aligned
3275 * dwords
3276 */
3277 if (adapter->pcix_82544)
3278 desc_needed += MAX_SKB_FRAGS + 1;
3279
3263 /* Make sure there is space in the ring for the next send. */ 3280 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3281 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3265 3282
3266 if (!skb->xmit_more || 3283 if (!skb->xmit_more ||
3267 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { 3284 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1223 if (err) 1223 if (err)
1224 return err; 1224 return err;
1225 1225
1226 /* verify upper 16 bits are zero */
1227 if (vid >> 16)
1228 return FM10K_ERR_PARAM;
1229
1230 set = !(vid & FM10K_VLAN_CLEAR); 1226 set = !(vid & FM10K_VLAN_CLEAR);
1231 vid &= ~FM10K_VLAN_CLEAR; 1227 vid &= ~FM10K_VLAN_CLEAR;
1232 1228
1233 err = fm10k_iov_select_vid(vf_info, (u16)vid); 1229 /* if the length field has been set, this is a multi-bit
1234 if (err < 0) 1230 * update request. For multi-bit requests, simply disallow
1235 return err; 1231 * them when the pf_vid has been set. In this case, the PF
1232 * should have already cleared the VLAN_TABLE, and if we
1233 * allowed them, it could allow a rogue VF to receive traffic
1234 * on a VLAN it was not assigned. In the single-bit case, we
1235 * need to modify requests for VLAN 0 to use the default PF or
1236 * SW vid when assigned.
1237 */
1236 1238
1237 vid = err; 1239 if (vid >> 16) {
1240 /* prevent multi-bit requests when PF has
1241 * administratively set the VLAN for this VF
1242 */
1243 if (vf_info->pf_vid)
1244 return FM10K_ERR_PARAM;
1245 } else {
1246 err = fm10k_iov_select_vid(vf_info, (u16)vid);
1247 if (err < 0)
1248 return err;
1249
1250 vid = err;
1251 }
1238 1252
1239 /* update VSI info for VF in regards to VLAN table */ 1253 /* update VSI info for VF in regards to VLAN table */
1240 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); 1254 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 67006431726a..344912957cab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | 8559 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8560 I40E_FLAG_WB_ON_ITR_CAPABLE | 8560 I40E_FLAG_WB_ON_ITR_CAPABLE |
8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | 8561 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8562 I40E_FLAG_NO_PCI_LINK_CHECK |
8562 I40E_FLAG_100M_SGMII_CAPABLE | 8563 I40E_FLAG_100M_SGMII_CAPABLE |
8563 I40E_FLAG_USE_SET_LLDP_MIB | 8564 I40E_FLAG_USE_SET_LLDP_MIB |
8564 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8565 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2594} 2594}
2595 2595
2596/** 2596/**
2597 * __i40e_chk_linearize - Check if there are more than 8 fragments per packet 2597 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2598 * @skb: send buffer 2598 * @skb: send buffer
2599 * 2599 *
2600 * Note: Our HW can't scatter-gather more than 8 fragments to build 2600 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2601 * a packet on the wire and so we need to figure out the cases where we 2601 * and so we need to figure out the cases where we need to linearize the skb.
2602 * need to linearize the skb. 2602 *
2603 * For TSO we need to count the TSO header and segment payload separately.
2604 * As such we need to check cases where we have 7 fragments or more as we
2605 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2606 * the segment payload in the first descriptor, and another 7 for the
2607 * fragments.
2603 **/ 2608 **/
2604bool __i40e_chk_linearize(struct sk_buff *skb) 2609bool __i40e_chk_linearize(struct sk_buff *skb)
2605{ 2610{
2606 const struct skb_frag_struct *frag, *stale; 2611 const struct skb_frag_struct *frag, *stale;
2607 int gso_size, nr_frags, sum; 2612 int nr_frags, sum;
2608
2609 /* check to see if TSO is enabled, if so we may get a repreive */
2610 gso_size = skb_shinfo(skb)->gso_size;
2611 if (unlikely(!gso_size))
2612 return true;
2613 2613
2614 /* no need to check if number of frags is less than 8 */ 2614 /* no need to check if number of frags is less than 7 */
2615 nr_frags = skb_shinfo(skb)->nr_frags; 2615 nr_frags = skb_shinfo(skb)->nr_frags;
2616 if (nr_frags < I40E_MAX_BUFFER_TXD) 2616 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2617 return false; 2617 return false;
2618 2618
2619 /* We need to walk through the list and validate that each group 2619 /* We need to walk through the list and validate that each group
2620 * of 6 fragments totals at least gso_size. However we don't need 2620 * of 6 fragments totals at least gso_size. However we don't need
2621 * to perform such validation on the first or last 6 since the first 2621 * to perform such validation on the last 6 since the last 6 cannot
2622 * 6 cannot inherit any data from a descriptor before them, and the 2622 * inherit any data from a descriptor after them.
2623 * last 6 cannot inherit any data from a descriptor after them.
2624 */ 2623 */
2625 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 2624 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2626 frag = &skb_shinfo(skb)->frags[0]; 2625 frag = &skb_shinfo(skb)->frags[0];
2627 2626
2628 /* Initialize size to the negative value of gso_size minus 1. We 2627 /* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2631 * descriptors for a single transmit as the header and previous 2630 * descriptors for a single transmit as the header and previous
2632 * fragment are already consuming 2 descriptors. 2631 * fragment are already consuming 2 descriptors.
2633 */ 2632 */
2634 sum = 1 - gso_size; 2633 sum = 1 - skb_shinfo(skb)->gso_size;
2635 2634
2636 /* Add size of frags 1 through 5 to create our initial sum */ 2635 /* Add size of frags 0 through 4 to create our initial sum */
2637 sum += skb_frag_size(++frag); 2636 sum += skb_frag_size(frag++);
2638 sum += skb_frag_size(++frag); 2637 sum += skb_frag_size(frag++);
2639 sum += skb_frag_size(++frag); 2638 sum += skb_frag_size(frag++);
2640 sum += skb_frag_size(++frag); 2639 sum += skb_frag_size(frag++);
2641 sum += skb_frag_size(++frag); 2640 sum += skb_frag_size(frag++);
2642 2641
2643 /* Walk through fragments adding latest fragment, testing it, and 2642 /* Walk through fragments adding latest fragment, testing it, and
2644 * then removing stale fragments from the sum. 2643 * then removing stale fragments from the sum.
2645 */ 2644 */
2646 stale = &skb_shinfo(skb)->frags[0]; 2645 stale = &skb_shinfo(skb)->frags[0];
2647 for (;;) { 2646 for (;;) {
2648 sum += skb_frag_size(++frag); 2647 sum += skb_frag_size(frag++);
2649 2648
2650 /* if sum is negative we failed to make sufficient progress */ 2649 /* if sum is negative we failed to make sufficient progress */
2651 if (sum < 0) 2650 if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2655 if (!--nr_frags) 2654 if (!--nr_frags)
2656 break; 2655 break;
2657 2656
2658 sum -= skb_frag_size(++stale); 2657 sum -= skb_frag_size(stale++);
2659 } 2658 }
2660 2659
2661 return false; 2660 return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
413 **/ 413 **/
414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
415{ 415{
416 /* we can only support up to 8 data buffers for a single send */ 416 /* Both TSO and single send will work if count is less than 8 */
417 if (likely(count <= I40E_MAX_BUFFER_TXD)) 417 if (likely(count < I40E_MAX_BUFFER_TXD))
418 return false; 418 return false;
419 419
420 return __i40e_chk_linearize(skb); 420 if (skb_is_gso(skb))
421 return __i40e_chk_linearize(skb);
422
423 /* we can support up to 8 data buffers for a single send */
424 return count != I40E_MAX_BUFFER_TXD;
421} 425}
422#endif /* _I40E_TXRX_H_ */ 426#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1796} 1796}
1797 1797
1798/** 1798/**
1799 * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet 1799 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
1800 * @skb: send buffer 1800 * @skb: send buffer
1801 * 1801 *
1802 * Note: Our HW can't scatter-gather more than 8 fragments to build 1802 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
1803 * a packet on the wire and so we need to figure out the cases where we 1803 * and so we need to figure out the cases where we need to linearize the skb.
1804 * need to linearize the skb. 1804 *
1805 * For TSO we need to count the TSO header and segment payload separately.
1806 * As such we need to check cases where we have 7 fragments or more as we
1807 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1808 * the segment payload in the first descriptor, and another 7 for the
1809 * fragments.
1805 **/ 1810 **/
1806bool __i40evf_chk_linearize(struct sk_buff *skb) 1811bool __i40evf_chk_linearize(struct sk_buff *skb)
1807{ 1812{
1808 const struct skb_frag_struct *frag, *stale; 1813 const struct skb_frag_struct *frag, *stale;
1809 int gso_size, nr_frags, sum; 1814 int nr_frags, sum;
1810
1811 /* check to see if TSO is enabled, if so we may get a repreive */
1812 gso_size = skb_shinfo(skb)->gso_size;
1813 if (unlikely(!gso_size))
1814 return true;
1815 1815
1816 /* no need to check if number of frags is less than 8 */ 1816 /* no need to check if number of frags is less than 7 */
1817 nr_frags = skb_shinfo(skb)->nr_frags; 1817 nr_frags = skb_shinfo(skb)->nr_frags;
1818 if (nr_frags < I40E_MAX_BUFFER_TXD) 1818 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
1819 return false; 1819 return false;
1820 1820
1821 /* We need to walk through the list and validate that each group 1821 /* We need to walk through the list and validate that each group
1822 * of 6 fragments totals at least gso_size. However we don't need 1822 * of 6 fragments totals at least gso_size. However we don't need
1823 * to perform such validation on the first or last 6 since the first 1823 * to perform such validation on the last 6 since the last 6 cannot
1824 * 6 cannot inherit any data from a descriptor before them, and the 1824 * inherit any data from a descriptor after them.
1825 * last 6 cannot inherit any data from a descriptor after them.
1826 */ 1825 */
1827 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 1826 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
1828 frag = &skb_shinfo(skb)->frags[0]; 1827 frag = &skb_shinfo(skb)->frags[0];
1829 1828
1830 /* Initialize size to the negative value of gso_size minus 1. We 1829 /* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1833 * descriptors for a single transmit as the header and previous 1832 * descriptors for a single transmit as the header and previous
1834 * fragment are already consuming 2 descriptors. 1833 * fragment are already consuming 2 descriptors.
1835 */ 1834 */
1836 sum = 1 - gso_size; 1835 sum = 1 - skb_shinfo(skb)->gso_size;
1837 1836
1838 /* Add size of frags 1 through 5 to create our initial sum */ 1837 /* Add size of frags 0 through 4 to create our initial sum */
1839 sum += skb_frag_size(++frag); 1838 sum += skb_frag_size(frag++);
1840 sum += skb_frag_size(++frag); 1839 sum += skb_frag_size(frag++);
1841 sum += skb_frag_size(++frag); 1840 sum += skb_frag_size(frag++);
1842 sum += skb_frag_size(++frag); 1841 sum += skb_frag_size(frag++);
1843 sum += skb_frag_size(++frag); 1842 sum += skb_frag_size(frag++);
1844 1843
1845 /* Walk through fragments adding latest fragment, testing it, and 1844 /* Walk through fragments adding latest fragment, testing it, and
1846 * then removing stale fragments from the sum. 1845 * then removing stale fragments from the sum.
1847 */ 1846 */
1848 stale = &skb_shinfo(skb)->frags[0]; 1847 stale = &skb_shinfo(skb)->frags[0];
1849 for (;;) { 1848 for (;;) {
1850 sum += skb_frag_size(++frag); 1849 sum += skb_frag_size(frag++);
1851 1850
1852 /* if sum is negative we failed to make sufficient progress */ 1851 /* if sum is negative we failed to make sufficient progress */
1853 if (sum < 0) 1852 if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1857 if (!--nr_frags) 1856 if (!--nr_frags)
1858 break; 1857 break;
1859 1858
1860 sum -= skb_frag_size(++stale); 1859 sum -= skb_frag_size(stale++);
1861 } 1860 }
1862 1861
1863 return false; 1862 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
395 **/ 395 **/
396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
397{ 397{
398 /* we can only support up to 8 data buffers for a single send */ 398 /* Both TSO and single send will work if count is less than 8 */
399 if (likely(count <= I40E_MAX_BUFFER_TXD)) 399 if (likely(count < I40E_MAX_BUFFER_TXD))
400 return false; 400 return false;
401 401
402 return __i40evf_chk_linearize(skb); 402 if (skb_is_gso(skb))
403 return __i40evf_chk_linearize(skb);
404
405 /* we can support up to 8 data buffers for a single send */
406 return count != I40E_MAX_BUFFER_TXD;
403} 407}
404#endif /* _I40E_TXRX_H_ */ 408#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 84fa28ceb200..e4949af7dd6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 661#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) 662#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) 663#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
664#ifdef CONFIG_IXGBE_VXLAN
665#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) 664#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
666#endif
667#define IXGBE_FLAG2_VLAN_PROMISC BIT(13) 665#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
668 666
669 /* Tx fast path data */ 667 /* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
675 int num_rx_queues; 673 int num_rx_queues;
676 u16 rx_itr_setting; 674 u16 rx_itr_setting;
677 675
676 /* Port number used to identify VXLAN traffic */
677 __be16 vxlan_port;
678
678 /* TX */ 679 /* TX */
679 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; 680 struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
680 681
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
782 u32 timer_event_accumulator; 783 u32 timer_event_accumulator;
783 u32 vferr_refcount; 784 u32 vferr_refcount;
784 struct ixgbe_mac_addr *mac_table; 785 struct ixgbe_mac_addr *mac_table;
785#ifdef CONFIG_IXGBE_VXLAN
786 u16 vxlan_port;
787#endif
788 struct kobject *info_kobj; 786 struct kobject *info_kobj;
789#ifdef CONFIG_IXGBE_HWMON 787#ifdef CONFIG_IXGBE_HWMON
790 struct hwmon_buff *ixgbe_hwmon_buff; 788 struct hwmon_buff *ixgbe_hwmon_buff;
@@ -879,6 +877,8 @@ extern const char ixgbe_driver_version[];
879extern char ixgbe_default_device_descr[]; 877extern char ixgbe_default_device_descr[];
880#endif /* IXGBE_FCOE */ 878#endif /* IXGBE_FCOE */
881 879
880int ixgbe_open(struct net_device *netdev);
881int ixgbe_close(struct net_device *netdev);
882void ixgbe_up(struct ixgbe_adapter *adapter); 882void ixgbe_up(struct ixgbe_adapter *adapter);
883void ixgbe_down(struct ixgbe_adapter *adapter); 883void ixgbe_down(struct ixgbe_adapter *adapter);
884void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 884void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 726e0eeee63b..b3530e1e3ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
2053 2053
2054 if (if_running) 2054 if (if_running)
2055 /* indicate we're in test mode */ 2055 /* indicate we're in test mode */
2056 dev_close(netdev); 2056 ixgbe_close(netdev);
2057 else 2057 else
2058 ixgbe_reset(adapter); 2058 ixgbe_reset(adapter);
2059 2059
@@ -2091,7 +2091,7 @@ skip_loopback:
2091 /* clear testing bit and return adapter to previous state */ 2091 /* clear testing bit and return adapter to previous state */
2092 clear_bit(__IXGBE_TESTING, &adapter->state); 2092 clear_bit(__IXGBE_TESTING, &adapter->state);
2093 if (if_running) 2093 if (if_running)
2094 dev_open(netdev); 2094 ixgbe_open(netdev);
2095 else if (hw->mac.ops.disable_tx_laser) 2095 else if (hw->mac.ops.disable_tx_laser)
2096 hw->mac.ops.disable_tx_laser(hw); 2096 hw->mac.ops.disable_tx_laser(hw);
2097 } else { 2097 } else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 569cb0757c93..7df3fe29b210 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4531,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4531 case ixgbe_mac_X550: 4531 case ixgbe_mac_X550:
4532 case ixgbe_mac_X550EM_x: 4532 case ixgbe_mac_X550EM_x:
4533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); 4533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4534#ifdef CONFIG_IXGBE_VXLAN
4535 adapter->vxlan_port = 0; 4534 adapter->vxlan_port = 0;
4536#endif
4537 break; 4535 break;
4538 default: 4536 default:
4539 break; 4537 break;
@@ -5994,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5994 * handler is registered with the OS, the watchdog timer is started, 5992 * handler is registered with the OS, the watchdog timer is started,
5995 * and the stack is notified that the interface is ready. 5993 * and the stack is notified that the interface is ready.
5996 **/ 5994 **/
5997static int ixgbe_open(struct net_device *netdev) 5995int ixgbe_open(struct net_device *netdev)
5998{ 5996{
5999 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5997 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6000 struct ixgbe_hw *hw = &adapter->hw; 5998 struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6096 * needs to be disabled. A global MAC reset is issued to stop the 6094 * needs to be disabled. A global MAC reset is issued to stop the
6097 * hardware, and all transmit and receive resources are freed. 6095 * hardware, and all transmit and receive resources are freed.
6098 **/ 6096 **/
6099static int ixgbe_close(struct net_device *netdev) 6097int ixgbe_close(struct net_device *netdev)
6100{ 6098{
6101 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6099 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6102 6100
@@ -7560,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7560 struct ipv6hdr *ipv6; 7558 struct ipv6hdr *ipv6;
7561 } hdr; 7559 } hdr;
7562 struct tcphdr *th; 7560 struct tcphdr *th;
7561 unsigned int hlen;
7563 struct sk_buff *skb; 7562 struct sk_buff *skb;
7564#ifdef CONFIG_IXGBE_VXLAN
7565 u8 encap = false;
7566#endif /* CONFIG_IXGBE_VXLAN */
7567 __be16 vlan_id; 7563 __be16 vlan_id;
7564 int l4_proto;
7568 7565
7569 /* if ring doesn't have a interrupt vector, cannot perform ATR */ 7566 /* if ring doesn't have a interrupt vector, cannot perform ATR */
7570 if (!q_vector) 7567 if (!q_vector)
@@ -7576,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7576 7573
7577 ring->atr_count++; 7574 ring->atr_count++;
7578 7575
7576 /* currently only IPv4/IPv6 with TCP is supported */
7577 if ((first->protocol != htons(ETH_P_IP)) &&
7578 (first->protocol != htons(ETH_P_IPV6)))
7579 return;
7580
7579 /* snag network header to get L4 type and address */ 7581 /* snag network header to get L4 type and address */
7580 skb = first->skb; 7582 skb = first->skb;
7581 hdr.network = skb_network_header(skb); 7583 hdr.network = skb_network_header(skb);
7582 if (!skb->encapsulation) {
7583 th = tcp_hdr(skb);
7584 } else {
7585#ifdef CONFIG_IXGBE_VXLAN 7584#ifdef CONFIG_IXGBE_VXLAN
7585 if (skb->encapsulation &&
7586 first->protocol == htons(ETH_P_IP) &&
7587 hdr.ipv4->protocol != IPPROTO_UDP) {
7586 struct ixgbe_adapter *adapter = q_vector->adapter; 7588 struct ixgbe_adapter *adapter = q_vector->adapter;
7587 7589
7588 if (!adapter->vxlan_port) 7590 /* verify the port is recognized as VXLAN */
7589 return; 7591 if (adapter->vxlan_port &&
7590 if (first->protocol != htons(ETH_P_IP) || 7592 udp_hdr(skb)->dest == adapter->vxlan_port)
7591 hdr.ipv4->version != IPVERSION || 7593 hdr.network = skb_inner_network_header(skb);
7592 hdr.ipv4->protocol != IPPROTO_UDP) {
7593 return;
7594 }
7595 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7596 return;
7597 encap = true;
7598 hdr.network = skb_inner_network_header(skb);
7599 th = inner_tcp_hdr(skb);
7600#else
7601 return;
7602#endif /* CONFIG_IXGBE_VXLAN */
7603 } 7594 }
7595#endif /* CONFIG_IXGBE_VXLAN */
7604 7596
7605 /* Currently only IPv4/IPv6 with TCP is supported */ 7597 /* Currently only IPv4/IPv6 with TCP is supported */
7606 switch (hdr.ipv4->version) { 7598 switch (hdr.ipv4->version) {
7607 case IPVERSION: 7599 case IPVERSION:
7608 if (hdr.ipv4->protocol != IPPROTO_TCP) 7600 /* access ihl as u8 to avoid unaligned access on ia64 */
7609 return; 7601 hlen = (hdr.network[0] & 0x0F) << 2;
7602 l4_proto = hdr.ipv4->protocol;
7610 break; 7603 break;
7611 case 6: 7604 case 6:
7612 if (likely((unsigned char *)th - hdr.network == 7605 hlen = hdr.network - skb->data;
7613 sizeof(struct ipv6hdr))) { 7606 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
7614 if (hdr.ipv6->nexthdr != IPPROTO_TCP) 7607 hlen -= hdr.network - skb->data;
7615 return;
7616 } else {
7617 __be16 frag_off;
7618 u8 l4_hdr;
7619
7620 ipv6_skip_exthdr(skb, hdr.network - skb->data +
7621 sizeof(struct ipv6hdr),
7622 &l4_hdr, &frag_off);
7623 if (unlikely(frag_off))
7624 return;
7625 if (l4_hdr != IPPROTO_TCP)
7626 return;
7627 }
7628 break; 7608 break;
7629 default: 7609 default:
7630 return; 7610 return;
7631 } 7611 }
7632 7612
7633 /* skip this packet since it is invalid or the socket is closing */ 7613 if (l4_proto != IPPROTO_TCP)
7634 if (!th || th->fin) 7614 return;
7615
7616 th = (struct tcphdr *)(hdr.network + hlen);
7617
7618 /* skip this packet since the socket is closing */
7619 if (th->fin)
7635 return; 7620 return;
7636 7621
7637 /* sample on all syn packets or once every atr sample count */ 7622 /* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7682 break; 7667 break;
7683 } 7668 }
7684 7669
7685#ifdef CONFIG_IXGBE_VXLAN 7670 if (hdr.network != skb_network_header(skb))
7686 if (encap)
7687 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; 7671 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7688#endif /* CONFIG_IXGBE_VXLAN */
7689 7672
7690 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 7673 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
7691 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, 7674 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8209,10 +8192,17 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8209static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 8192static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8210 struct tc_cls_u32_offload *cls) 8193 struct tc_cls_u32_offload *cls)
8211{ 8194{
8195 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8196 u32 loc;
8212 int err; 8197 int err;
8213 8198
8199 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8200 return -EINVAL;
8201
8202 loc = cls->knode.handle & 0xfffff;
8203
8214 spin_lock(&adapter->fdir_perfect_lock); 8204 spin_lock(&adapter->fdir_perfect_lock);
8215 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); 8205 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8216 spin_unlock(&adapter->fdir_perfect_lock); 8206 spin_unlock(&adapter->fdir_perfect_lock);
8217 return err; 8207 return err;
8218} 8208}
@@ -8221,20 +8211,30 @@ static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8221 __be16 protocol, 8211 __be16 protocol,
8222 struct tc_cls_u32_offload *cls) 8212 struct tc_cls_u32_offload *cls)
8223{ 8213{
8214 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8215
8216 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8217 return -EINVAL;
8218
8224 /* This ixgbe devices do not support hash tables at the moment 8219 /* This ixgbe devices do not support hash tables at the moment
8225 * so abort when given hash tables. 8220 * so abort when given hash tables.
8226 */ 8221 */
8227 if (cls->hnode.divisor > 0) 8222 if (cls->hnode.divisor > 0)
8228 return -EINVAL; 8223 return -EINVAL;
8229 8224
8230 set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8225 set_bit(uhtid - 1, &adapter->tables);
8231 return 0; 8226 return 0;
8232} 8227}
8233 8228
8234static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, 8229static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8235 struct tc_cls_u32_offload *cls) 8230 struct tc_cls_u32_offload *cls)
8236{ 8231{
8237 clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); 8232 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8233
8234 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8235 return -EINVAL;
8236
8237 clear_bit(uhtid - 1, &adapter->tables);
8238 return 0; 8238 return 0;
8239} 8239}
8240 8240
@@ -8252,27 +8252,29 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8252#endif 8252#endif
8253 int i, err = 0; 8253 int i, err = 0;
8254 u8 queue; 8254 u8 queue;
8255 u32 handle; 8255 u32 uhtid, link_uhtid;
8256 8256
8257 memset(&mask, 0, sizeof(union ixgbe_atr_input)); 8257 memset(&mask, 0, sizeof(union ixgbe_atr_input));
8258 handle = cls->knode.handle; 8258 uhtid = TC_U32_USERHTID(cls->knode.handle);
8259 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8259 8260
8260 /* At the moment cls_u32 jumps to transport layer and skips past 8261 /* At the moment cls_u32 jumps to network layer and skips past
8261 * L2 headers. The canonical method to match L2 frames is to use 8262 * L2 headers. The canonical method to match L2 frames is to use
8262 * negative values. However this is error prone at best but really 8263 * negative values. However this is error prone at best but really
8263 * just broken because there is no way to "know" what sort of hdr 8264 * just broken because there is no way to "know" what sort of hdr
8264 * is in front of the transport layer. Fix cls_u32 to support L2 8265 * is in front of the network layer. Fix cls_u32 to support L2
8265 * headers when needed. 8266 * headers when needed.
8266 */ 8267 */
8267 if (protocol != htons(ETH_P_IP)) 8268 if (protocol != htons(ETH_P_IP))
8268 return -EINVAL; 8269 return -EINVAL;
8269 8270
8270 if (cls->knode.link_handle || 8271 if (link_uhtid) {
8271 cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
8272 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; 8272 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
8273 u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8274 8273
8275 if (!test_bit(uhtid, &adapter->tables)) 8274 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
8275 return -EINVAL;
8276
8277 if (!test_bit(link_uhtid - 1, &adapter->tables))
8276 return -EINVAL; 8278 return -EINVAL;
8277 8279
8278 for (i = 0; nexthdr[i].jump; i++) { 8280 for (i = 0; nexthdr[i].jump; i++) {
@@ -8288,10 +8290,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8288 nexthdr->mask != cls->knode.sel->keys[0].mask) 8290 nexthdr->mask != cls->knode.sel->keys[0].mask)
8289 return -EINVAL; 8291 return -EINVAL;
8290 8292
8291 if (uhtid >= IXGBE_MAX_LINK_HANDLE) 8293 adapter->jump_tables[link_uhtid] = nexthdr->jump;
8292 return -EINVAL;
8293
8294 adapter->jump_tables[uhtid] = nexthdr->jump;
8295 } 8294 }
8296 return 0; 8295 return 0;
8297 } 8296 }
@@ -8308,13 +8307,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8308 * To add support for new nodes update ixgbe_model.h parse structures 8307 * To add support for new nodes update ixgbe_model.h parse structures
8309 * this function _should_ be generic try not to hardcode values here. 8308 * this function _should_ be generic try not to hardcode values here.
8310 */ 8309 */
8311 if (TC_U32_USERHTID(handle) == 0x800) { 8310 if (uhtid == 0x800) {
8312 field_ptr = adapter->jump_tables[0]; 8311 field_ptr = adapter->jump_tables[0];
8313 } else { 8312 } else {
8314 if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) 8313 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8315 return -EINVAL; 8314 return -EINVAL;
8316 8315
8317 field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; 8316 field_ptr = adapter->jump_tables[uhtid];
8318 } 8317 }
8319 8318
8320 if (!field_ptr) 8319 if (!field_ptr)
@@ -8332,8 +8331,7 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8332 int j; 8331 int j;
8333 8332
8334 for (j = 0; field_ptr[j].val; j++) { 8333 for (j = 0; field_ptr[j].val; j++) {
8335 if (field_ptr[j].off == off && 8334 if (field_ptr[j].off == off) {
8336 field_ptr[j].mask == m) {
8337 field_ptr[j].val(input, &mask, val, m); 8335 field_ptr[j].val(input, &mask, val, m);
8338 input->filter.formatted.flow_type |= 8336 input->filter.formatted.flow_type |=
8339 field_ptr[j].type; 8337 field_ptr[j].type;
@@ -8393,8 +8391,8 @@ err_out:
8393 return -EINVAL; 8391 return -EINVAL;
8394} 8392}
8395 8393
8396int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 8394static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
8397 struct tc_to_netdev *tc) 8395 struct tc_to_netdev *tc)
8398{ 8396{
8399 struct ixgbe_adapter *adapter = netdev_priv(dev); 8397 struct ixgbe_adapter *adapter = netdev_priv(dev);
8400 8398
@@ -8554,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8554{ 8552{
8555 struct ixgbe_adapter *adapter = netdev_priv(dev); 8553 struct ixgbe_adapter *adapter = netdev_priv(dev);
8556 struct ixgbe_hw *hw = &adapter->hw; 8554 struct ixgbe_hw *hw = &adapter->hw;
8557 u16 new_port = ntohs(port);
8558 8555
8559 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8556 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8560 return; 8557 return;
@@ -8562,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8562 if (sa_family == AF_INET6) 8559 if (sa_family == AF_INET6)
8563 return; 8560 return;
8564 8561
8565 if (adapter->vxlan_port == new_port) 8562 if (adapter->vxlan_port == port)
8566 return; 8563 return;
8567 8564
8568 if (adapter->vxlan_port) { 8565 if (adapter->vxlan_port) {
8569 netdev_info(dev, 8566 netdev_info(dev,
8570 "Hit Max num of VXLAN ports, not adding port %d\n", 8567 "Hit Max num of VXLAN ports, not adding port %d\n",
8571 new_port); 8568 ntohs(port));
8572 return; 8569 return;
8573 } 8570 }
8574 8571
8575 adapter->vxlan_port = new_port; 8572 adapter->vxlan_port = port;
8576 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); 8573 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
8577} 8574}
8578 8575
8579/** 8576/**
@@ -8586,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8586 __be16 port) 8583 __be16 port)
8587{ 8584{
8588 struct ixgbe_adapter *adapter = netdev_priv(dev); 8585 struct ixgbe_adapter *adapter = netdev_priv(dev);
8589 u16 new_port = ntohs(port);
8590 8586
8591 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) 8587 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8592 return; 8588 return;
@@ -8594,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8594 if (sa_family == AF_INET6) 8590 if (sa_family == AF_INET6)
8595 return; 8591 return;
8596 8592
8597 if (adapter->vxlan_port != new_port) { 8593 if (adapter->vxlan_port != port) {
8598 netdev_info(dev, "Port %d was not found, not deleting\n", 8594 netdev_info(dev, "Port %d was not found, not deleting\n",
8599 new_port); 8595 ntohs(port));
8600 return; 8596 return;
8601 } 8597 }
8602 8598
@@ -9265,17 +9261,6 @@ skip_sriov:
9265 netdev->priv_flags |= IFF_UNICAST_FLT; 9261 netdev->priv_flags |= IFF_UNICAST_FLT;
9266 netdev->priv_flags |= IFF_SUPP_NOFCS; 9262 netdev->priv_flags |= IFF_SUPP_NOFCS;
9267 9263
9268#ifdef CONFIG_IXGBE_VXLAN
9269 switch (adapter->hw.mac.type) {
9270 case ixgbe_mac_X550:
9271 case ixgbe_mac_X550EM_x:
9272 netdev->hw_enc_features |= NETIF_F_RXCSUM;
9273 break;
9274 default:
9275 break;
9276 }
9277#endif /* CONFIG_IXGBE_VXLAN */
9278
9279#ifdef CONFIG_IXGBE_DCB 9264#ifdef CONFIG_IXGBE_DCB
9280 netdev->dcbnl_ops = &dcbnl_ops; 9265 netdev->dcbnl_ops = &dcbnl_ops;
9281#endif 9266#endif
@@ -9329,6 +9314,8 @@ skip_sriov:
9329 goto err_sw_init; 9314 goto err_sw_init;
9330 } 9315 }
9331 9316
9317 /* Set hw->mac.addr to permanent MAC address */
9318 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
9332 ixgbe_mac_set_default_filter(adapter); 9319 ixgbe_mac_set_default_filter(adapter);
9333 9320
9334 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 9321 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index ce48872d4782..74c53ad9d268 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -32,7 +32,6 @@
32 32
33struct ixgbe_mat_field { 33struct ixgbe_mat_field {
34 unsigned int off; 34 unsigned int off;
35 unsigned int mask;
36 int (*val)(struct ixgbe_fdir_filter *input, 35 int (*val)(struct ixgbe_fdir_filter *input,
37 union ixgbe_atr_input *mask, 36 union ixgbe_atr_input *mask,
38 u32 val, u32 m); 37 u32 val, u32 m);
@@ -58,35 +57,27 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
58} 57}
59 58
60static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { 59static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
61 { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, 60 { .off = 12, .val = ixgbe_mat_prgm_sip,
62 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 61 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
63 { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, 62 { .off = 16, .val = ixgbe_mat_prgm_dip,
64 .type = IXGBE_ATR_FLOW_TYPE_IPV4}, 63 .type = IXGBE_ATR_FLOW_TYPE_IPV4},
65 { .val = NULL } /* terminal node */ 64 { .val = NULL } /* terminal node */
66}; 65};
67 66
68static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, 67static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
69 union ixgbe_atr_input *mask, 68 union ixgbe_atr_input *mask,
70 u32 val, u32 m) 69 u32 val, u32 m)
71{ 70{
72 input->filter.formatted.src_port = val & 0xffff; 71 input->filter.formatted.src_port = val & 0xffff;
73 mask->formatted.src_port = m & 0xffff; 72 mask->formatted.src_port = m & 0xffff;
74 return 0; 73 input->filter.formatted.dst_port = val >> 16;
75}; 74 mask->formatted.dst_port = m >> 16;
76 75
77static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
78 union ixgbe_atr_input *mask,
79 u32 val, u32 m)
80{
81 input->filter.formatted.dst_port = val & 0xffff;
82 mask->formatted.dst_port = m & 0xffff;
83 return 0; 76 return 0;
84}; 77};
85 78
86static struct ixgbe_mat_field ixgbe_tcp_fields[] = { 79static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
87 {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, 80 {.off = 0, .val = ixgbe_mat_prgm_ports,
88 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
89 {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
90 .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, 81 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
91 { .val = NULL } /* terminal node */ 82 { .val = NULL } /* terminal node */
92}; 83};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f7c3de..68a9c646498e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
355 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); 355 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
356 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY)) 356 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
357 break; 357 break;
358 usleep_range(10, 20); 358 udelay(10);
359 } 359 }
360 if (ctrl) 360 if (ctrl)
361 *ctrl = command; 361 *ctrl = command;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef613b0a..d7aa4b203f40 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
680 680
681 if (if_running) 681 if (if_running)
682 /* indicate we're in test mode */ 682 /* indicate we're in test mode */
683 dev_close(netdev); 683 ixgbevf_close(netdev);
684 else 684 else
685 ixgbevf_reset(adapter); 685 ixgbevf_reset(adapter);
686 686
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
692 692
693 clear_bit(__IXGBEVF_TESTING, &adapter->state); 693 clear_bit(__IXGBEVF_TESTING, &adapter->state);
694 if (if_running) 694 if (if_running)
695 dev_open(netdev); 695 ixgbevf_open(netdev);
696 } else { 696 } else {
697 hw_dbg(&adapter->hw, "online testing starting\n"); 697 hw_dbg(&adapter->hw, "online testing starting\n");
698 /* Online tests */ 698 /* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa04fd..991eeae81473 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
486extern const char ixgbevf_driver_name[]; 486extern const char ixgbevf_driver_name[];
487extern const char ixgbevf_driver_version[]; 487extern const char ixgbevf_driver_version[];
488 488
489int ixgbevf_open(struct net_device *netdev);
490int ixgbevf_close(struct net_device *netdev);
489void ixgbevf_up(struct ixgbevf_adapter *adapter); 491void ixgbevf_up(struct ixgbevf_adapter *adapter);
490void ixgbevf_down(struct ixgbevf_adapter *adapter); 492void ixgbevf_down(struct ixgbevf_adapter *adapter);
491void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 493void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ea14c0a2e74..b0edae94d73d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3122 * handler is registered with the OS, the watchdog timer is started, 3122 * handler is registered with the OS, the watchdog timer is started,
3123 * and the stack is notified that the interface is ready. 3123 * and the stack is notified that the interface is ready.
3124 **/ 3124 **/
3125static int ixgbevf_open(struct net_device *netdev) 3125int ixgbevf_open(struct net_device *netdev)
3126{ 3126{
3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3127 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3128 struct ixgbe_hw *hw = &adapter->hw; 3128 struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3205,7 @@ err_setup_reset:
3205 * needs to be disabled. A global MAC reset is issued to stop the 3205 * needs to be disabled. A global MAC reset is issued to stop the
3206 * hardware, and all transmit and receive resources are freed. 3206 * hardware, and all transmit and receive resources are freed.
3207 **/ 3207 **/
3208static int ixgbevf_close(struct net_device *netdev) 3208int ixgbevf_close(struct net_device *netdev)
3209{ 3209{
3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3210 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3211 3211
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3692 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3692 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3693 struct ixgbe_hw *hw = &adapter->hw; 3693 struct ixgbe_hw *hw = &adapter->hw;
3694 struct sockaddr *addr = p; 3694 struct sockaddr *addr = p;
3695 int err;
3695 3696
3696 if (!is_valid_ether_addr(addr->sa_data)) 3697 if (!is_valid_ether_addr(addr->sa_data))
3697 return -EADDRNOTAVAIL; 3698 return -EADDRNOTAVAIL;
3698 3699
3699 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3700 ether_addr_copy(hw->mac.addr, addr->sa_data);
3701
3702 spin_lock_bh(&adapter->mbx_lock); 3700 spin_lock_bh(&adapter->mbx_lock);
3703 3701
3704 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3702 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
3705 3703
3706 spin_unlock_bh(&adapter->mbx_lock); 3704 spin_unlock_bh(&adapter->mbx_lock);
3707 3705
3706 if (err)
3707 return -EPERM;
3708
3709 ether_addr_copy(hw->mac.addr, addr->sa_data);
3710 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3711
3708 return 0; 3712 return 0;
3709} 3713}
3710 3714
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4c5746..4d613a4f2a7f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
408 408
409 /* if nacked the address was rejected, use "perm_addr" */ 409 /* if nacked the address was rejected, use "perm_addr" */
410 if (!ret_val && 410 if (!ret_val &&
411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) 411 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); 412 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
413 return IXGBE_ERR_MBX;
414 }
413 415
414 return ret_val; 416 return ret_val;
415} 417}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 577f7ca7deba..a6d26d351dfc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -260,7 +260,6 @@
260 260
261#define MVNETA_VLAN_TAG_LEN 4 261#define MVNETA_VLAN_TAG_LEN 4
262 262
263#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
264#define MVNETA_TX_CSUM_DEF_SIZE 1600 263#define MVNETA_TX_CSUM_DEF_SIZE 1600
265#define MVNETA_TX_CSUM_MAX_SIZE 9800 264#define MVNETA_TX_CSUM_MAX_SIZE 9800
266#define MVNETA_ACC_MODE_EXT1 1 265#define MVNETA_ACC_MODE_EXT1 1
@@ -300,7 +299,7 @@
300#define MVNETA_RX_PKT_SIZE(mtu) \ 299#define MVNETA_RX_PKT_SIZE(mtu) \
301 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ 300 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
302 ETH_HLEN + ETH_FCS_LEN, \ 301 ETH_HLEN + ETH_FCS_LEN, \
303 MVNETA_CPU_D_CACHE_LINE_SIZE) 302 cache_line_size())
304 303
305#define IS_TSO_HEADER(txq, addr) \ 304#define IS_TSO_HEADER(txq, addr) \
306 ((addr >= txq->tso_hdrs_phys) && \ 305 ((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
2764 if (rxq->descs == NULL) 2763 if (rxq->descs == NULL)
2765 return -ENOMEM; 2764 return -ENOMEM;
2766 2765
2767 BUG_ON(rxq->descs !=
2768 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2769
2770 rxq->last_desc = rxq->size - 1; 2766 rxq->last_desc = rxq->size - 1;
2771 2767
2772 /* Set Rx descriptors queue starting address */ 2768 /* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
2837 if (txq->descs == NULL) 2833 if (txq->descs == NULL)
2838 return -ENOMEM; 2834 return -ENOMEM;
2839 2835
2840 /* Make sure descriptor address is cache line size aligned */
2841 BUG_ON(txq->descs !=
2842 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2843
2844 txq->last_desc = txq->size - 1; 2836 txq->last_desc = txq->size - 1;
2845 2837
2846 /* Set maximum bandwidth for enabled TXQs */ 2838 /* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
3050 return mtu; 3042 return mtu;
3051} 3043}
3052 3044
3045static void mvneta_percpu_enable(void *arg)
3046{
3047 struct mvneta_port *pp = arg;
3048
3049 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3050}
3051
3052static void mvneta_percpu_disable(void *arg)
3053{
3054 struct mvneta_port *pp = arg;
3055
3056 disable_percpu_irq(pp->dev->irq);
3057}
3058
3053/* Change the device mtu */ 3059/* Change the device mtu */
3054static int mvneta_change_mtu(struct net_device *dev, int mtu) 3060static int mvneta_change_mtu(struct net_device *dev, int mtu)
3055{ 3061{
@@ -3074,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3074 * reallocation of the queues 3080 * reallocation of the queues
3075 */ 3081 */
3076 mvneta_stop_dev(pp); 3082 mvneta_stop_dev(pp);
3083 on_each_cpu(mvneta_percpu_disable, pp, true);
3077 3084
3078 mvneta_cleanup_txqs(pp); 3085 mvneta_cleanup_txqs(pp);
3079 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
3097 return ret; 3104 return ret;
3098 } 3105 }
3099 3106
3107 on_each_cpu(mvneta_percpu_enable, pp, true);
3100 mvneta_start_dev(pp); 3108 mvneta_start_dev(pp);
3101 mvneta_port_up(pp); 3109 mvneta_port_up(pp);
3102 3110
@@ -3250,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
3250 pp->phy_dev = NULL; 3258 pp->phy_dev = NULL;
3251} 3259}
3252 3260
3253static void mvneta_percpu_enable(void *arg)
3254{
3255 struct mvneta_port *pp = arg;
3256
3257 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3258}
3259
3260static void mvneta_percpu_disable(void *arg)
3261{
3262 struct mvneta_port *pp = arg;
3263
3264 disable_percpu_irq(pp->dev->irq);
3265}
3266
3267/* Electing a CPU must be done in an atomic way: it should be done 3261/* Electing a CPU must be done in an atomic way: it should be done
3268 * after or before the removal/insertion of a CPU and this function is 3262 * after or before the removal/insertion of a CPU and this function is
3269 * not reentrant. 3263 * not reentrant.
@@ -3360,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
3360 /* Enable per-CPU interrupts on the CPU that is 3354 /* Enable per-CPU interrupts on the CPU that is
3361 * brought up. 3355 * brought up.
3362 */ 3356 */
3363 smp_call_function_single(cpu, mvneta_percpu_enable, 3357 mvneta_percpu_enable(pp);
3364 pp, true);
3365 3358
3366 /* Enable per-CPU interrupt on the one CPU we care 3359 /* Enable per-CPU interrupt on the one CPU we care
3367 * about. 3360 * about.
@@ -3393,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
3393 /* Disable per-CPU interrupts on the CPU that is 3386 /* Disable per-CPU interrupts on the CPU that is
3394 * brought down. 3387 * brought down.
3395 */ 3388 */
3396 smp_call_function_single(cpu, mvneta_percpu_disable, 3389 mvneta_percpu_disable(pp);
3397 pp, true);
3398 3390
3399 break; 3391 break;
3400 case CPU_DEAD: 3392 case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971aefab..868a957f24bb 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
321/* Lbtd 802.3 type */ 321/* Lbtd 802.3 type */
322#define MVPP2_IP_LBDT_TYPE 0xfffa 322#define MVPP2_IP_LBDT_TYPE 0xfffa
323 323
324#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
325#define MVPP2_TX_CSUM_MAX_SIZE 9800 324#define MVPP2_TX_CSUM_MAX_SIZE 9800
326 325
327/* Timeout constants */ 326/* Timeout constants */
@@ -377,7 +376,7 @@
377 376
378#define MVPP2_RX_PKT_SIZE(mtu) \ 377#define MVPP2_RX_PKT_SIZE(mtu) \
379 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 378 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
380 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 379 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
381 380
382#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 381#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
383#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 382#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4493 if (!aggr_txq->descs) 4492 if (!aggr_txq->descs)
4494 return -ENOMEM; 4493 return -ENOMEM;
4495 4494
4496 /* Make sure descriptor address is cache line size aligned */
4497 BUG_ON(aggr_txq->descs !=
4498 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4499
4500 aggr_txq->last_desc = aggr_txq->size - 1; 4495 aggr_txq->last_desc = aggr_txq->size - 1;
4501 4496
4502 /* Aggr TXQ no reset WA */ 4497 /* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
4526 if (!rxq->descs) 4521 if (!rxq->descs)
4527 return -ENOMEM; 4522 return -ENOMEM;
4528 4523
4529 BUG_ON(rxq->descs !=
4530 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4531
4532 rxq->last_desc = rxq->size - 1; 4524 rxq->last_desc = rxq->size - 1;
4533 4525
4534 /* Zero occupied and non-occupied counters - direct access */ 4526 /* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4616 if (!txq->descs) 4608 if (!txq->descs)
4617 return -ENOMEM; 4609 return -ENOMEM;
4618 4610
4619 /* Make sure descriptor address is cache line size aligned */
4620 BUG_ON(txq->descs !=
4621 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4622
4623 txq->last_desc = txq->size - 1; 4611 txq->last_desc = txq->size - 1;
4624 4612
4625 /* Set Tx descriptors queue starting address - indirect access */ 4613 /* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
6059 6047
6060 /* Map physical Rx queue to port's logical Rx queue */ 6048 /* Map physical Rx queue to port's logical Rx queue */
6061 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 6049 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6062 if (!rxq) 6050 if (!rxq) {
6051 err = -ENOMEM;
6063 goto err_free_percpu; 6052 goto err_free_percpu;
6053 }
6064 /* Map this Rx queue to a physical queue */ 6054 /* Map this Rx queue to a physical queue */
6065 rxq->id = port->first_rxq + queue; 6055 rxq->id = port->first_rxq + queue;
6066 rxq->port = port->id; 6056 rxq->port = port->id;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07dad6a3..c442f6ad15ff 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
979 return 0; 979 return 0;
980 980
981 pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); 981 pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
982 if (IS_ERR(pep->phy))
983 return PTR_ERR(pep->phy);
982 if (!pep->phy) 984 if (!pep->phy)
983 return -ENODEV; 985 return -ENODEV;
984 986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
337 case ETH_SS_STATS: 337 case ETH_SS_STATS:
338 return bitmap_iterator_count(&it) + 338 return bitmap_iterator_count(&it) +
339 (priv->tx_ring_num * 2) + 339 (priv->tx_ring_num * 2) +
340 (priv->rx_ring_num * 2); 340 (priv->rx_ring_num * 3);
341 case ETH_SS_TEST: 341 case ETH_SS_TEST:
342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; 343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
404 for (i = 0; i < priv->rx_ring_num; i++) { 404 for (i = 0; i < priv->rx_ring_num; i++) {
405 data[index++] = priv->rx_ring[i]->packets; 405 data[index++] = priv->rx_ring[i]->packets;
406 data[index++] = priv->rx_ring[i]->bytes; 406 data[index++] = priv->rx_ring[i]->bytes;
407 data[index++] = priv->rx_ring[i]->dropped;
407 } 408 }
408 spin_unlock_bh(&priv->stats_lock); 409 spin_unlock_bh(&priv->stats_lock);
409 410
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
477 "rx%d_packets", i); 478 "rx%d_packets", i);
478 sprintf(data + (index++) * ETH_GSTRING_LEN, 479 sprintf(data + (index++) * ETH_GSTRING_LEN,
479 "rx%d_bytes", i); 480 "rx%d_bytes", i);
481 sprintf(data + (index++) * ETH_GSTRING_LEN,
482 "rx%d_dropped", i);
480 } 483 }
481 break; 484 break;
482 case ETH_SS_PRIV_FLAGS: 485 case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
158 u64 in_mod = reset << 8 | port; 158 u64 in_mod = reset << 8 | port;
159 int err; 159 int err;
160 int i, counter_index; 160 int i, counter_index;
161 unsigned long sw_rx_dropped = 0;
161 162
162 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 163 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
163 if (IS_ERR(mailbox)) 164 if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
180 for (i = 0; i < priv->rx_ring_num; i++) { 181 for (i = 0; i < priv->rx_ring_num; i++) {
181 stats->rx_packets += priv->rx_ring[i]->packets; 182 stats->rx_packets += priv->rx_ring[i]->packets;
182 stats->rx_bytes += priv->rx_ring[i]->bytes; 183 stats->rx_bytes += priv->rx_ring[i]->bytes;
184 sw_rx_dropped += priv->rx_ring[i]->dropped;
183 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; 185 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
184 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; 186 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
185 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; 187 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
236 &mlx4_en_stats->MCAST_prio_1, 238 &mlx4_en_stats->MCAST_prio_1,
237 NUM_PRIORITIES); 239 NUM_PRIORITIES);
238 stats->collisions = 0; 240 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 241 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
242 sw_rx_dropped;
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 243 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = 0; 244 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 245 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
61 gfp_t gfp = _gfp; 61 gfp_t gfp = _gfp;
62 62
63 if (order) 63 if (order)
64 gfp |= __GFP_COMP | __GFP_NOWARN; 64 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
65 page = alloc_pages(gfp, order); 65 page = alloc_pages(gfp, order);
66 if (likely(page)) 66 if (likely(page))
67 break; 67 break;
@@ -126,7 +126,9 @@ out:
126 dma_unmap_page(priv->ddev, page_alloc[i].dma, 126 dma_unmap_page(priv->ddev, page_alloc[i].dma,
127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
128 page = page_alloc[i].page; 128 page = page_alloc[i].page;
129 set_page_count(page, 1); 129 /* Revert changes done by mlx4_alloc_pages */
130 page_ref_sub(page, page_alloc[i].page_size /
131 priv->frag_info[i].frag_stride - 1);
130 put_page(page); 132 put_page(page);
131 } 133 }
132 } 134 }
@@ -176,7 +178,9 @@ out:
176 dma_unmap_page(priv->ddev, page_alloc->dma, 178 dma_unmap_page(priv->ddev, page_alloc->dma,
177 page_alloc->page_size, PCI_DMA_FROMDEVICE); 179 page_alloc->page_size, PCI_DMA_FROMDEVICE);
178 page = page_alloc->page; 180 page = page_alloc->page;
179 set_page_count(page, 1); 181 /* Revert changes done by mlx4_alloc_pages */
182 page_ref_sub(page, page_alloc->page_size /
183 priv->frag_info[i].frag_stride - 1);
180 put_page(page); 184 put_page(page);
181 page_alloc->page = NULL; 185 page_alloc->page = NULL;
182 } 186 }
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
939 /* GRO not possible, complete processing here */ 943 /* GRO not possible, complete processing here */
940 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); 944 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
941 if (!skb) { 945 if (!skb) {
942 priv->stats.rx_dropped++; 946 ring->dropped++;
943 goto next; 947 goto next;
944 } 948 }
945 949
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b7296236..a386f047c1af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
405 u32 packets = 0; 405 u32 packets = 0;
406 u32 bytes = 0; 406 u32 bytes = 0;
407 int factor = priv->cqe_factor; 407 int factor = priv->cqe_factor;
408 u64 timestamp = 0;
409 int done = 0; 408 int done = 0;
410 int budget = priv->tx_work_limit; 409 int budget = priv->tx_work_limit;
411 u32 last_nr_txbb; 410 u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
445 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 444 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
446 445
447 do { 446 do {
447 u64 timestamp = 0;
448
448 txbbs_skipped += last_nr_txbb; 449 txbbs_skipped += last_nr_txbb;
449 ring_index = (ring_index + last_nr_txbb) & size_mask; 450 ring_index = (ring_index + last_nr_txbb) & size_mask;
450 if (ring->tx_info[ring_index].ts_requested) 451
452 if (unlikely(ring->tx_info[ring_index].ts_requested))
451 timestamp = mlx4_en_get_cqe_ts(cqe); 453 timestamp = mlx4_en_get_cqe_ts(cqe);
452 454
453 /* free next descriptor */ 455 /* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
3172 return 0; 3172 return 0;
3173} 3173}
3174 3174
3175static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3176{
3177 struct pci_dev *pdev = dev->persist->pdev;
3178 int err = 0;
3179
3180 mutex_lock(&dev->persist->pci_status_mutex);
3181 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3182 err = pci_enable_device(pdev);
3183 if (!err)
3184 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3185 }
3186 mutex_unlock(&dev->persist->pci_status_mutex);
3187
3188 return err;
3189}
3190
3191static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3192{
3193 struct pci_dev *pdev = dev->persist->pdev;
3194
3195 mutex_lock(&dev->persist->pci_status_mutex);
3196 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3197 pci_disable_device(pdev);
3198 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3199 }
3200 mutex_unlock(&dev->persist->pci_status_mutex);
3201}
3202
3175static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3203static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3176 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3177 int reset_flow) 3205 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3582 3610
3583 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3611 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3584 3612
3585 err = pci_enable_device(pdev); 3613 err = mlx4_pci_enable_device(&priv->dev);
3586 if (err) { 3614 if (err) {
3587 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3615 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3588 return err; 3616 return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
3715 pci_release_regions(pdev); 3743 pci_release_regions(pdev);
3716 3744
3717err_disable_pdev: 3745err_disable_pdev:
3718 pci_disable_device(pdev); 3746 mlx4_pci_disable_device(&priv->dev);
3719 pci_set_drvdata(pdev, NULL); 3747 pci_set_drvdata(pdev, NULL);
3720 return err; 3748 return err;
3721} 3749}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3775 priv->pci_dev_data = id->driver_data; 3803 priv->pci_dev_data = id->driver_data;
3776 mutex_init(&dev->persist->device_state_mutex); 3804 mutex_init(&dev->persist->device_state_mutex);
3777 mutex_init(&dev->persist->interface_state_mutex); 3805 mutex_init(&dev->persist->interface_state_mutex);
3806 mutex_init(&dev->persist->pci_status_mutex);
3778 3807
3779 ret = devlink_register(devlink, &pdev->dev); 3808 ret = devlink_register(devlink, &pdev->dev);
3780 if (ret) 3809 if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
3923 } 3952 }
3924 3953
3925 pci_release_regions(pdev); 3954 pci_release_regions(pdev);
3926 pci_disable_device(pdev); 3955 mlx4_pci_disable_device(dev);
3927 devlink_unregister(devlink); 3956 devlink_unregister(devlink);
3928 kfree(dev->persist); 3957 kfree(dev->persist);
3929 devlink_free(devlink); 3958 devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4042 if (state == pci_channel_io_perm_failure) 4071 if (state == pci_channel_io_perm_failure)
4043 return PCI_ERS_RESULT_DISCONNECT; 4072 return PCI_ERS_RESULT_DISCONNECT;
4044 4073
4045 pci_disable_device(pdev); 4074 mlx4_pci_disable_device(persist->dev);
4046 return PCI_ERS_RESULT_NEED_RESET; 4075 return PCI_ERS_RESULT_NEED_RESET;
4047} 4076}
4048 4077
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4050{ 4079{
4051 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4080 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4052 struct mlx4_dev *dev = persist->dev; 4081 struct mlx4_dev *dev = persist->dev;
4053 struct mlx4_priv *priv = mlx4_priv(dev); 4082 int err;
4054 int ret;
4055 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4056 int total_vfs;
4057 4083
4058 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4084 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4059 ret = pci_enable_device(pdev); 4085 err = mlx4_pci_enable_device(dev);
4060 if (ret) { 4086 if (err) {
4061 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 4087 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4062 return PCI_ERS_RESULT_DISCONNECT; 4088 return PCI_ERS_RESULT_DISCONNECT;
4063 } 4089 }
4064 4090
4065 pci_set_master(pdev); 4091 pci_set_master(pdev);
4066 pci_restore_state(pdev); 4092 pci_restore_state(pdev);
4067 pci_save_state(pdev); 4093 pci_save_state(pdev);
4094 return PCI_ERS_RESULT_RECOVERED;
4095}
4068 4096
4097static void mlx4_pci_resume(struct pci_dev *pdev)
4098{
4099 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4100 struct mlx4_dev *dev = persist->dev;
4101 struct mlx4_priv *priv = mlx4_priv(dev);
4102 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4103 int total_vfs;
4104 int err;
4105
4106 mlx4_err(dev, "%s was called\n", __func__);
4069 total_vfs = dev->persist->num_vfs; 4107 total_vfs = dev->persist->num_vfs;
4070 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4108 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4071 4109
4072 mutex_lock(&persist->interface_state_mutex); 4110 mutex_lock(&persist->interface_state_mutex);
4073 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4111 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4074 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4112 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4075 priv, 1); 4113 priv, 1);
4076 if (ret) { 4114 if (err) {
4077 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 4115 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4078 __func__, ret); 4116 __func__, err);
4079 goto end; 4117 goto end;
4080 } 4118 }
4081 4119
4082 ret = restore_current_port_types(dev, dev->persist-> 4120 err = restore_current_port_types(dev, dev->persist->
4083 curr_port_type, dev->persist-> 4121 curr_port_type, dev->persist->
4084 curr_port_poss_type); 4122 curr_port_poss_type);
4085 if (ret) 4123 if (err)
4086 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 4124 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4087 } 4125 }
4088end: 4126end:
4089 mutex_unlock(&persist->interface_state_mutex); 4127 mutex_unlock(&persist->interface_state_mutex);
4090 4128
4091 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
4092} 4129}
4093 4130
4094static void mlx4_shutdown(struct pci_dev *pdev) 4131static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
4105static const struct pci_error_handlers mlx4_err_handler = { 4142static const struct pci_error_handlers mlx4_err_handler = {
4106 .error_detected = mlx4_pci_err_detected, 4143 .error_detected = mlx4_pci_err_detected,
4107 .slot_reset = mlx4_pci_slot_reset, 4144 .slot_reset = mlx4_pci_slot_reset,
4145 .resume = mlx4_pci_resume,
4108}; 4146};
4109 4147
4110static struct pci_driver mlx4_driver = { 4148static struct pci_driver mlx4_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
587 int init_port_ref[MLX4_MAX_PORTS + 1]; 587 int init_port_ref[MLX4_MAX_PORTS + 1];
588 u16 max_mtu[MLX4_MAX_PORTS + 1]; 588 u16 max_mtu[MLX4_MAX_PORTS + 1];
589 u8 pptx;
590 u8 pprx;
589 int disable_mcast_ref[MLX4_MAX_PORTS + 1]; 591 int disable_mcast_ref[MLX4_MAX_PORTS + 1];
590 struct mlx4_resource_tracker res_tracker; 592 struct mlx4_resource_tracker res_tracker;
591 struct workqueue_struct *comm_wq; 593 struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
323 unsigned long csum_ok; 323 unsigned long csum_ok;
324 unsigned long csum_none; 324 unsigned long csum_none;
325 unsigned long csum_complete; 325 unsigned long csum_complete;
326 unsigned long dropped;
326 int hwtstamp_rx_filter; 327 int hwtstamp_rx_filter;
327 cpumask_var_t affinity_mask; 328 cpumask_var_t affinity_mask;
328}; 329};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1317 } 1317 }
1318 1318
1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1320 /* Slave cannot change Global Pause configuration */
1321 if (slave != mlx4_master_func_num(dev) &&
1322 ((gen_context->pptx != master->pptx) ||
1323 (gen_context->pprx != master->pprx))) {
1324 gen_context->pptx = master->pptx;
1325 gen_context->pprx = master->pprx;
1326 mlx4_warn(dev,
1327 "denying Global Pause change for slave:%d\n",
1328 slave);
1329 } else {
1330 master->pptx = gen_context->pptx;
1331 master->pprx = gen_context->pprx;
1332 }
1320 break; 1333 break;
1321 case MLX4_SET_PORT_GID_TABLE: 1334 case MLX4_SET_PORT_GID_TABLE:
1322 /* change to MULTIPLE entries: number of guest's gids 1335 /* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..559d11a443bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
14 bool "Mellanox Technologies ConnectX-4 Ethernet support" 14 bool "Mellanox Technologies ConnectX-4 Ethernet support"
15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
16 select PTP_1588_CLOCK 16 select PTP_1588_CLOCK
17 select VXLAN if MLX5_CORE=y
17 default n 18 default n
18 ---help--- 19 ---help---
19 Ethernet support in Mellanox Technologies ConnectX-4 NIC. 20 Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e6276c473..3881dce0cc30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -567,6 +567,7 @@ struct mlx5e_priv {
567 struct mlx5e_vxlan_db vxlan; 567 struct mlx5e_vxlan_db vxlan;
568 568
569 struct mlx5e_params params; 569 struct mlx5e_params params;
570 struct workqueue_struct *wq;
570 struct work_struct update_carrier_work; 571 struct work_struct update_carrier_work;
571 struct work_struct set_rx_mode_work; 572 struct work_struct set_rx_mode_work;
572 struct delayed_work update_stats_work; 573 struct delayed_work update_stats_work;
@@ -609,7 +610,7 @@ enum mlx5e_link_mode {
609 MLX5E_100GBASE_KR4 = 22, 610 MLX5E_100GBASE_KR4 = 22,
610 MLX5E_100GBASE_LR4 = 23, 611 MLX5E_100GBASE_LR4 = 23,
611 MLX5E_100BASE_TX = 24, 612 MLX5E_100BASE_TX = 24,
612 MLX5E_100BASE_T = 25, 613 MLX5E_1000BASE_T = 25,
613 MLX5E_10GBASE_T = 26, 614 MLX5E_10GBASE_T = 26,
614 MLX5E_25GBASE_CR = 27, 615 MLX5E_25GBASE_CR = 27,
615 MLX5E_25GBASE_KR = 28, 616 MLX5E_25GBASE_KR = 28,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b715f6c..3476ab844634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
138 [MLX5E_100BASE_TX] = { 138 [MLX5E_100BASE_TX] = {
139 .speed = 100, 139 .speed = 100,
140 }, 140 },
141 [MLX5E_100BASE_T] = { 141 [MLX5E_1000BASE_T] = {
142 .supported = SUPPORTED_100baseT_Full, 142 .supported = SUPPORTED_1000baseT_Full,
143 .advertised = ADVERTISED_100baseT_Full, 143 .advertised = ADVERTISED_1000baseT_Full,
144 .speed = 100, 144 .speed = 1000,
145 }, 145 },
146 [MLX5E_10GBASE_T] = { 146 [MLX5E_10GBASE_T] = {
147 .supported = SUPPORTED_10000baseT_Full, 147 .supported = SUPPORTED_10000baseT_Full,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb604f461..d4dfc5ce516a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
262 mutex_lock(&priv->state_lock); 262 mutex_lock(&priv->state_lock);
263 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 263 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
264 mlx5e_update_stats(priv); 264 mlx5e_update_stats(priv);
265 schedule_delayed_work(dwork, 265 queue_delayed_work(priv->wq, dwork,
266 msecs_to_jiffies( 266 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
267 MLX5E_UPDATE_STATS_INTERVAL));
268 } 267 }
269 mutex_unlock(&priv->state_lock); 268 mutex_unlock(&priv->state_lock);
270} 269}
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
280 switch (event) { 279 switch (event) {
281 case MLX5_DEV_EVENT_PORT_UP: 280 case MLX5_DEV_EVENT_PORT_UP:
282 case MLX5_DEV_EVENT_PORT_DOWN: 281 case MLX5_DEV_EVENT_PORT_DOWN:
283 schedule_work(&priv->update_carrier_work); 282 queue_work(priv->wq, &priv->update_carrier_work);
284 break; 283 break;
285 284
286 default: 285 default:
@@ -1404,24 +1403,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1404 return 0; 1403 return 0;
1405} 1404}
1406 1405
1407static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1406static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
1408{ 1407{
1409 struct mlx5e_priv *priv = netdev_priv(netdev);
1410 struct mlx5_core_dev *mdev = priv->mdev; 1408 struct mlx5_core_dev *mdev = priv->mdev;
1411 int hw_mtu; 1409 u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
1412 int err; 1410 int err;
1413 1411
1414 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); 1412 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
1415 if (err) 1413 if (err)
1416 return err; 1414 return err;
1417 1415
1418 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1416 /* Update vport context MTU */
1417 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
1418 return 0;
1419}
1420
1421static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
1422{
1423 struct mlx5_core_dev *mdev = priv->mdev;
1424 u16 hw_mtu = 0;
1425 int err;
1419 1426
1420 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) 1427 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
1421 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", 1428 if (err || !hw_mtu) /* fallback to port oper mtu */
1422 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); 1429 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1430
1431 *mtu = MLX5E_HW2SW_MTU(hw_mtu);
1432}
1433
1434static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1435{
1436 struct mlx5e_priv *priv = netdev_priv(netdev);
1437 u16 mtu;
1438 int err;
1423 1439
1424 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); 1440 err = mlx5e_set_mtu(priv, netdev->mtu);
1441 if (err)
1442 return err;
1443
1444 mlx5e_query_mtu(priv, &mtu);
1445 if (mtu != netdev->mtu)
1446 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
1447 __func__, mtu, netdev->mtu);
1448
1449 netdev->mtu = mtu;
1425 return 0; 1450 return 0;
1426} 1451}
1427 1452
@@ -1479,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
1479 mlx5e_update_carrier(priv); 1504 mlx5e_update_carrier(priv);
1480 mlx5e_timestamp_init(priv); 1505 mlx5e_timestamp_init(priv);
1481 1506
1482 schedule_delayed_work(&priv->update_stats_work, 0); 1507 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
1483 1508
1484 return 0; 1509 return 0;
1485 1510
@@ -1935,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
1935{ 1960{
1936 struct mlx5e_priv *priv = netdev_priv(dev); 1961 struct mlx5e_priv *priv = netdev_priv(dev);
1937 1962
1938 schedule_work(&priv->set_rx_mode_work); 1963 queue_work(priv->wq, &priv->set_rx_mode_work);
1939} 1964}
1940 1965
1941static int mlx5e_set_mac(struct net_device *netdev, void *addr) 1966static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1950,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
1950 ether_addr_copy(netdev->dev_addr, saddr->sa_data); 1975 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1951 netif_addr_unlock_bh(netdev); 1976 netif_addr_unlock_bh(netdev);
1952 1977
1953 schedule_work(&priv->set_rx_mode_work); 1978 queue_work(priv->wq, &priv->set_rx_mode_work);
1954 1979
1955 return 0; 1980 return 0;
1956} 1981}
@@ -1999,22 +2024,27 @@ static int mlx5e_set_features(struct net_device *netdev,
1999 return err; 2024 return err;
2000} 2025}
2001 2026
2027#define MXL5_HW_MIN_MTU 64
2028#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
2029
2002static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) 2030static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2003{ 2031{
2004 struct mlx5e_priv *priv = netdev_priv(netdev); 2032 struct mlx5e_priv *priv = netdev_priv(netdev);
2005 struct mlx5_core_dev *mdev = priv->mdev; 2033 struct mlx5_core_dev *mdev = priv->mdev;
2006 bool was_opened; 2034 bool was_opened;
2007 int max_mtu; 2035 u16 max_mtu;
2036 u16 min_mtu;
2008 int err = 0; 2037 int err = 0;
2009 2038
2010 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2039 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2011 2040
2012 max_mtu = MLX5E_HW2SW_MTU(max_mtu); 2041 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
2042 min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
2013 2043
2014 if (new_mtu > max_mtu) { 2044 if (new_mtu > max_mtu || new_mtu < min_mtu) {
2015 netdev_err(netdev, 2045 netdev_err(netdev,
2016 "%s: Bad MTU (%d) > (%d) Max\n", 2046 "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
2017 __func__, new_mtu, max_mtu); 2047 __func__, new_mtu, min_mtu, max_mtu);
2018 return -EINVAL; 2048 return -EINVAL;
2019 } 2049 }
2020 2050
@@ -2127,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
2127 if (!mlx5e_vxlan_allowed(priv->mdev)) 2157 if (!mlx5e_vxlan_allowed(priv->mdev))
2128 return; 2158 return;
2129 2159
2130 mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); 2160 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
2131} 2161}
2132 2162
2133static void mlx5e_del_vxlan_port(struct net_device *netdev, 2163static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2138,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
2138 if (!mlx5e_vxlan_allowed(priv->mdev)) 2168 if (!mlx5e_vxlan_allowed(priv->mdev))
2139 return; 2169 return;
2140 2170
2141 mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); 2171 mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
2142} 2172}
2143 2173
2144static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, 2174static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2467,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
2467 2497
2468 priv = netdev_priv(netdev); 2498 priv = netdev_priv(netdev);
2469 2499
2500 priv->wq = create_singlethread_workqueue("mlx5e");
2501 if (!priv->wq)
2502 goto err_free_netdev;
2503
2470 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); 2504 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
2471 if (err) { 2505 if (err) {
2472 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); 2506 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
2473 goto err_free_netdev; 2507 goto err_destroy_wq;
2474 } 2508 }
2475 2509
2476 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 2510 err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2549,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
2549 vxlan_get_rx_port(netdev); 2583 vxlan_get_rx_port(netdev);
2550 2584
2551 mlx5e_enable_async_events(priv); 2585 mlx5e_enable_async_events(priv);
2552 schedule_work(&priv->set_rx_mode_work); 2586 queue_work(priv->wq, &priv->set_rx_mode_work);
2553 2587
2554 return priv; 2588 return priv;
2555 2589
@@ -2586,6 +2620,9 @@ err_dealloc_pd:
2586err_unmap_free_uar: 2620err_unmap_free_uar:
2587 mlx5_unmap_free_uar(mdev, &priv->cq_uar); 2621 mlx5_unmap_free_uar(mdev, &priv->cq_uar);
2588 2622
2623err_destroy_wq:
2624 destroy_workqueue(priv->wq);
2625
2589err_free_netdev: 2626err_free_netdev:
2590 free_netdev(netdev); 2627 free_netdev(netdev);
2591 2628
@@ -2599,10 +2636,19 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2599 2636
2600 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 2637 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
2601 2638
2602 schedule_work(&priv->set_rx_mode_work); 2639 queue_work(priv->wq, &priv->set_rx_mode_work);
2603 mlx5e_disable_async_events(priv); 2640 mlx5e_disable_async_events(priv);
2604 flush_scheduled_work(); 2641 flush_workqueue(priv->wq);
2605 unregister_netdev(netdev); 2642 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
2643 netif_device_detach(netdev);
2644 mutex_lock(&priv->state_lock);
2645 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2646 mlx5e_close_locked(netdev);
2647 mutex_unlock(&priv->state_lock);
2648 } else {
2649 unregister_netdev(netdev);
2650 }
2651
2606 mlx5e_tc_cleanup(priv); 2652 mlx5e_tc_cleanup(priv);
2607 mlx5e_vxlan_cleanup(priv); 2653 mlx5e_vxlan_cleanup(priv);
2608 mlx5e_destroy_flow_tables(priv); 2654 mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2661,11 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2615 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 2661 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
2616 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2662 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
2617 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2663 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
2618 free_netdev(netdev); 2664 cancel_delayed_work_sync(&priv->update_stats_work);
2665 destroy_workqueue(priv->wq);
2666
2667 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
2668 free_netdev(netdev);
2619} 2669}
2620 2670
2621static void *mlx5e_get_netdev(void *vpriv) 2671static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4675d1..89cce97d46c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
1065 return rule; 1065 return rule;
1066} 1066}
1067 1067
1068static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
1069 u8 match_criteria_enable,
1070 u32 *match_criteria,
1071 u32 *match_value,
1072 u8 action,
1073 u32 flow_tag,
1074 struct mlx5_flow_destination *dest)
1075{
1076 struct mlx5_flow_rule *rule;
1077 struct mlx5_flow_group *g;
1078
1079 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1080 if (IS_ERR(g))
1081 return (void *)g;
1082
1083 rule = add_rule_fg(g, match_value,
1084 action, flow_tag, dest);
1085 if (IS_ERR(rule)) {
1086 /* Remove assumes refcount > 0 and autogroup creates a group
1087 * with a refcount = 0.
1088 */
1089 tree_get_node(&g->node);
1090 tree_remove_node(&g->node);
1091 }
1092 return rule;
1093}
1094
1095static struct mlx5_flow_rule * 1068static struct mlx5_flow_rule *
1096_mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1069_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1097 u8 match_criteria_enable, 1070 u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1119 goto unlock; 1092 goto unlock;
1120 } 1093 }
1121 1094
1122 rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, 1095 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1123 match_value, action, flow_tag, dest); 1096 if (IS_ERR(g)) {
1097 rule = (void *)g;
1098 goto unlock;
1099 }
1100
1101 rule = add_rule_fg(g, match_value,
1102 action, flow_tag, dest);
1103 if (IS_ERR(rule)) {
1104 /* Remove assumes refcount > 0 and autogroup creates a group
1105 * with a refcount = 0.
1106 */
1107 unlock_ref_node(&ft->node);
1108 tree_get_node(&g->node);
1109 tree_remove_node(&g->node);
1110 return rule;
1111 }
1124unlock: 1112unlock:
1125 unlock_ref_node(&ft->node); 1113 unlock_ref_node(&ft->node);
1126 return rule; 1114 return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1288{ 1276{
1289 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1277 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
1290 int prio; 1278 int prio;
1291 static struct fs_prio *fs_prio; 1279 struct fs_prio *fs_prio;
1292 struct mlx5_flow_namespace *ns; 1280 struct mlx5_flow_namespace *ns;
1293 1281
1294 if (!root_ns) 1282 if (!root_ns)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fae4991..6892746fd10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
966 int err; 966 int err;
967 967
968 mutex_lock(&dev->intf_state_mutex); 968 mutex_lock(&dev->intf_state_mutex);
969 if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { 969 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
971 __func__); 971 __func__);
972 goto out; 972 goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1133 if (err) 1133 if (err)
1134 pr_info("failed request module on %s\n", MLX5_IB_MOD); 1134 pr_info("failed request module on %s\n", MLX5_IB_MOD);
1135 1135
1136 dev->interface_state = MLX5_INTERFACE_STATE_UP; 1136 clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1137 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1137out: 1138out:
1138 mutex_unlock(&dev->intf_state_mutex); 1139 mutex_unlock(&dev->intf_state_mutex);
1139 1140
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1207 } 1208 }
1208 1209
1209 mutex_lock(&dev->intf_state_mutex); 1210 mutex_lock(&dev->intf_state_mutex);
1210 if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { 1211 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
1211 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1212 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
1212 __func__); 1213 __func__);
1213 goto out; 1214 goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1241 mlx5_cmd_cleanup(dev); 1242 mlx5_cmd_cleanup(dev);
1242 1243
1243out: 1244out:
1244 dev->interface_state = MLX5_INTERFACE_STATE_DOWN; 1245 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1246 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1245 mutex_unlock(&dev->intf_state_mutex); 1247 mutex_unlock(&dev->intf_state_mutex);
1246 return err; 1248 return err;
1247} 1249}
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
1452 .resume = mlx5_pci_resume 1454 .resume = mlx5_pci_resume
1453}; 1455};
1454 1456
1457static void shutdown(struct pci_dev *pdev)
1458{
1459 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1460 struct mlx5_priv *priv = &dev->priv;
1461
1462 dev_info(&pdev->dev, "Shutdown was called\n");
1463 /* Notify mlx5 clients that the kernel is being shut down */
1464 set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
1465 mlx5_unload_one(dev, priv);
1466 mlx5_pci_disable_device(dev);
1467}
1468
1455static const struct pci_device_id mlx5_core_pci_table[] = { 1469static const struct pci_device_id mlx5_core_pci_table[] = {
1456 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ 1470 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
1457 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ 1471 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
1459 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1473 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
1460 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1474 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
1461 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1475 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
1476 { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
1477 { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
1462 { 0, } 1478 { 0, }
1463}; 1479};
1464 1480
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
1469 .id_table = mlx5_core_pci_table, 1485 .id_table = mlx5_core_pci_table,
1470 .probe = init_one, 1486 .probe = init_one,
1471 .remove = remove_one, 1487 .remove = remove_one,
1488 .shutdown = shutdown,
1472 .err_handler = &mlx5_err_handler, 1489 .err_handler = &mlx5_err_handler,
1473 .sriov_configure = mlx5_core_sriov_configure, 1490 .sriov_configure = mlx5_core_sriov_configure,
1474}; 1491};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c575deb..53cc1e2c693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
247} 247}
248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); 248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
249 249
250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, 250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
251 int *max_mtu, int *oper_mtu, u8 port) 251 u16 *max_mtu, u16 *oper_mtu, u8 port)
252{ 252{
253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); 268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
269} 269}
270 270
271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) 271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
272{ 272{
273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
283} 283}
284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); 284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
285 285
286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, 286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
287 u8 port) 287 u8 port)
288{ 288{
289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); 289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
290} 290}
291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); 291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
292 292
293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
294 u8 port) 294 u8 port)
295{ 295{
296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); 296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e441a1..5ff8af472bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
269 269
270void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) 270void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
271{ 271{
272 iounmap(uar->map); 272 if (uar->map)
273 iounmap(uar->bf_map); 273 iounmap(uar->map);
274 else
275 iounmap(uar->bf_map);
274 mlx5_cmd_free_uar(mdev, uar->index); 276 mlx5_cmd_free_uar(mdev, uar->index);
275} 277}
276EXPORT_SYMBOL(mlx5_unmap_free_uar); 278EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd518405859e..b69dadcfb897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
196} 196}
197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); 197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
198 198
199int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
200{
201 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
202 u32 *out;
203 int err;
204
205 out = mlx5_vzalloc(outlen);
206 if (!out)
207 return -ENOMEM;
208
209 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
210 if (!err)
211 *mtu = MLX5_GET(query_nic_vport_context_out, out,
212 nic_vport_context.mtu);
213
214 kvfree(out);
215 return err;
216}
217EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
218
219int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
220{
221 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
222 void *in;
223 int err;
224
225 in = mlx5_vzalloc(inlen);
226 if (!in)
227 return -ENOMEM;
228
229 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
230 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
231
232 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
233
234 kvfree(in);
235 return err;
236}
237EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
238
199int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, 239int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
200 u32 vport, 240 u32 vport,
201 enum mlx5_list_type list_type, 241 enum mlx5_list_type list_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df25f3cd..f2fd1ef16da7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
95 return vxlan; 95 return vxlan;
96} 96}
97 97
98int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) 98static void mlx5e_vxlan_add_port(struct work_struct *work)
99{ 99{
100 struct mlx5e_vxlan_work *vxlan_work =
101 container_of(work, struct mlx5e_vxlan_work, work);
102 struct mlx5e_priv *priv = vxlan_work->priv;
100 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; 103 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
104 u16 port = vxlan_work->port;
101 struct mlx5e_vxlan *vxlan; 105 struct mlx5e_vxlan *vxlan;
102 int err; 106 int err;
103 107
104 err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); 108 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
105 if (err) 109 goto free_work;
106 return err;
107 110
108 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); 111 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
109 if (!vxlan) { 112 if (!vxlan)
110 err = -ENOMEM;
111 goto err_delete_port; 113 goto err_delete_port;
112 }
113 114
114 vxlan->udp_port = port; 115 vxlan->udp_port = port;
115 116
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
119 if (err) 120 if (err)
120 goto err_free; 121 goto err_free;
121 122
122 return 0; 123 goto free_work;
123 124
124err_free: 125err_free:
125 kfree(vxlan); 126 kfree(vxlan);
126err_delete_port: 127err_delete_port:
127 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); 128 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
128 return err; 129free_work:
130 kfree(vxlan_work);
129} 131}
130 132
131static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) 133static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
145 kfree(vxlan); 147 kfree(vxlan);
146} 148}
147 149
148void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) 150static void mlx5e_vxlan_del_port(struct work_struct *work)
149{ 151{
150 if (!mlx5e_vxlan_lookup_port(priv, port)) 152 struct mlx5e_vxlan_work *vxlan_work =
151 return; 153 container_of(work, struct mlx5e_vxlan_work, work);
154 struct mlx5e_priv *priv = vxlan_work->priv;
155 u16 port = vxlan_work->port;
152 156
153 __mlx5e_vxlan_core_del_port(priv, port); 157 __mlx5e_vxlan_core_del_port(priv, port);
158
159 kfree(vxlan_work);
160}
161
162void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
163 u16 port, int add)
164{
165 struct mlx5e_vxlan_work *vxlan_work;
166
167 vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
168 if (!vxlan_work)
169 return;
170
171 if (add)
172 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
173 else
174 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
175
176 vxlan_work->priv = priv;
177 vxlan_work->port = port;
178 vxlan_work->sa_family = sa_family;
179 queue_work(priv->wq, &vxlan_work->work);
154} 180}
155 181
156void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) 182void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a01685056ab1..129f3527aa14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
39 u16 udp_port; 39 u16 udp_port;
40}; 40};
41 41
42struct mlx5e_vxlan_work {
43 struct work_struct work;
44 struct mlx5e_priv *priv;
45 sa_family_t sa_family;
46 u16 port;
47};
48
42static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) 49static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
43{ 50{
44 return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && 51 return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
46} 53}
47 54
48void mlx5e_vxlan_init(struct mlx5e_priv *priv); 55void mlx5e_vxlan_init(struct mlx5e_priv *priv);
49int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); 56void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
50void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); 57 u16 port, int add);
51struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); 58struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
52void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); 59void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
53 60
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 270c9eeb7ab6..6d1a956e3f77 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
2668 2668
2669 del_timer_sync(&mgp->watchdog_timer); 2669 del_timer_sync(&mgp->watchdog_timer);
2670 mgp->running = MYRI10GE_ETH_STOPPING; 2670 mgp->running = MYRI10GE_ETH_STOPPING;
2671 local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
2672 for (i = 0; i < mgp->num_slices; i++) { 2671 for (i = 0; i < mgp->num_slices; i++) {
2673 napi_disable(&mgp->ss[i].napi); 2672 napi_disable(&mgp->ss[i].napi);
2673 local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
2674 /* Lock the slice to prevent the busy_poll handler from 2674 /* Lock the slice to prevent the busy_poll handler from
2675 * accessing it. Later when we bring the NIC up, myri10ge_open 2675 * accessing it. Later when we bring the NIC up, myri10ge_open
2676 * resets the slice including this lock. 2676 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
2679 pr_info("Slice %d locked\n", i); 2679 pr_info("Slice %d locked\n", i);
2680 mdelay(1); 2680 mdelay(1);
2681 } 2681 }
2682 local_bh_enable();
2682 } 2683 }
2683 local_bh_enable();
2684 netif_carrier_off(dev); 2684 netif_carrier_off(dev);
2685 2685
2686 netif_tx_stop_all_queues(dev); 2686 netif_tx_stop_all_queues(dev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index ffd0accc2ec9..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2750,7 +2750,7 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2750int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2751 enum qed_int_mode int_mode) 2751 enum qed_int_mode int_mode)
2752{ 2752{
2753 int rc; 2753 int rc = 0;
2754 2754
2755 /* Configure AEU signal change to produce attentions */ 2755 /* Configure AEU signal change to produce attentions */
2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 2756 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
750 return false; 750 return false;
751} 751}
752 752
753static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
754{
755 qed_chain_consume(&rxq->rx_bd_ring);
756 rxq->sw_rx_cons++;
757}
758
753/* This function reuses the buffer(from an offset) from 759/* This function reuses the buffer(from an offset) from
754 * consumer index to producer index in the bd ring 760 * consumer index to producer index in the bd ring
755 */ 761 */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
773 curr_cons->data = NULL; 779 curr_cons->data = NULL;
774} 780}
775 781
782/* In case of allocation failures reuse buffers
783 * from consumer index to produce buffers for firmware
784 */
785static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
786 struct qede_dev *edev, u8 count)
787{
788 struct sw_rx_data *curr_cons;
789
790 for (; count > 0; count--) {
791 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
792 qede_reuse_page(edev, rxq, curr_cons);
793 qede_rx_bd_ring_consume(rxq);
794 }
795}
796
776static inline int qede_realloc_rx_buffer(struct qede_dev *edev, 797static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
777 struct qede_rx_queue *rxq, 798 struct qede_rx_queue *rxq,
778 struct sw_rx_data *curr_cons) 799 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
781 curr_cons->page_offset += rxq->rx_buf_seg_size; 802 curr_cons->page_offset += rxq->rx_buf_seg_size;
782 803
783 if (curr_cons->page_offset == PAGE_SIZE) { 804 if (curr_cons->page_offset == PAGE_SIZE) {
784 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 805 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
806 /* Since we failed to allocate new buffer
807 * current buffer can be used again.
808 */
809 curr_cons->page_offset -= rxq->rx_buf_seg_size;
810
785 return -ENOMEM; 811 return -ENOMEM;
812 }
786 813
787 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, 814 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
788 PAGE_SIZE, DMA_FROM_DEVICE); 815 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
901 len_on_bd); 928 len_on_bd);
902 929
903 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { 930 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
904 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 931 /* Incr page ref count to reuse on allocation failure
932 * so that it doesn't get freed while freeing SKB.
933 */
934 atomic_inc(&current_bd->data->_count);
905 goto out; 935 goto out;
906 } 936 }
907 937
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
915 return 0; 945 return 0;
916 946
917out: 947out:
948 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
949 qede_recycle_rx_bd_ring(rxq, edev, 1);
918 return -ENOMEM; 950 return -ENOMEM;
919} 951}
920 952
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 998 tpa_info->skb = netdev_alloc_skb(edev->ndev,
967 le16_to_cpu(cqe->len_on_first_bd)); 999 le16_to_cpu(cqe->len_on_first_bd));
968 if (unlikely(!tpa_info->skb)) { 1000 if (unlikely(!tpa_info->skb)) {
1001 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
969 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
970 return; 1003 goto cons_buf;
971 } 1004 }
972 1005
973 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); 1006 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
990 /* This is needed in order to enable forwarding support */ 1023 /* This is needed in order to enable forwarding support */
991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1024 qede_set_gro_params(edev, tpa_info->skb, cqe);
992 1025
1026cons_buf: /* We still need to handle bd_len_list to consume buffers */
993 if (likely(cqe->ext_bd_len_list[0])) 1027 if (likely(cqe->ext_bd_len_list[0]))
994 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
995 le16_to_cpu(cqe->ext_bd_len_list[0])); 1029 le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
1007 const struct iphdr *iph = ip_hdr(skb); 1041 const struct iphdr *iph = ip_hdr(skb);
1008 struct tcphdr *th; 1042 struct tcphdr *th;
1009 1043
1010 skb_set_network_header(skb, 0);
1011 skb_set_transport_header(skb, sizeof(struct iphdr)); 1044 skb_set_transport_header(skb, sizeof(struct iphdr));
1012 th = tcp_hdr(skb); 1045 th = tcp_hdr(skb);
1013 1046
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1055 struct ipv6hdr *iph = ipv6_hdr(skb);
1023 struct tcphdr *th; 1056 struct tcphdr *th;
1024 1057
1025 skb_set_network_header(skb, 0);
1026 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1058 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1027 th = tcp_hdr(skb); 1059 th = tcp_hdr(skb);
1028 1060
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
1037 struct sk_buff *skb, 1069 struct sk_buff *skb,
1038 u16 vlan_tag) 1070 u16 vlan_tag)
1039{ 1071{
1072 /* FW can send a single MTU sized packet from gro flow
1073 * due to aggregation timeout/last segment etc. which
1074 * is not expected to be a gro packet. If a skb has zero
1075 * frags then simply push it in the stack as non gso skb.
1076 */
1077 if (unlikely(!skb->data_len)) {
1078 skb_shinfo(skb)->gso_type = 0;
1079 skb_shinfo(skb)->gso_size = 0;
1080 goto send_skb;
1081 }
1082
1040#ifdef CONFIG_INET 1083#ifdef CONFIG_INET
1041 if (skb_shinfo(skb)->gso_size) { 1084 if (skb_shinfo(skb)->gso_size) {
1085 skb_set_network_header(skb, 0);
1086
1042 switch (skb->protocol) { 1087 switch (skb->protocol) {
1043 case htons(ETH_P_IP): 1088 case htons(ETH_P_IP):
1044 qede_gro_ip_csum(skb); 1089 qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
1053 } 1098 }
1054 } 1099 }
1055#endif 1100#endif
1101
1102send_skb:
1056 skb_record_rx_queue(skb, fp->rss_id); 1103 skb_record_rx_queue(skb, fp->rss_id);
1057 qede_skb_receive(edev, fp, skb, vlan_tag); 1104 qede_skb_receive(edev, fp, skb, vlan_tag);
1058} 1105}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1291 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1245 sw_comp_cons, parse_flag); 1292 sw_comp_cons, parse_flag);
1246 rxq->rx_hw_errors++; 1293 rxq->rx_hw_errors++;
1247 qede_reuse_page(edev, rxq, sw_rx_data); 1294 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1248 goto next_rx; 1295 goto next_cqe;
1249 } 1296 }
1250 1297
1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1298 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1252 if (unlikely(!skb)) { 1299 if (unlikely(!skb)) {
1253 DP_NOTICE(edev, 1300 DP_NOTICE(edev,
1254 "Build_skb failed, dropping incoming packet\n"); 1301 "Build_skb failed, dropping incoming packet\n");
1255 qede_reuse_page(edev, rxq, sw_rx_data); 1302 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1256 rxq->rx_alloc_errors++; 1303 rxq->rx_alloc_errors++;
1257 goto next_rx; 1304 goto next_cqe;
1258 } 1305 }
1259 1306
1260 /* Copy data into SKB */ 1307 /* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1335 if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1289 sw_rx_data))) { 1336 sw_rx_data))) {
1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1337 DP_ERR(edev, "Failed to allocate rx buffer\n");
1338 /* Incr page ref count to reuse on allocation
1339 * failure so that it doesn't get freed while
1340 * freeing SKB.
1341 */
1342
1343 atomic_inc(&sw_rx_data->data->_count);
1291 rxq->rx_alloc_errors++; 1344 rxq->rx_alloc_errors++;
1345 qede_recycle_rx_bd_ring(rxq, edev,
1346 fp_cqe->bd_num);
1347 dev_kfree_skb_any(skb);
1292 goto next_cqe; 1348 goto next_cqe;
1293 } 1349 }
1294 } 1350 }
1295 1351
1352 qede_rx_bd_ring_consume(rxq);
1353
1296 if (fp_cqe->bd_num != 1) { 1354 if (fp_cqe->bd_num != 1) {
1297 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); 1355 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1298 u8 num_frags; 1356 u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1303 num_frags--) { 1361 num_frags--) {
1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1362 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1305 rxq->rx_buf_size : pkt_len; 1363 rxq->rx_buf_size : pkt_len;
1364 if (unlikely(!cur_size)) {
1365 DP_ERR(edev,
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1367 num_frags);
1368 qede_recycle_rx_bd_ring(rxq, edev,
1369 num_frags);
1370 dev_kfree_skb_any(skb);
1371 goto next_cqe;
1372 }
1306 1373
1307 WARN_ONCE(!cur_size, 1374 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1308 "Still got %d BDs for mapping jumbo, but length became 0\n", 1375 qede_recycle_rx_bd_ring(rxq, edev,
1309 num_frags); 1376 num_frags);
1310 1377 dev_kfree_skb_any(skb);
1311 if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
1312 goto next_cqe; 1378 goto next_cqe;
1379 }
1313 1380
1314 rxq->sw_rx_cons++;
1315 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1381 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1316 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1382 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1317 qed_chain_consume(&rxq->rx_bd_ring); 1383 qede_rx_bd_ring_consume(rxq);
1384
1318 dma_unmap_page(&edev->pdev->dev, 1385 dma_unmap_page(&edev->pdev->dev,
1319 sw_rx_data->mapping, 1386 sw_rx_data->mapping,
1320 PAGE_SIZE, DMA_FROM_DEVICE); 1387 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1330 pkt_len -= cur_size; 1397 pkt_len -= cur_size;
1331 } 1398 }
1332 1399
1333 if (pkt_len) 1400 if (unlikely(pkt_len))
1334 DP_ERR(edev, 1401 DP_ERR(edev,
1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1336 pkt_len); 1403 pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1349 skb_record_rx_queue(skb, fp->rss_id); 1416 skb_record_rx_queue(skb, fp->rss_id);
1350 1417
1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1418 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1352
1353 qed_chain_consume(&rxq->rx_bd_ring);
1354next_rx:
1355 rxq->sw_rx_cons++;
1356next_rx_only: 1419next_rx_only:
1357 rx_pkt++; 1420 rx_pkt++;
1358 1421
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2320 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2321 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2259 2322
2260 if (replace_buf) { 2323 if (replace_buf->data) {
2261 dma_unmap_page(&edev->pdev->dev, 2324 dma_unmap_page(&edev->pdev->dev,
2262 dma_unmap_addr(replace_buf, mapping), 2325 dma_unmap_addr(replace_buf, mapping),
2263 PAGE_SIZE, DMA_FROM_DEVICE); 2326 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
2377static int qede_alloc_mem_rxq(struct qede_dev *edev, 2440static int qede_alloc_mem_rxq(struct qede_dev *edev,
2378 struct qede_rx_queue *rxq) 2441 struct qede_rx_queue *rxq)
2379{ 2442{
2380 int i, rc, size, num_allocated; 2443 int i, rc, size;
2381 2444
2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2445 rxq->num_rx_buffers = edev->q_num_rx_buffers;
2383 2446
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2457 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2395 if (!rxq->sw_rx_ring) { 2458 if (!rxq->sw_rx_ring) {
2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2459 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2460 rc = -ENOMEM;
2397 goto err; 2461 goto err;
2398 } 2462 }
2399 2463
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2421 /* Allocate buffers for the Rx ring */ 2485 /* Allocate buffers for the Rx ring */
2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2486 for (i = 0; i < rxq->num_rx_buffers; i++) {
2423 rc = qede_alloc_rx_buffer(edev, rxq); 2487 rc = qede_alloc_rx_buffer(edev, rxq);
2424 if (rc) 2488 if (rc) {
2425 break; 2489 DP_ERR(edev,
2426 } 2490 "Rx buffers allocation failed at index %d\n", i);
2427 num_allocated = i; 2491 goto err;
2428 if (!num_allocated) { 2492 }
2429 DP_ERR(edev, "Rx buffers allocation failed\n");
2430 goto err;
2431 } else if (num_allocated < rxq->num_rx_buffers) {
2432 DP_NOTICE(edev,
2433 "Allocated less buffers than desired (%d allocated)\n",
2434 num_allocated);
2435 } 2493 }
2436 2494
2437 qede_alloc_sge_mem(edev, rxq); 2495 rc = qede_alloc_sge_mem(edev, rxq);
2438
2439 return 0;
2440
2441err: 2496err:
2442 qede_free_mem_rxq(edev, rxq); 2497 return rc;
2443 return -ENOMEM;
2444} 2498}
2445 2499
2446static void qede_free_mem_txq(struct qede_dev *edev, 2500static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
2523 } 2577 }
2524 2578
2525 return 0; 2579 return 0;
2526
2527err: 2580err:
2528 qede_free_mem_fp(edev, fp); 2581 return rc;
2529 return -ENOMEM;
2530} 2582}
2531 2583
2532static void qede_free_mem_load(struct qede_dev *edev) 2584static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
2549 struct qede_fastpath *fp = &edev->fp_array[rss_id]; 2601 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2550 2602
2551 rc = qede_alloc_mem_fp(edev, fp); 2603 rc = qede_alloc_mem_fp(edev, fp);
2552 if (rc) 2604 if (rc) {
2553 break;
2554 }
2555
2556 if (rss_id != QEDE_RSS_CNT(edev)) {
2557 /* Failed allocating memory for all the queues */
2558 if (!rss_id) {
2559 DP_ERR(edev, 2605 DP_ERR(edev,
2560 "Failed to allocate memory for the leading queue\n"); 2606 "Failed to allocate memory for fastpath - rss id = %d\n",
2561 rc = -ENOMEM; 2607 rss_id);
2562 } else { 2608 qede_free_mem_load(edev);
2563 DP_NOTICE(edev, 2609 return rc;
2564 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
2565 QEDE_RSS_CNT(edev), rss_id);
2566 } 2610 }
2567 edev->num_rss = rss_id;
2568 } 2611 }
2569 2612
2570 return 0; 2613 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbc..caf6ddb7ea76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 63 40#define _QLCNIC_LINUX_SUBVERSION 64
41#define QLCNIC_LINUX_VERSIONID "5.3.63" 41#define QLCNIC_LINUX_VERSIONID "5.3.64"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "1.00.00.34" 21#define DRV_VERSION "1.00.00.35"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4e1a7dba7c4a..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1377,11 +1377,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1377 1377
1378 /* TAG and timestamp required flag */ 1378 /* TAG and timestamp required flag */
1379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1380 skb_tx_timestamp(skb);
1381 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; 1380 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1382 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); 1381 desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
1383 } 1382 }
1384 1383
1384 skb_tx_timestamp(skb);
1385 /* Descriptor type must be set after all the above writes */ 1385 /* Descriptor type must be set after all the above writes */
1386 dma_wmb(); 1386 dma_wmb();
1387 desc->die_dt = DT_FEND; 1387 desc->die_dt = DT_FEND;
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
1691 rate = clk_get_rate(clk); 1691 rate = clk_get_rate(clk);
1692 clk_put(clk); 1692 clk_put(clk);
1693 1693
1694 if (!rate)
1695 return -EINVAL;
1696
1694 inc = 1000000000ULL << 20; 1697 inc = 1000000000ULL << 20;
1695 do_div(inc, rate); 1698 do_div(inc, rate);
1696 1699
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
2194 __func__); 2194 __func__);
2195 return ret; 2195 return ret;
2196 } 2196 }
2197 ret = sh_eth_dev_init(ndev, false); 2197 ret = sh_eth_dev_init(ndev, true);
2198 if (ret < 0) { 2198 if (ret < 0) {
2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2200 __func__); 2200 __func__);
2201 return ret; 2201 return ret;
2202 } 2202 }
2203 2203
2204 mdp->irq_enabled = true;
2205 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2206 /* Setting the Rx mode will start the Rx process. */
2207 sh_eth_write(ndev, EDRRR_R, EDRRR);
2208 netif_device_attach(ndev); 2204 netif_device_attach(ndev);
2209 } 2205 }
2210 2206
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed12bfc5..73427e29df2a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
155 return 0; 155 return 0;
156 156
157err_rx_irq_unmap: 157err_rx_irq_unmap:
158 while (--i) 158 while (i--)
159 irq_dispose_mapping(priv->rxq[i]->irq_no); 159 irq_dispose_mapping(priv->rxq[i]->irq_no);
160 i = SXGBE_TX_QUEUES; 160 i = SXGBE_TX_QUEUES;
161err_tx_irq_unmap: 161err_tx_irq_unmap:
162 while (--i) 162 while (i--)
163 irq_dispose_mapping(priv->txq[i]->irq_no); 163 irq_dispose_mapping(priv->txq[i]->irq_no);
164 irq_dispose_mapping(priv->irq); 164 irq_dispose_mapping(priv->irq);
165err_drv_remove: 165err_drv_remove:
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 98d33d462c6c..1681084cc96f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
1920 return 0; 1920 return 0;
1921 } 1921 }
1922 1922
1923 if (nic_data->datapath_caps &
1924 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
1925 return -EOPNOTSUPP;
1926
1923 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, 1927 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1924 nic_data->vport_id); 1928 nic_data->vport_id);
1925 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); 1929 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2923 bool replacing) 2927 bool replacing)
2924{ 2928{
2925 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2929 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2930 u32 flags = spec->flags;
2926 2931
2927 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); 2932 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2928 2933
2934 /* Remove RSS flag if we don't have an RSS context. */
2935 if (flags & EFX_FILTER_FLAG_RX_RSS &&
2936 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
2937 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
2938 flags &= ~EFX_FILTER_FLAG_RX_RSS;
2939
2929 if (replacing) { 2940 if (replacing) {
2930 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 2941 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2931 MC_CMD_FILTER_OP_IN_OP_REPLACE); 2942 MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2985 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 2996 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2986 0 : spec->dmaq_id); 2997 0 : spec->dmaq_id);
2987 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 2998 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2988 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? 2999 (flags & EFX_FILTER_FLAG_RX_RSS) ?
2989 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 3000 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2990 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 3001 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2991 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) 3002 if (flags & EFX_FILTER_FLAG_RX_RSS)
2992 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, 3003 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2993 spec->rss_context != 3004 spec->rss_context !=
2994 EFX_FILTER_RSS_CONTEXT_DEFAULT ? 3005 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..afb90d129cb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
36 36
37#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
38#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
39
37#define EMAC_SPLITTER_CTRL_REG 0x0 40#define EMAC_SPLITTER_CTRL_REG 0x0
38#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 41#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
39#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 42#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -46,7 +49,6 @@ struct socfpga_dwmac {
46 u32 reg_shift; 49 u32 reg_shift;
47 struct device *dev; 50 struct device *dev;
48 struct regmap *sys_mgr_base_addr; 51 struct regmap *sys_mgr_base_addr;
49 struct reset_control *stmmac_rst;
50 void __iomem *splitter_base; 52 void __iomem *splitter_base;
51 bool f2h_ptp_ref_clk; 53 bool f2h_ptp_ref_clk;
52}; 54};
@@ -89,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
89 struct device_node *np_splitter; 91 struct device_node *np_splitter;
90 struct resource res_splitter; 92 struct resource res_splitter;
91 93
92 dwmac->stmmac_rst = devm_reset_control_get(dev,
93 STMMAC_RESOURCE_NAME);
94 if (IS_ERR(dwmac->stmmac_rst)) {
95 dev_info(dev, "Could not get reset control!\n");
96 if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
97 return -EPROBE_DEFER;
98 dwmac->stmmac_rst = NULL;
99 }
100
101 dwmac->interface = of_get_phy_mode(np); 94 dwmac->interface = of_get_phy_mode(np);
102 95
103 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); 96 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -148,7 +141,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
148 int phymode = dwmac->interface; 141 int phymode = dwmac->interface;
149 u32 reg_offset = dwmac->reg_offset; 142 u32 reg_offset = dwmac->reg_offset;
150 u32 reg_shift = dwmac->reg_shift; 143 u32 reg_shift = dwmac->reg_shift;
151 u32 ctrl, val; 144 u32 ctrl, val, module;
152 145
153 switch (phymode) { 146 switch (phymode) {
154 case PHY_INTERFACE_MODE_RGMII: 147 case PHY_INTERFACE_MODE_RGMII:
@@ -175,39 +168,39 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 168 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
176 ctrl |= val << reg_shift; 169 ctrl |= val << reg_shift;
177 170
178 if (dwmac->f2h_ptp_ref_clk) 171 if (dwmac->f2h_ptp_ref_clk) {
179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 172 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
180 else 173 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
174 &module);
175 module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
176 regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
177 module);
178 } else {
181 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 179 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
180 }
182 181
183 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 182 regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
184 return 0;
185}
186
187static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
188{
189 struct socfpga_dwmac *dwmac = priv;
190 183
191 /* On socfpga platform exit, assert and hold reset to the 184 return 0;
192 * enet controller - the default state after a hard reset.
193 */
194 if (dwmac->stmmac_rst)
195 reset_control_assert(dwmac->stmmac_rst);
196} 185}
197 186
198static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) 187static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
199{ 188{
200 struct socfpga_dwmac *dwmac = priv; 189 struct socfpga_dwmac *dwmac = priv;
201 struct net_device *ndev = platform_get_drvdata(pdev); 190 struct net_device *ndev = platform_get_drvdata(pdev);
202 struct stmmac_priv *stpriv = NULL; 191 struct stmmac_priv *stpriv = NULL;
203 int ret = 0; 192 int ret = 0;
204 193
205 if (ndev) 194 if (!ndev)
206 stpriv = netdev_priv(ndev); 195 return -EINVAL;
196
197 stpriv = netdev_priv(ndev);
198 if (!stpriv)
199 return -EINVAL;
207 200
208 /* Assert reset to the enet controller before changing the phy mode */ 201 /* Assert reset to the enet controller before changing the phy mode */
209 if (dwmac->stmmac_rst) 202 if (stpriv->stmmac_rst)
210 reset_control_assert(dwmac->stmmac_rst); 203 reset_control_assert(stpriv->stmmac_rst);
211 204
212 /* Setup the phy mode in the system manager registers according to 205 /* Setup the phy mode in the system manager registers according to
213 * devicetree configuration 206 * devicetree configuration
@@ -217,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
217 /* Deassert reset for the phy configuration to be sampled by 210 /* Deassert reset for the phy configuration to be sampled by
218 * the enet controller, and operation to start in requested mode 211 * the enet controller, and operation to start in requested mode
219 */ 212 */
220 if (dwmac->stmmac_rst) 213 if (stpriv->stmmac_rst)
221 reset_control_deassert(dwmac->stmmac_rst); 214 reset_control_deassert(stpriv->stmmac_rst);
222 215
223 /* Before the enet controller is suspended, the phy is suspended. 216 /* Before the enet controller is suspended, the phy is suspended.
224 * This causes the phy clock to be gated. The enet controller is 217 * This causes the phy clock to be gated. The enet controller is
@@ -235,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
235 * control register 0, and can be modified by the phy driver 228 * control register 0, and can be modified by the phy driver
236 * framework. 229 * framework.
237 */ 230 */
238 if (stpriv && stpriv->phydev) 231 if (stpriv->phydev)
239 phy_resume(stpriv->phydev); 232 phy_resume(stpriv->phydev);
240 233
241 return ret; 234 return ret;
@@ -275,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
275 268
276 plat_dat->bsp_priv = dwmac; 269 plat_dat->bsp_priv = dwmac;
277 plat_dat->init = socfpga_dwmac_init; 270 plat_dat->init = socfpga_dwmac_init;
278 plat_dat->exit = socfpga_dwmac_exit;
279 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 271 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
280 272
281 ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); 273 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
282 if (ret) 274 if (!ret)
283 return ret; 275 ret = socfpga_dwmac_init(pdev, dwmac);
284 276
285 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 277 return ret;
286} 278}
287 279
288static const struct of_device_id socfpga_dwmac_match[] = { 280static const struct of_device_id socfpga_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e13228f115f0..011386f6f24d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -199,11 +199,6 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
199{ 199{
200 unsigned int tdes1 = p->des1; 200 unsigned int tdes1 = p->des1;
201 201
202 if (mode == STMMAC_CHAIN_MODE)
203 norm_set_tx_desc_len_on_chain(p, len);
204 else
205 norm_set_tx_desc_len_on_ring(p, len);
206
207 if (is_fs) 202 if (is_fs)
208 tdes1 |= TDES1_FIRST_SEGMENT; 203 tdes1 |= TDES1_FIRST_SEGMENT;
209 else 204 else
@@ -217,10 +212,15 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
217 if (ls) 212 if (ls)
218 tdes1 |= TDES1_LAST_SEGMENT; 213 tdes1 |= TDES1_LAST_SEGMENT;
219 214
220 if (tx_own)
221 tdes1 |= TDES0_OWN;
222
223 p->des1 = tdes1; 215 p->des1 = tdes1;
216
217 if (mode == STMMAC_CHAIN_MODE)
218 norm_set_tx_desc_len_on_chain(p, len);
219 else
220 norm_set_tx_desc_len_on_ring(p, len);
221
222 if (tx_own)
223 p->des0 |= TDES0_OWN;
224} 224}
225 225
226static void ndesc_set_tx_ic(struct dma_desc *p) 226static void ndesc_set_tx_ic(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce9848ca9..fcbd4be562e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -278,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
278 */ 278 */
279bool stmmac_eee_init(struct stmmac_priv *priv) 279bool stmmac_eee_init(struct stmmac_priv *priv)
280{ 280{
281 char *phy_bus_name = priv->plat->phy_bus_name;
282 unsigned long flags; 281 unsigned long flags;
283 bool ret = false; 282 bool ret = false;
284 283
@@ -289,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
289 (priv->pcs == STMMAC_PCS_RTBI)) 288 (priv->pcs == STMMAC_PCS_RTBI))
290 goto out; 289 goto out;
291 290
292 /* Never init EEE in case of a switch is attached */
293 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
294 goto out;
295
296 /* MAC core supports the EEE feature. */ 291 /* MAC core supports the EEE feature. */
297 if (priv->dma_cap.eee) { 292 if (priv->dma_cap.eee) {
298 int tx_lpi_timer = priv->tx_lpi_timer; 293 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -772,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)
772 767
773 spin_unlock_irqrestore(&priv->lock, flags); 768 spin_unlock_irqrestore(&priv->lock, flags);
774 769
775 /* At this stage, it could be needed to setup the EEE or adjust some 770 if (phydev->is_pseudo_fixed_link)
776 * MAC related HW registers. 771 /* Stop PHY layer to call the hook to adjust the link in case
777 */ 772 * of a switch is attached to the stmmac driver.
778 priv->eee_enabled = stmmac_eee_init(priv); 773 */
774 phydev->irq = PHY_IGNORE_INTERRUPT;
775 else
776 /* At this stage, init the EEE if supported.
777 * Never called in case of fixed_link.
778 */
779 priv->eee_enabled = stmmac_eee_init(priv);
779} 780}
780 781
781/** 782/**
@@ -827,12 +828,8 @@ static int stmmac_init_phy(struct net_device *dev)
827 phydev = of_phy_connect(dev, priv->plat->phy_node, 828 phydev = of_phy_connect(dev, priv->plat->phy_node,
828 &stmmac_adjust_link, 0, interface); 829 &stmmac_adjust_link, 0, interface);
829 } else { 830 } else {
830 if (priv->plat->phy_bus_name) 831 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
831 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", 832 priv->plat->bus_id);
832 priv->plat->phy_bus_name, priv->plat->bus_id);
833 else
834 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
835 priv->plat->bus_id);
836 833
837 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 834 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
838 priv->plat->phy_addr); 835 priv->plat->phy_addr);
@@ -870,11 +867,6 @@ static int stmmac_init_phy(struct net_device *dev)
870 return -ENODEV; 867 return -ENODEV;
871 } 868 }
872 869
873 /* If attached to a switch, there is no reason to poll phy handler */
874 if (priv->plat->phy_bus_name)
875 if (!strcmp(priv->plat->phy_bus_name, "fixed"))
876 phydev->irq = PHY_IGNORE_INTERRUPT;
877
878 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 870 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
879 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 871 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
880 872
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ea76129dafc2..06704ca6f9ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
198 struct mii_bus *new_bus; 198 struct mii_bus *new_bus;
199 struct stmmac_priv *priv = netdev_priv(ndev); 199 struct stmmac_priv *priv = netdev_priv(ndev);
200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
201 int addr, found;
202 struct device_node *mdio_node = priv->plat->mdio_node; 201 struct device_node *mdio_node = priv->plat->mdio_node;
202 int addr, found;
203 203
204 if (!mdio_bus_data) 204 if (!mdio_bus_data)
205 return 0; 205 return 0;
206 206
207 if (IS_ENABLED(CONFIG_OF)) {
208 if (mdio_node) {
209 netdev_dbg(ndev, "FOUND MDIO subnode\n");
210 } else {
211 netdev_warn(ndev, "No MDIO subnode found\n");
212 }
213 }
214
215 new_bus = mdiobus_alloc(); 207 new_bus = mdiobus_alloc();
216 if (new_bus == NULL) 208 if (new_bus == NULL)
217 return -ENOMEM; 209 return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dcbd2a1601e8..cf37ea558ecc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -132,6 +132,69 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
132} 132}
133 133
134/** 134/**
135 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
136 * @plat: driver data platform structure
137 * @np: device tree node
138 * @dev: device pointer
139 * Description:
140 * The mdio bus will be allocated in case of a phy transceiver is on board;
141 * it will be NULL if the fixed-link is configured.
142 * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
143 * in any case (for DSA, mdio must be registered even if fixed-link).
144 * The table below sums the supported configurations:
145 * -------------------------------
146 * snps,phy-addr | Y
147 * -------------------------------
148 * phy-handle | Y
149 * -------------------------------
150 * fixed-link | N
151 * -------------------------------
152 * snps,dwmac-mdio |
153 * even if | Y
154 * fixed-link |
155 * -------------------------------
156 *
157 * It returns 0 in case of success otherwise -ENODEV.
158 */
159static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
160 struct device_node *np, struct device *dev)
161{
162 bool mdio = true;
163
164 /* If phy-handle property is passed from DT, use it as the PHY */
165 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
166 if (plat->phy_node)
167 dev_dbg(dev, "Found phy-handle subnode\n");
168
169 /* If phy-handle is not specified, check if we have a fixed-phy */
170 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
171 if ((of_phy_register_fixed_link(np) < 0))
172 return -ENODEV;
173
174 dev_dbg(dev, "Found fixed-link subnode\n");
175 plat->phy_node = of_node_get(np);
176 mdio = false;
177 }
178
179 /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
180 for_each_child_of_node(np, plat->mdio_node) {
181 if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
182 break;
183 }
184
185 if (plat->mdio_node) {
186 dev_dbg(dev, "Found MDIO subnode\n");
187 mdio = true;
188 }
189
190 if (mdio)
191 plat->mdio_bus_data =
192 devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
193 GFP_KERNEL);
194 return 0;
195}
196
197/**
135 * stmmac_probe_config_dt - parse device-tree driver parameters 198 * stmmac_probe_config_dt - parse device-tree driver parameters
136 * @pdev: platform_device structure 199 * @pdev: platform_device structure
137 * @plat: driver data platform structure 200 * @plat: driver data platform structure
@@ -146,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
146 struct device_node *np = pdev->dev.of_node; 209 struct device_node *np = pdev->dev.of_node;
147 struct plat_stmmacenet_data *plat; 210 struct plat_stmmacenet_data *plat;
148 struct stmmac_dma_cfg *dma_cfg; 211 struct stmmac_dma_cfg *dma_cfg;
149 struct device_node *child_node = NULL;
150 212
151 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 213 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
152 if (!plat) 214 if (!plat)
@@ -166,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
166 /* Default to phy auto-detection */ 228 /* Default to phy auto-detection */
167 plat->phy_addr = -1; 229 plat->phy_addr = -1;
168 230
169 /* If we find a phy-handle property, use it as the PHY */
170 plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
171
172 /* If phy-handle is not specified, check if we have a fixed-phy */
173 if (!plat->phy_node && of_phy_is_fixed_link(np)) {
174 if ((of_phy_register_fixed_link(np) < 0))
175 return ERR_PTR(-ENODEV);
176
177 plat->phy_node = of_node_get(np);
178 }
179
180 for_each_child_of_node(np, child_node)
181 if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
182 plat->mdio_node = child_node;
183 break;
184 }
185
186 /* "snps,phy-addr" is not a standard property. Mark it as deprecated 231 /* "snps,phy-addr" is not a standard property. Mark it as deprecated
187 * and warn of its use. Remove this when phy node support is added. 232 * and warn of its use. Remove this when phy node support is added.
188 */ 233 */
189 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 234 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
190 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 235 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
191 236
192 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node) 237 /* To Configure PHY by using all device-tree supported properties */
193 plat->mdio_bus_data = NULL; 238 if (stmmac_dt_phy(plat, np, &pdev->dev))
194 else 239 return ERR_PTR(-ENODEV);
195 plat->mdio_bus_data =
196 devm_kzalloc(&pdev->dev,
197 sizeof(struct stmmac_mdio_bus_data),
198 GFP_KERNEL);
199 240
200 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); 241 of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
201 242
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..e2fcdf1eec44 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,7 +367,6 @@ struct cpsw_priv {
367 spinlock_t lock; 367 spinlock_t lock;
368 struct platform_device *pdev; 368 struct platform_device *pdev;
369 struct net_device *ndev; 369 struct net_device *ndev;
370 struct device_node *phy_node;
371 struct napi_struct napi_rx; 370 struct napi_struct napi_rx;
372 struct napi_struct napi_tx; 371 struct napi_struct napi_tx;
373 struct device *dev; 372 struct device *dev;
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1148 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1147 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1149 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1148 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1150 1149
1151 if (priv->phy_node) 1150 if (slave->data->phy_node) {
1152 slave->phy = of_phy_connect(priv->ndev, priv->phy_node, 1151 slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1153 &cpsw_adjust_link, 0, slave->data->phy_if); 1152 &cpsw_adjust_link, 0, slave->data->phy_if);
1154 else 1153 if (!slave->phy) {
1154 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
1155 slave->data->phy_node->full_name,
1156 slave->slave_num);
1157 return;
1158 }
1159 } else {
1155 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1160 slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
1156 &cpsw_adjust_link, slave->data->phy_if); 1161 &cpsw_adjust_link, slave->data->phy_if);
1157 if (IS_ERR(slave->phy)) { 1162 if (IS_ERR(slave->phy)) {
1158 dev_err(priv->dev, "phy %s not found on slave %d\n", 1163 dev_err(priv->dev,
1159 slave->data->phy_id, slave->slave_num); 1164 "phy \"%s\" not found on slave %d, err %ld\n",
1160 slave->phy = NULL; 1165 slave->data->phy_id, slave->slave_num,
1161 } else { 1166 PTR_ERR(slave->phy));
1162 phy_attached_info(slave->phy); 1167 slave->phy = NULL;
1168 return;
1169 }
1170 }
1163 1171
1164 phy_start(slave->phy); 1172 phy_attached_info(slave->phy);
1165 1173
1166 /* Configure GMII_SEL register */ 1174 phy_start(slave->phy);
1167 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, 1175
1168 slave->slave_num); 1176 /* Configure GMII_SEL register */
1169 } 1177 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
1170} 1178}
1171 1179
1172static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) 1180static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1251,12 +1259,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1251 int i, ret; 1259 int i, ret;
1252 u32 reg; 1260 u32 reg;
1253 1261
1262 pm_runtime_get_sync(&priv->pdev->dev);
1263
1254 if (!cpsw_common_res_usage_state(priv)) 1264 if (!cpsw_common_res_usage_state(priv))
1255 cpsw_intr_disable(priv); 1265 cpsw_intr_disable(priv);
1256 netif_carrier_off(ndev); 1266 netif_carrier_off(ndev);
1257 1267
1258 pm_runtime_get_sync(&priv->pdev->dev);
1259
1260 reg = priv->version; 1268 reg = priv->version;
1261 1269
1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1270 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1940 slave->port_vlan = data->dual_emac_res_vlan; 1948 slave->port_vlan = data->dual_emac_res_vlan;
1941} 1949}
1942 1950
1943static int cpsw_probe_dt(struct cpsw_priv *priv, 1951static int cpsw_probe_dt(struct cpsw_platform_data *data,
1944 struct platform_device *pdev) 1952 struct platform_device *pdev)
1945{ 1953{
1946 struct device_node *node = pdev->dev.of_node; 1954 struct device_node *node = pdev->dev.of_node;
1947 struct device_node *slave_node; 1955 struct device_node *slave_node;
1948 struct cpsw_platform_data *data = &priv->data;
1949 int i = 0, ret; 1956 int i = 0, ret;
1950 u32 prop; 1957 u32 prop;
1951 1958
@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
2033 if (strcmp(slave_node->name, "slave")) 2040 if (strcmp(slave_node->name, "slave"))
2034 continue; 2041 continue;
2035 2042
2036 priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); 2043 slave_data->phy_node = of_parse_phandle(slave_node,
2044 "phy-handle", 0);
2037 parp = of_get_property(slave_node, "phy_id", &lenp); 2045 parp = of_get_property(slave_node, "phy_id", &lenp);
2038 if (of_phy_is_fixed_link(slave_node)) { 2046 if (slave_data->phy_node) {
2039 struct device_node *phy_node; 2047 dev_dbg(&pdev->dev,
2040 struct phy_device *phy_dev; 2048 "slave[%d] using phy-handle=\"%s\"\n",
2041 2049 i, slave_data->phy_node->full_name);
2050 } else if (of_phy_is_fixed_link(slave_node)) {
2042 /* In the case of a fixed PHY, the DT node associated 2051 /* In the case of a fixed PHY, the DT node associated
2043 * to the PHY is the Ethernet MAC DT node. 2052 * to the PHY is the Ethernet MAC DT node.
2044 */ 2053 */
2045 ret = of_phy_register_fixed_link(slave_node); 2054 ret = of_phy_register_fixed_link(slave_node);
2046 if (ret) 2055 if (ret)
2047 return ret; 2056 return ret;
2048 phy_node = of_node_get(slave_node); 2057 slave_data->phy_node = of_node_get(slave_node);
2049 phy_dev = of_phy_find_device(phy_node);
2050 if (!phy_dev)
2051 return -ENODEV;
2052 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2053 PHY_ID_FMT, phy_dev->mdio.bus->id,
2054 phy_dev->mdio.addr);
2055 } else if (parp) { 2058 } else if (parp) {
2056 u32 phyid; 2059 u32 phyid;
2057 struct device_node *mdio_node; 2060 struct device_node *mdio_node;
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
2072 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2075 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2073 PHY_ID_FMT, mdio->name, phyid); 2076 PHY_ID_FMT, mdio->name, phyid);
2074 } else { 2077 } else {
2075 dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); 2078 dev_err(&pdev->dev,
2079 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2080 i);
2076 goto no_phy_slave; 2081 goto no_phy_slave;
2077 } 2082 }
2078 slave_data->phy_if = of_get_phy_mode(slave_node); 2083 slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev)
2275 /* Select default pin state */ 2280 /* Select default pin state */
2276 pinctrl_pm_select_default_state(&pdev->dev); 2281 pinctrl_pm_select_default_state(&pdev->dev);
2277 2282
2278 if (cpsw_probe_dt(priv, pdev)) { 2283 if (cpsw_probe_dt(&priv->data, pdev)) {
2279 dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2284 dev_err(&pdev->dev, "cpsw: platform data missing\n");
2280 ret = -ENODEV; 2285 ret = -ENODEV;
2281 goto clean_runtime_disable_ret; 2286 goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a7038e660..e50afd1b2eda 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
18#include <linux/phy.h> 18#include <linux/phy.h>
19 19
20struct cpsw_slave_data { 20struct cpsw_slave_data {
21 struct device_node *phy_node;
21 char phy_id[MII_BUS_ID_SIZE]; 22 char phy_id[MII_BUS_ID_SIZE];
22 int phy_if; 23 int phy_if;
23 u8 mac_addr[ETH_ALEN]; 24 u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..f56d66e6ec15 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
1512 1512
1513 /* TODO: Add phy read and write and private statistics get feature */ 1513 /* TODO: Add phy read and write and private statistics get feature */
1514 1514
1515 return phy_mii_ioctl(priv->phydev, ifrq, cmd); 1515 if (priv->phydev)
1516 return phy_mii_ioctl(priv->phydev, ifrq, cmd);
1517 else
1518 return -EOPNOTSUPP;
1516} 1519}
1517 1520
1518static int match_first_device(struct device *dev, void *data) 1521static int match_first_device(struct device *dev, void *data)
@@ -1878,8 +1881,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1881 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1879 } 1882 }
1880 1883
1881 pdev->dev.platform_data = pdata;
1882
1883 return pdata; 1884 return pdata;
1884} 1885}
1885 1886
@@ -2101,6 +2102,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2101 cpdma_ctlr_destroy(priv->dma); 2102 cpdma_ctlr_destroy(priv->dma);
2102 2103
2103 unregister_netdev(ndev); 2104 unregister_netdev(ndev);
2105 pm_runtime_disable(&pdev->dev);
2104 free_netdev(ndev); 2106 free_netdev(ndev);
2105 2107
2106 return 0; 2108 return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 13214a6492ac..743b18266a7c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1622 continue; 1622 continue;
1623 1623
1624 /* copy hw scan info */ 1624 /* copy hw scan info */
1625 memcpy(target->hwinfo, scan_info, scan_info->size); 1625 memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
1626 target->essid_len = strnlen(scan_info->essid, 1626 target->essid_len = strnlen(scan_info->essid,
1627 sizeof(scan_info->essid)); 1627 sizeof(scan_info->essid));
1628 target->rate_len = 0; 1628 target->rate_len = 0;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5ca8817..c6385617bfb2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
880 macsec_skb_cb(skb)->valid = false; 880 macsec_skb_cb(skb)->valid = false;
881 skb = skb_share_check(skb, GFP_ATOMIC); 881 skb = skb_share_check(skb, GFP_ATOMIC);
882 if (!skb) 882 if (!skb)
883 return NULL; 883 return ERR_PTR(-ENOMEM);
884 884
885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
886 if (!req) { 886 if (!req) {
887 kfree_skb(skb); 887 kfree_skb(skb);
888 return NULL; 888 return ERR_PTR(-ENOMEM);
889 } 889 }
890 890
891 hdr = (struct macsec_eth_header *)skb->data; 891 hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
905 skb = skb_unshare(skb, GFP_ATOMIC); 905 skb = skb_unshare(skb, GFP_ATOMIC);
906 if (!skb) { 906 if (!skb) {
907 aead_request_free(req); 907 aead_request_free(req);
908 return NULL; 908 return ERR_PTR(-ENOMEM);
909 } 909 }
910 } else { 910 } else {
911 /* integrity only: all headers + data authenticated */ 911 /* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
921 dev_hold(dev); 921 dev_hold(dev);
922 ret = crypto_aead_decrypt(req); 922 ret = crypto_aead_decrypt(req);
923 if (ret == -EINPROGRESS) { 923 if (ret == -EINPROGRESS) {
924 return NULL; 924 return ERR_PTR(ret);
925 } else if (ret != 0) { 925 } else if (ret != 0) {
926 /* decryption/authentication failed 926 /* decryption/authentication failed
927 * 10.6 if validateFrames is disabled, deliver anyway 927 * 10.6 if validateFrames is disabled, deliver anyway
928 */ 928 */
929 if (ret != -EBADMSG) { 929 if (ret != -EBADMSG) {
930 kfree_skb(skb); 930 kfree_skb(skb);
931 skb = NULL; 931 skb = ERR_PTR(ret);
932 } 932 }
933 } else { 933 } else {
934 macsec_skb_cb(skb)->valid = true; 934 macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1148 1148
1149 if (!skb) { 1149 if (IS_ERR(skb)) {
1150 macsec_rxsa_put(rx_sa); 1150 /* the decrypt callback needs the reference */
1151 if (PTR_ERR(skb) != -EINPROGRESS)
1152 macsec_rxsa_put(rx_sa);
1151 rcu_read_unlock(); 1153 rcu_read_unlock();
1152 *pskb = NULL; 1154 *pskb = NULL;
1153 return RX_HANDLER_CONSUMED; 1155 return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
1161 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1163 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1162 macsec_reset_skb(skb, secy->netdev); 1164 macsec_reset_skb(skb, secy->netdev);
1163 1165
1164 macsec_rxsa_put(rx_sa); 1166 if (rx_sa)
1167 macsec_rxsa_put(rx_sa);
1165 count_rx(dev, skb->len); 1168 count_rx(dev, skb->len);
1166 1169
1167 rcu_read_unlock(); 1170 rcu_read_unlock();
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1622 } 1625 }
1623 1626
1624 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1627 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1625 if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, 1628 if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1626 secy->icv_len)) { 1629 secy->key_len, secy->icv_len)) {
1630 kfree(rx_sa);
1627 rtnl_unlock(); 1631 rtnl_unlock();
1628 return -ENOMEM; 1632 return -ENOMEM;
1629 } 1633 }
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1768 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1772 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
1769 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1773 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1770 secy->key_len, secy->icv_len)) { 1774 secy->key_len, secy->icv_len)) {
1775 kfree(tx_sa);
1771 rtnl_unlock(); 1776 rtnl_unlock();
1772 return -ENOMEM; 1777 return -ENOMEM;
1773 } 1778 }
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2227 return 1; 2232 return 1;
2228 2233
2229 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || 2234 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
2230 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 2235 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2236 MACSEC_DEFAULT_CIPHER_ID) ||
2231 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2237 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2232 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2238 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2233 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2239 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2268 if (!hdr) 2274 if (!hdr)
2269 return -EMSGSIZE; 2275 return -EMSGSIZE;
2270 2276
2271 rtnl_lock(); 2277 genl_dump_check_consistent(cb, hdr, &macsec_fam);
2272 2278
2273 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2279 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2274 goto nla_put_failure; 2280 goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2429 2435
2430 nla_nest_end(skb, rxsc_list); 2436 nla_nest_end(skb, rxsc_list);
2431 2437
2432 rtnl_unlock();
2433
2434 genlmsg_end(skb, hdr); 2438 genlmsg_end(skb, hdr);
2435 2439
2436 return 0; 2440 return 0;
2437 2441
2438nla_put_failure: 2442nla_put_failure:
2439 rtnl_unlock();
2440 genlmsg_cancel(skb, hdr); 2443 genlmsg_cancel(skb, hdr);
2441 return -EMSGSIZE; 2444 return -EMSGSIZE;
2442} 2445}
2443 2446
2447static int macsec_generation = 1; /* protected by RTNL */
2448
2444static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2449static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2445{ 2450{
2446 struct net *net = sock_net(skb->sk); 2451 struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2450 dev_idx = cb->args[0]; 2455 dev_idx = cb->args[0];
2451 2456
2452 d = 0; 2457 d = 0;
2458 rtnl_lock();
2459
2460 cb->seq = macsec_generation;
2461
2453 for_each_netdev(net, dev) { 2462 for_each_netdev(net, dev) {
2454 struct macsec_secy *secy; 2463 struct macsec_secy *secy;
2455 2464
@@ -2467,6 +2476,7 @@ next:
2467 } 2476 }
2468 2477
2469done: 2478done:
2479 rtnl_unlock();
2470 cb->args[0] = d; 2480 cb->args[0] = d;
2471 return skb->len; 2481 return skb->len;
2472} 2482}
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
2920 struct net_device *real_dev = macsec->real_dev; 2930 struct net_device *real_dev = macsec->real_dev;
2921 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 2931 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
2922 2932
2933 macsec_generation++;
2934
2923 unregister_netdevice_queue(dev, head); 2935 unregister_netdevice_queue(dev, head);
2924 list_del_rcu(&macsec->secys); 2936 list_del_rcu(&macsec->secys);
2925 if (list_empty(&rxd->secys)) 2937 if (list_empty(&rxd->secys)) {
2926 netdev_rx_handler_unregister(real_dev); 2938 netdev_rx_handler_unregister(real_dev);
2939 kfree(rxd);
2940 }
2927 2941
2928 macsec_del_dev(macsec); 2942 macsec_del_dev(macsec);
2929} 2943}
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
2945 2959
2946 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 2960 err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
2947 rxd); 2961 rxd);
2948 if (err < 0) 2962 if (err < 0) {
2963 kfree(rxd);
2949 return err; 2964 return err;
2965 }
2950 } 2966 }
2951 2967
2952 list_add_tail_rcu(&macsec->secys, &rxd->secys); 2968 list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3066 if (err < 0) 3082 if (err < 0)
3067 goto del_dev; 3083 goto del_dev;
3068 3084
3085 macsec_generation++;
3086
3069 dev_hold(real_dev); 3087 dev_hold(real_dev);
3070 3088
3071 return 0; 3089 return 0;
@@ -3079,7 +3097,7 @@ unregister:
3079 3097
3080static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3098static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3081{ 3099{
3082 u64 csid = DEFAULT_CIPHER_ID; 3100 u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3083 u8 icv_len = DEFAULT_ICV_LEN; 3101 u8 icv_len = DEFAULT_ICV_LEN;
3084 int flag; 3102 int flag;
3085 bool es, scb, sci; 3103 bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3094 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3112 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3095 3113
3096 switch (csid) { 3114 switch (csid) {
3097 case DEFAULT_CIPHER_ID: 3115 case MACSEC_DEFAULT_CIPHER_ID:
3098 case DEFAULT_CIPHER_ALT: 3116 case MACSEC_DEFAULT_CIPHER_ALT:
3099 if (icv_len < MACSEC_MIN_ICV_LEN || 3117 if (icv_len < MACSEC_MIN_ICV_LEN ||
3100 icv_len > MACSEC_MAX_ICV_LEN) 3118 icv_len > MACSEC_MAX_ICV_LEN)
3101 return -EINVAL; 3119 return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3129 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3147 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3130 return -EINVAL; 3148 return -EINVAL;
3131 3149
3132 if ((data[IFLA_MACSEC_PROTECT] && 3150 if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3133 nla_get_u8(data[IFLA_MACSEC_PROTECT])) && 3151 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3134 !data[IFLA_MACSEC_WINDOW]) 3152 !data[IFLA_MACSEC_WINDOW])
3135 return -EINVAL; 3153 return -EINVAL;
3136 3154
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
3168 3186
3169 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || 3187 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
3170 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3188 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3171 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 3189 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
3190 MACSEC_DEFAULT_CIPHER_ID) ||
3172 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3191 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3173 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3192 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3174 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3193 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index b3ffaee30858..f279a897a5c7 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
359 * in the FIFO. In such cases, the FIFO enters an error mode it 359 * in the FIFO. In such cases, the FIFO enters an error mode it
360 * cannot recover from by software. 360 * cannot recover from by software.
361 */ 361 */
362 if (phydev->drv->phy_id == ATH8030_PHY_ID) { 362 if (phydev->state == PHY_NOLINK) {
363 if (phydev->state == PHY_NOLINK) { 363 if (priv->gpiod_reset && !priv->phy_reset) {
364 if (priv->gpiod_reset && !priv->phy_reset) { 364 struct at803x_context context;
365 struct at803x_context context; 365
366 366 at803x_context_save(phydev, &context);
367 at803x_context_save(phydev, &context); 367
368 368 gpiod_set_value(priv->gpiod_reset, 1);
369 gpiod_set_value(priv->gpiod_reset, 1); 369 msleep(1);
370 msleep(1); 370 gpiod_set_value(priv->gpiod_reset, 0);
371 gpiod_set_value(priv->gpiod_reset, 0); 371 msleep(1);
372 msleep(1); 372
373 373 at803x_context_restore(phydev, &context);
374 at803x_context_restore(phydev, &context); 374
375 375 phydev_dbg(phydev, "%s(): phy was reset\n",
376 phydev_dbg(phydev, "%s(): phy was reset\n", 376 __func__);
377 __func__); 377 priv->phy_reset = true;
378 priv->phy_reset = true;
379 }
380 } else {
381 priv->phy_reset = false;
382 } 378 }
379 } else {
380 priv->phy_reset = false;
383 } 381 }
384} 382}
385 383
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
391 .phy_id_mask = 0xffffffef, 389 .phy_id_mask = 0xffffffef,
392 .probe = at803x_probe, 390 .probe = at803x_probe,
393 .config_init = at803x_config_init, 391 .config_init = at803x_config_init,
394 .link_change_notify = at803x_link_change_notify,
395 .set_wol = at803x_set_wol, 392 .set_wol = at803x_set_wol,
396 .get_wol = at803x_get_wol, 393 .get_wol = at803x_get_wol,
397 .suspend = at803x_suspend, 394 .suspend = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
427 .phy_id_mask = 0xffffffef, 424 .phy_id_mask = 0xffffffef,
428 .probe = at803x_probe, 425 .probe = at803x_probe,
429 .config_init = at803x_config_init, 426 .config_init = at803x_config_init,
430 .link_change_notify = at803x_link_change_notify,
431 .set_wol = at803x_set_wol, 427 .set_wol = at803x_set_wol,
432 .get_wol = at803x_get_wol, 428 .get_wol = at803x_get_wol,
433 .suspend = at803x_suspend, 429 .suspend = at803x_suspend,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b881a7b1e4f6..9636da0b6efc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -339,6 +339,8 @@ static struct phy_driver bcm7xxx_driver[] = {
339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), 339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), 340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), 341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
342 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), 344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
343 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), 345 BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
344 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), 346 BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
348 { PHY_ID_BCM7250, 0xfffffff0, }, 350 { PHY_ID_BCM7250, 0xfffffff0, },
349 { PHY_ID_BCM7364, 0xfffffff0, }, 351 { PHY_ID_BCM7364, 0xfffffff0, },
350 { PHY_ID_BCM7366, 0xfffffff0, }, 352 { PHY_ID_BCM7366, 0xfffffff0, },
353 { PHY_ID_BCM7346, 0xfffffff0, },
354 { PHY_ID_BCM7362, 0xfffffff0, },
351 { PHY_ID_BCM7425, 0xfffffff0, }, 355 { PHY_ID_BCM7425, 0xfffffff0, },
352 { PHY_ID_BCM7429, 0xfffffff0, }, 356 { PHY_ID_BCM7429, 0xfffffff0, },
353 { PHY_ID_BCM7439, 0xfffffff0, }, 357 { PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
441 return -ENOMEM; 441 return -ENOMEM;
442 442
443 mutex_init(&ks->lock); 443 mutex_init(&ks->lock);
444 ks->spi = spi_dev_get(spi); 444 ks->spi = spi;
445 ks->chip = &ks8995_chip[variant]; 445 ks->chip = &ks8995_chip[variant];
446 446
447 if (ks->spi->dev.of_node) { 447 if (ks->spi->dev.of_node) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26c64d2782fa..a0f64cba86ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1198,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1198 goto err_dev_open; 1198 goto err_dev_open;
1199 } 1199 }
1200 1200
1201 dev_uc_sync_multiple(port_dev, dev);
1202 dev_mc_sync_multiple(port_dev, dev);
1203
1201 err = vlan_vids_add_by_dev(port_dev, dev); 1204 err = vlan_vids_add_by_dev(port_dev, dev);
1202 if (err) { 1205 if (err) {
1203 netdev_err(dev, "Failed to add vlan ids to device %s\n", 1206 netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@ err_enable_netpoll:
1261 vlan_vids_del_by_dev(port_dev, dev); 1264 vlan_vids_del_by_dev(port_dev, dev);
1262 1265
1263err_vids_add: 1266err_vids_add:
1267 dev_uc_unsync(port_dev, dev);
1268 dev_mc_unsync(port_dev, dev);
1264 dev_close(port_dev); 1269 dev_close(port_dev);
1265 1270
1266err_dev_open: 1271err_dev_open:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afdf950617c3..2c9e45f50edb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -622,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
622 622
623 /* Re-attach the filter to persist device */ 623 /* Re-attach the filter to persist device */
624 if (!skip_filter && (tun->filter_attached == true)) { 624 if (!skip_filter && (tun->filter_attached == true)) {
625 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 625 err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
626 lockdep_rtnl_is_held());
626 if (!err) 627 if (!err)
627 goto out; 628 goto out;
628 } 629 }
@@ -1014,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
1014 /* Zero header length */ 1015 /* Zero header length */
1015 dev->type = ARPHRD_NONE; 1016 dev->type = ARPHRD_NONE;
1016 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1017 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1017 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1018 break; 1018 break;
1019 1019
1020 case IFF_TAP: 1020 case IFF_TAP:
@@ -1026,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)
1026 1026
1027 eth_hw_addr_random(dev); 1027 eth_hw_addr_random(dev);
1028 1028
1029 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
1030 break; 1029 break;
1031 } 1030 }
1032} 1031}
@@ -1480,6 +1479,8 @@ static void tun_setup(struct net_device *dev)
1480 1479
1481 dev->ethtool_ops = &tun_ethtool_ops; 1480 dev->ethtool_ops = &tun_ethtool_ops;
1482 dev->destructor = tun_free_netdev; 1481 dev->destructor = tun_free_netdev;
1482 /* We prefer our own queue length */
1483 dev->tx_queue_len = TUN_READQ_SIZE;
1483} 1484}
1484 1485
1485/* Trivial set of netlink ops to allow deleting tun or tap 1486/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1822,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
1822 1823
1823 for (i = 0; i < n; i++) { 1824 for (i = 0; i < n; i++) {
1824 tfile = rtnl_dereference(tun->tfiles[i]); 1825 tfile = rtnl_dereference(tun->tfiles[i]);
1825 sk_detach_filter(tfile->socket.sk); 1826 __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
1826 } 1827 }
1827 1828
1828 tun->filter_attached = false; 1829 tun->filter_attached = false;
@@ -1835,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun)
1835 1836
1836 for (i = 0; i < tun->numqueues; i++) { 1837 for (i = 0; i < tun->numqueues; i++) {
1837 tfile = rtnl_dereference(tun->tfiles[i]); 1838 tfile = rtnl_dereference(tun->tfiles[i]);
1838 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 1839 ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
1840 lockdep_rtnl_is_held());
1839 if (ret) { 1841 if (ret) {
1840 tun_detach_filter(tun, i); 1842 tun_detach_filter(tun, i);
1841 return ret; 1843 return ret;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
619 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */ 620
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 621 /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
622 * (12d1:157d), are known to fail unless the NDP is placed
623 * after the IP packets. Applying the quirk to all Huawei
624 * devices is broader than necessary, but harmless.
625 */
626 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 }, 628 },
624 /* default entry */ 629 /* default entry */
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30ba35e8..2fb31edab125 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
1626 .driver_info = (unsigned long) &wwan_info, 1626 .driver_info = (unsigned long) &wwan_info,
1627 }, 1627 },
1628 1628
1629 /* Telit LE910 V2 */
1630 { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
1631 USB_CLASS_COMM,
1632 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
1633 .driver_info = (unsigned long)&wwan_noarp_info,
1634 },
1635
1629 /* DW5812 LTE Verizon Mobile Broadband Card 1636 /* DW5812 LTE Verizon Mobile Broadband Card
1630 * Unlike DW5550 this device requires FLAG_NOARP 1637 * Unlike DW5550 this device requires FLAG_NOARP
1631 */ 1638 */
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20890ee03f3..f64778ad9753 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */
269 struct lan78xx_net *dev; 269 struct lan78xx_net *dev;
270 enum skb_state state; 270 enum skb_state state;
271 size_t length; 271 size_t length;
272 int num_of_packet;
272}; 273};
273 274
274struct usb_context { 275struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1803 1804
1804static void lan78xx_link_status_change(struct net_device *net) 1805static void lan78xx_link_status_change(struct net_device *net)
1805{ 1806{
1806 /* nothing to do */ 1807 struct phy_device *phydev = net->phydev;
1808 int ret, temp;
1809
1810 /* At forced 100 F/H mode, chip may fail to set mode correctly
1811 * when cable is switched between long(~50+m) and short one.
1812 * As workaround, set to 10 before setting to 100
1813 * at forced 100 F/H mode.
1814 */
1815 if (!phydev->autoneg && (phydev->speed == 100)) {
1816 /* disable phy interrupt */
1817 temp = phy_read(phydev, LAN88XX_INT_MASK);
1818 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1819 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1820
1821 temp = phy_read(phydev, MII_BMCR);
1822 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1823 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1824 temp |= BMCR_SPEED100;
1825 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1826
1827 /* clear pending interrupt generated while workaround */
1828 temp = phy_read(phydev, LAN88XX_INT_STS);
1829
1830 /* enable phy interrupt back */
1831 temp = phy_read(phydev, LAN88XX_INT_MASK);
1832 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1833 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1834 }
1807} 1835}
1808 1836
1809static int lan78xx_phy_init(struct lan78xx_net *dev) 1837static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
2464 struct lan78xx_net *dev = entry->dev; 2492 struct lan78xx_net *dev = entry->dev;
2465 2493
2466 if (urb->status == 0) { 2494 if (urb->status == 0) {
2467 dev->net->stats.tx_packets++; 2495 dev->net->stats.tx_packets += entry->num_of_packet;
2468 dev->net->stats.tx_bytes += entry->length; 2496 dev->net->stats.tx_bytes += entry->length;
2469 } else { 2497 } else {
2470 dev->net->stats.tx_errors++; 2498 dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2681 return; 2709 return;
2682 } 2710 }
2683 2711
2684 skb->protocol = eth_type_trans(skb, dev->net);
2685 dev->net->stats.rx_packets++; 2712 dev->net->stats.rx_packets++;
2686 dev->net->stats.rx_bytes += skb->len; 2713 dev->net->stats.rx_bytes += skb->len;
2687 2714
2715 skb->protocol = eth_type_trans(skb, dev->net);
2716
2688 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 2717 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2689 skb->len + sizeof(struct ethhdr), skb->protocol); 2718 skb->len + sizeof(struct ethhdr), skb->protocol);
2690 memset(skb->cb, 0, sizeof(struct skb_data)); 2719 memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2934 2963
2935 skb_totallen = 0; 2964 skb_totallen = 0;
2936 pkt_cnt = 0; 2965 pkt_cnt = 0;
2966 count = 0;
2967 length = 0;
2937 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { 2968 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2938 if (skb_is_gso(skb)) { 2969 if (skb_is_gso(skb)) {
2939 if (pkt_cnt) { 2970 if (pkt_cnt) {
2940 /* handle previous packets first */ 2971 /* handle previous packets first */
2941 break; 2972 break;
2942 } 2973 }
2943 length = skb->len; 2974 count = 1;
2975 length = skb->len - TX_OVERHEAD;
2944 skb2 = skb_dequeue(tqp); 2976 skb2 = skb_dequeue(tqp);
2945 goto gso_skb; 2977 goto gso_skb;
2946 } 2978 }
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
2961 for (count = pos = 0; count < pkt_cnt; count++) { 2993 for (count = pos = 0; count < pkt_cnt; count++) {
2962 skb2 = skb_dequeue(tqp); 2994 skb2 = skb_dequeue(tqp);
2963 if (skb2) { 2995 if (skb2) {
2996 length += (skb2->len - TX_OVERHEAD);
2964 memcpy(skb->data + pos, skb2->data, skb2->len); 2997 memcpy(skb->data + pos, skb2->data, skb2->len);
2965 pos += roundup(skb2->len, sizeof(u32)); 2998 pos += roundup(skb2->len, sizeof(u32));
2966 dev_kfree_skb(skb2); 2999 dev_kfree_skb(skb2);
2967 } 3000 }
2968 } 3001 }
2969 3002
2970 length = skb_totallen;
2971
2972gso_skb: 3003gso_skb:
2973 urb = usb_alloc_urb(0, GFP_ATOMIC); 3004 urb = usb_alloc_urb(0, GFP_ATOMIC);
2974 if (!urb) { 3005 if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
2980 entry->urb = urb; 3011 entry->urb = urb;
2981 entry->dev = dev; 3012 entry->dev = dev;
2982 entry->length = length; 3013 entry->length = length;
3014 entry->num_of_packet = count;
2983 3015
2984 spin_lock_irqsave(&dev->txq.lock, flags); 3016 spin_lock_irqsave(&dev->txq.lock, flags);
2985 ret = usb_autopm_get_interface_async(dev->intf); 3017 ret = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f84080215915..82129eef7774 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
411 int ret; 411 int ret;
412 412
413 read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); 413 read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
414 data[0] = 0xc9; 414 data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
415 data[1] = 0; 415 data[1] = 0;
416 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) 416 if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
417 data[1] |= 0x20; /* set full duplex */ 417 data[1] |= 0x20; /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
497 pkt_len = buf[count - 3] << 8; 497 pkt_len = buf[count - 3] << 8;
498 pkt_len += buf[count - 4]; 498 pkt_len += buf[count - 4];
499 pkt_len &= 0xfff; 499 pkt_len &= 0xfff;
500 pkt_len -= 8; 500 pkt_len -= 4;
501 } 501 }
502 502
503 /* 503 /*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
528goon: 528goon:
529 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 529 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
530 usb_rcvbulkpipe(pegasus->usb, 1), 530 usb_rcvbulkpipe(pegasus->usb, 1),
531 pegasus->rx_skb->data, PEGASUS_MTU + 8, 531 pegasus->rx_skb->data, PEGASUS_MTU,
532 read_bulk_callback, pegasus); 532 read_bulk_callback, pegasus);
533 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); 533 rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
534 if (rx_status == -ENODEV) 534 if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
569 } 569 }
570 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 570 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
571 usb_rcvbulkpipe(pegasus->usb, 1), 571 usb_rcvbulkpipe(pegasus->usb, 1),
572 pegasus->rx_skb->data, PEGASUS_MTU + 8, 572 pegasus->rx_skb->data, PEGASUS_MTU,
573 read_bulk_callback, pegasus); 573 read_bulk_callback, pegasus);
574try_again: 574try_again:
575 status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); 575 status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
823 823
824 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, 824 usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
825 usb_rcvbulkpipe(pegasus->usb, 1), 825 usb_rcvbulkpipe(pegasus->usb, 1),
826 pegasus->rx_skb->data, PEGASUS_MTU + 8, 826 pegasus->rx_skb->data, PEGASUS_MTU,
827 read_bulk_callback, pegasus); 827 read_bulk_callback, pegasus);
828 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { 828 if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
829 if (res == -ENODEV) 829 if (res == -ENODEV)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..22e1a9a99a7d 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
38 * HEADS UP: this handshaking isn't all that robust. This driver 38 * HEADS UP: this handshaking isn't all that robust. This driver
39 * gets confused easily if you unplug one end of the cable then 39 * gets confused easily if you unplug one end of the cable then
40 * try to connect it again; you'll need to restart both ends. The 40 * try to connect it again; you'll need to restart both ends. The
41 * "naplink" software (used by some PlayStation/2 deveopers) does 41 * "naplink" software (used by some PlayStation/2 developers) does
42 * the handshaking much better! Also, sometimes this hardware 42 * the handshaking much better! Also, sometimes this hardware
43 * seems to get wedged under load. Prolific docs are weak, and 43 * seems to get wedged under load. Prolific docs are weak, and
44 * don't identify differences between PL2301 and PL2302, much less 44 * don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7d717c66bcb0..9d1fce8a6e84 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 844 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 845 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 846 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
847 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
847 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 848 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
848 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 849 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
849 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 850 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 30033dbe6662..c369db99c005 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -29,6 +29,7 @@
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 30#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/of_net.h>
32#include "smsc75xx.h" 33#include "smsc75xx.h"
33 34
34#define SMSC_CHIPNAME "smsc75xx" 35#define SMSC_CHIPNAME "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
761 762
762static void smsc75xx_init_mac_address(struct usbnet *dev) 763static void smsc75xx_init_mac_address(struct usbnet *dev)
763{ 764{
765 const u8 *mac_addr;
766
767 /* maybe the boot loader passed the MAC address in devicetree */
768 mac_addr = of_get_mac_address(dev->udev->dev.of_node);
769 if (mac_addr) {
770 memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
771 return;
772 }
773
764 /* try reading mac address from EEPROM */ 774 /* try reading mac address from EEPROM */
765 if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, 775 if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
766 dev->net->dev_addr) == 0) { 776 dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
772 } 782 }
773 } 783 }
774 784
775 /* no eeprom, or eeprom values are invalid. generate random MAC */ 785 /* no useful static MAC address found. generate a random one */
776 eth_hw_addr_random(dev->net); 786 eth_hw_addr_random(dev->net);
777 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); 787 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
778} 788}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9f614e..2edc2bc6d1b9 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -29,6 +29,7 @@
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 30#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/of_net.h>
32#include "smsc95xx.h" 33#include "smsc95xx.h"
33 34
34#define SMSC_CHIPNAME "smsc95xx" 35#define SMSC_CHIPNAME "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
765 766
766static void smsc95xx_init_mac_address(struct usbnet *dev) 767static void smsc95xx_init_mac_address(struct usbnet *dev)
767{ 768{
769 const u8 *mac_addr;
770
771 /* maybe the boot loader passed the MAC address in devicetree */
772 mac_addr = of_get_mac_address(dev->udev->dev.of_node);
773 if (mac_addr) {
774 memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
775 return;
776 }
777
768 /* try reading mac address from EEPROM */ 778 /* try reading mac address from EEPROM */
769 if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, 779 if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
770 dev->net->dev_addr) == 0) { 780 dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
775 } 785 }
776 } 786 }
777 787
778 /* no eeprom, or eeprom values are invalid. generate random MAC */ 788 /* no useful static MAC address found. generate a random one */
779 eth_hw_addr_random(dev->net); 789 eth_hw_addr_random(dev->net);
780 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); 790 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
781} 791}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1152 union Vmxnet3_GenericDesc *gdesc) 1152 union Vmxnet3_GenericDesc *gdesc)
1153{ 1153{
1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1155 /* typical case: TCP/UDP over IP and both csums are correct */ 1155 if (gdesc->rcd.v4 &&
1156 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1156 (le32_to_cpu(gdesc->dword[3]) &
1157 VMXNET3_RCD_CSUM_OK) { 1157 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(gdesc->rcd.frg);
1161 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1162 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1164 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1161 BUG_ON(gdesc->rcd.frg); 1165 BUG_ON(gdesc->rcd.frg);
1162 } else { 1166 } else {
1163 if (gdesc->rcd.csum) { 1167 if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
60 struct u64_stats_sync syncp; 60 struct u64_stats_sync syncp;
61}; 61};
62 62
63static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
64{
65 return dst;
66}
67
68static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
69{
70 return ip_local_out(net, sk, skb);
71}
72
73static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
74{
75 /* TO-DO: return max ethernet size? */
76 return dst->dev->mtu;
77}
78
79static void vrf_dst_destroy(struct dst_entry *dst)
80{
81 /* our dst lives forever - or until the device is closed */
82}
83
84static unsigned int vrf_default_advmss(const struct dst_entry *dst)
85{
86 return 65535 - 40;
87}
88
89static struct dst_ops vrf_dst_ops = {
90 .family = AF_INET,
91 .local_out = vrf_ip_local_out,
92 .check = vrf_ip_check,
93 .mtu = vrf_v4_mtu,
94 .destroy = vrf_dst_destroy,
95 .default_advmss = vrf_default_advmss,
96};
97
98/* neighbor handling is done with actual device; do not want 63/* neighbor handling is done with actual device; do not want
99 * to flip skb->dev for those ndisc packets. This really fails 64 * to flip skb->dev for those ndisc packets. This really fails
100 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is 65 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
349} 314}
350 315
351#if IS_ENABLED(CONFIG_IPV6) 316#if IS_ENABLED(CONFIG_IPV6)
352static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
353{
354 return dst;
355}
356
357static struct dst_ops vrf_dst_ops6 = {
358 .family = AF_INET6,
359 .local_out = ip6_local_out,
360 .check = vrf_ip6_check,
361 .mtu = vrf_v4_mtu,
362 .destroy = vrf_dst_destroy,
363 .default_advmss = vrf_default_advmss,
364};
365
366static int init_dst_ops6_kmem_cachep(void)
367{
368 vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
369 sizeof(struct rt6_info),
370 0,
371 SLAB_HWCACHE_ALIGN,
372 NULL);
373
374 if (!vrf_dst_ops6.kmem_cachep)
375 return -ENOMEM;
376
377 return 0;
378}
379
380static void free_dst_ops6_kmem_cachep(void)
381{
382 kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
383}
384
385static int vrf_input6(struct sk_buff *skb)
386{
387 skb->dev->stats.rx_errors++;
388 kfree_skb(skb);
389 return 0;
390}
391
392/* modelled after ip6_finish_output2 */ 317/* modelled after ip6_finish_output2 */
393static int vrf_finish_output6(struct net *net, struct sock *sk, 318static int vrf_finish_output6(struct net *net, struct sock *sk,
394 struct sk_buff *skb) 319 struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
429 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 354 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
430} 355}
431 356
432static void vrf_rt6_destroy(struct net_vrf *vrf) 357static void vrf_rt6_release(struct net_vrf *vrf)
433{ 358{
434 dst_destroy(&vrf->rt6->dst); 359 dst_release(&vrf->rt6->dst);
435 free_percpu(vrf->rt6->rt6i_pcpu);
436 vrf->rt6 = NULL; 360 vrf->rt6 = NULL;
437} 361}
438 362
439static int vrf_rt6_create(struct net_device *dev) 363static int vrf_rt6_create(struct net_device *dev)
440{ 364{
441 struct net_vrf *vrf = netdev_priv(dev); 365 struct net_vrf *vrf = netdev_priv(dev);
442 struct dst_entry *dst; 366 struct net *net = dev_net(dev);
443 struct rt6_info *rt6; 367 struct rt6_info *rt6;
444 int cpu;
445 int rc = -ENOMEM; 368 int rc = -ENOMEM;
446 369
447 rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, 370 rt6 = ip6_dst_alloc(net, dev,
448 DST_OBSOLETE_NONE, 371 DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
449 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
450 if (!rt6) 372 if (!rt6)
451 goto out; 373 goto out;
452 374
453 dst = &rt6->dst;
454
455 rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
456 if (!rt6->rt6i_pcpu) {
457 dst_destroy(dst);
458 goto out;
459 }
460 for_each_possible_cpu(cpu) {
461 struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
462 *p = NULL;
463 }
464
465 memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
466
467 INIT_LIST_HEAD(&rt6->rt6i_siblings);
468 INIT_LIST_HEAD(&rt6->rt6i_uncached);
469
470 rt6->dst.input = vrf_input6;
471 rt6->dst.output = vrf_output6; 375 rt6->dst.output = vrf_output6;
472 376 rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
473 rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); 377 dst_hold(&rt6->dst);
474
475 atomic_set(&rt6->dst.__refcnt, 2);
476
477 vrf->rt6 = rt6; 378 vrf->rt6 = rt6;
478 rc = 0; 379 rc = 0;
479out: 380out:
480 return rc; 381 return rc;
481} 382}
482#else 383#else
483static int init_dst_ops6_kmem_cachep(void) 384static void vrf_rt6_release(struct net_vrf *vrf)
484{
485 return 0;
486}
487
488static void free_dst_ops6_kmem_cachep(void)
489{
490}
491
492static void vrf_rt6_destroy(struct net_vrf *vrf)
493{ 385{
494} 386}
495 387
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
557 !(IPCB(skb)->flags & IPSKB_REROUTED)); 449 !(IPCB(skb)->flags & IPSKB_REROUTED));
558} 450}
559 451
560static void vrf_rtable_destroy(struct net_vrf *vrf) 452static void vrf_rtable_release(struct net_vrf *vrf)
561{ 453{
562 struct dst_entry *dst = (struct dst_entry *)vrf->rth; 454 struct dst_entry *dst = (struct dst_entry *)vrf->rth;
563 455
564 dst_destroy(dst); 456 dst_release(dst);
565 vrf->rth = NULL; 457 vrf->rth = NULL;
566} 458}
567 459
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
570 struct net_vrf *vrf = netdev_priv(dev); 462 struct net_vrf *vrf = netdev_priv(dev);
571 struct rtable *rth; 463 struct rtable *rth;
572 464
573 rth = dst_alloc(&vrf_dst_ops, dev, 2, 465 rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
574 DST_OBSOLETE_NONE,
575 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
576 if (rth) { 466 if (rth) {
577 rth->dst.output = vrf_output; 467 rth->dst.output = vrf_output;
578 rth->rt_genid = rt_genid_ipv4(dev_net(dev));
579 rth->rt_flags = 0;
580 rth->rt_type = RTN_UNICAST;
581 rth->rt_is_input = 0;
582 rth->rt_iif = 0;
583 rth->rt_pmtu = 0;
584 rth->rt_gateway = 0;
585 rth->rt_uses_gateway = 0;
586 rth->rt_table_id = vrf->tb_id; 468 rth->rt_table_id = vrf->tb_id;
587 INIT_LIST_HEAD(&rth->rt_uncached);
588 rth->rt_uncached_list = NULL;
589 } 469 }
590 470
591 return rth; 471 return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
673 struct net_device *port_dev; 553 struct net_device *port_dev;
674 struct list_head *iter; 554 struct list_head *iter;
675 555
676 vrf_rtable_destroy(vrf); 556 vrf_rtable_release(vrf);
677 vrf_rt6_destroy(vrf); 557 vrf_rt6_release(vrf);
678 558
679 netdev_for_each_lower_dev(dev, port_dev, iter) 559 netdev_for_each_lower_dev(dev, port_dev, iter)
680 vrf_del_slave(dev, port_dev); 560 vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
704 return 0; 584 return 0;
705 585
706out_rth: 586out_rth:
707 vrf_rtable_destroy(vrf); 587 vrf_rtable_release(vrf);
708out_stats: 588out_stats:
709 free_percpu(dev->dstats); 589 free_percpu(dev->dstats);
710 dev->dstats = NULL; 590 dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
737 struct net_vrf *vrf = netdev_priv(dev); 617 struct net_vrf *vrf = netdev_priv(dev);
738 618
739 rth = vrf->rth; 619 rth = vrf->rth;
740 atomic_inc(&rth->dst.__refcnt); 620 dst_hold(&rth->dst);
741 } 621 }
742 622
743 return rth; 623 return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
788 struct net_vrf *vrf = netdev_priv(dev); 668 struct net_vrf *vrf = netdev_priv(dev);
789 669
790 rt = vrf->rt6; 670 rt = vrf->rt6;
791 atomic_inc(&rt->dst.__refcnt); 671 dst_hold(&rt->dst);
792 } 672 }
793 673
794 return (struct dst_entry *)rt; 674 return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
946{ 826{
947 int rc; 827 int rc;
948 828
949 vrf_dst_ops.kmem_cachep =
950 kmem_cache_create("vrf_ip_dst_cache",
951 sizeof(struct rtable), 0,
952 SLAB_HWCACHE_ALIGN,
953 NULL);
954
955 if (!vrf_dst_ops.kmem_cachep)
956 return -ENOMEM;
957
958 rc = init_dst_ops6_kmem_cachep();
959 if (rc != 0)
960 goto error2;
961
962 register_netdevice_notifier(&vrf_notifier_block); 829 register_netdevice_notifier(&vrf_notifier_block);
963 830
964 rc = rtnl_link_register(&vrf_link_ops); 831 rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
969 836
970error: 837error:
971 unregister_netdevice_notifier(&vrf_notifier_block); 838 unregister_netdevice_notifier(&vrf_notifier_block);
972 free_dst_ops6_kmem_cachep();
973error2:
974 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
975 return rc; 839 return rc;
976} 840}
977 841
978static void __exit vrf_cleanup_module(void)
979{
980 rtnl_link_unregister(&vrf_link_ops);
981 unregister_netdevice_notifier(&vrf_notifier_block);
982 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
983 free_dst_ops6_kmem_cachep();
984}
985
986module_init(vrf_init_module); 842module_init(vrf_init_module);
987module_exit(vrf_cleanup_module);
988MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 843MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
989MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 844MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
990MODULE_LICENSE("GPL"); 845MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f8793004b9f..1b271b99c49e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
274 }; 274 };
275 static const int inc[4] = { 0, 100, 0, 0 }; 275 static const int inc[4] = { 0, 100, 0, 0 };
276 276
277 memset(&mask_m, 0, sizeof(int8_t) * 123);
278 memset(&mask_p, 0, sizeof(int8_t) * 123);
279
277 cur_bin = -6000; 280 cur_bin = -6000;
278 upper = bin + 100; 281 upper = bin + 100;
279 lower = bin - 100; 282 lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
424 int tmp, new; 427 int tmp, new;
425 int i; 428 int i;
426 429
427 int8_t mask_m[123];
428 int8_t mask_p[123];
429 int cur_bb_spur; 430 int cur_bb_spur;
430 bool is2GHz = IS_CHAN_2GHZ(chan); 431 bool is2GHz = IS_CHAN_2GHZ(chan);
431 432
432 memset(&mask_m, 0, sizeof(int8_t) * 123);
433 memset(&mask_p, 0, sizeof(int8_t) * 123);
434
435 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 433 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
436 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); 434 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
437 if (AR_NO_SPUR == cur_bb_spur) 435 if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db6624527d99..53d7445a5d12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
178 int i; 178 int i;
179 struct chan_centers centers; 179 struct chan_centers centers;
180 180
181 int8_t mask_m[123];
182 int8_t mask_p[123];
183 int cur_bb_spur; 181 int cur_bb_spur;
184 bool is2GHz = IS_CHAN_2GHZ(chan); 182 bool is2GHz = IS_CHAN_2GHZ(chan);
185 183
186 memset(&mask_m, 0, sizeof(int8_t) * 123);
187 memset(&mask_p, 0, sizeof(int8_t) * 123);
188
189 ath9k_hw_get_channel_centers(ah, chan, &centers); 184 ath9k_hw_get_channel_centers(ah, chan, &centers);
190 freq = centers.synth_center; 185 freq = centers.synth_center;
191 186
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware);
5681 schedule_work(&wl->firmware_load); 5681 schedule_work(&wl->firmware_load);
5682 5682
5683bcma_out:
5684 return err; 5683 return err;
5685 5684
5686bcma_err_wireless_exit: 5685bcma_err_wireless_exit:
5687 ieee80211_free_hw(wl->hw); 5686 ieee80211_free_hw(wl->hw);
5687bcma_out:
5688 kfree(dev);
5688 return err; 5689 return err;
5689} 5690}
5690 5691
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5712 b43_rng_exit(wl); 5713 b43_rng_exit(wl);
5713 5714
5714 b43_leds_unregister(wl); 5715 b43_leds_unregister(wl);
5715
5716 ieee80211_free_hw(wl->hw); 5716 ieee80211_free_hw(wl->hw);
5717 kfree(wldev->dev);
5717} 5718}
5718 5719
5719static struct bcma_driver b43_bcma_driver = { 5720static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5796 5797
5797 b43_leds_unregister(wl); 5798 b43_leds_unregister(wl);
5798 b43_wireless_exit(dev, wl); 5799 b43_wireless_exit(dev, wl);
5800 kfree(dev);
5799} 5801}
5800 5802
5801static struct ssb_driver b43_ssb_driver = { 5803static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 97be104d1203..b5c57eebf995 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -93,7 +93,7 @@
93#define IWL8260_SMEM_OFFSET 0x400000 93#define IWL8260_SMEM_OFFSET 0x400000
94#define IWL8260_SMEM_LEN 0x68000 94#define IWL8260_SMEM_LEN 0x68000
95 95
96#define IWL8000_FW_PRE "iwlwifi-8000" 96#define IWL8000_FW_PRE "iwlwifi-8000C-"
97#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
98 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
99 99
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index f899666acb41..9e45bf9c6071 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
238 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", 238 snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
239 name_pre, tag); 239 name_pre, tag);
240 240
241 /*
242 * Starting 8000B - FW name format has changed. This overwrites the
243 * previous name and uses the new format.
244 */
245 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
246 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
247
248 if (rev_step != 'A')
249 snprintf(drv->firmware_name,
250 sizeof(drv->firmware_name), "%s%c-%s.ucode",
251 name_pre, rev_step, tag);
252 }
253
254 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 241 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
255 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) 242 (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
256 ? "EXPERIMENTAL " : "", 243 ? "EXPERIMENTAL " : "",
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1060 return -EINVAL; 1047 return -EINVAL;
1061 } 1048 }
1062 1049
1063 if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && 1050 /*
1064 !gscan_capa, 1051 * If ucode advertises that it supports GSCAN but GSCAN
1065 "GSCAN is supported but capabilities TLV is unavailable\n")) 1052 * capabilities TLV is not present, or if it has an old format,
1053 * warn and continue without GSCAN.
1054 */
1055 if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
1056 !gscan_capa) {
1057 IWL_DEBUG_INFO(drv,
1058 "GSCAN is supported but capabilities TLV is unavailable\n");
1066 __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, 1059 __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
1067 capa->_capa); 1060 capa->_capa);
1061 }
1068 1062
1069 return 0; 1063 return 0;
1070 1064
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 4856eac120f6..6938cd37be57 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
526 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; 526 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
527 527
528 /* Make room for fw's virtual image pages, if it exists */ 528 /* Make room for fw's virtual image pages, if it exists */
529 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) 529 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
530 mvm->fw_paging_db[0].fw_paging_block)
530 file_len += mvm->num_of_paging_blk * 531 file_len += mvm->num_of_paging_blk *
531 (sizeof(*dump_data) + 532 (sizeof(*dump_data) +
532 sizeof(struct iwl_fw_error_dump_paging) + 533 sizeof(struct iwl_fw_error_dump_paging) +
@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
643 } 644 }
644 645
645 /* Dump fw's virtual image */ 646 /* Dump fw's virtual image */
646 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { 647 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
648 mvm->fw_paging_db[0].fw_paging_block) {
647 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { 649 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
648 struct iwl_fw_error_dump_paging *paging; 650 struct iwl_fw_error_dump_paging *paging;
649 struct page *pages = 651 struct page *pages =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 594cd0dc7df9..09d895fafaf2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
144 144
145 __free_pages(mvm->fw_paging_db[i].fw_paging_block, 145 __free_pages(mvm->fw_paging_db[i].fw_paging_block,
146 get_order(mvm->fw_paging_db[i].fw_paging_size)); 146 get_order(mvm->fw_paging_db[i].fw_paging_size));
147 mvm->fw_paging_db[i].fw_paging_block = NULL;
147 } 148 }
148 kfree(mvm->trans->paging_download_buf); 149 kfree(mvm->trans->paging_download_buf);
149 mvm->trans->paging_download_buf = NULL; 150 mvm->trans->paging_download_buf = NULL;
151 mvm->trans->paging_db = NULL;
150 152
151 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); 153 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
152} 154}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1147 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1147 /* the fw is stopped, the aux sta is dead: clean up driver state */
1148 iwl_mvm_del_aux_sta(mvm); 1148 iwl_mvm_del_aux_sta(mvm);
1149 1149
1150 iwl_free_fw_paging(mvm);
1151
1150 /* 1152 /*
1151 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1153 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1152 * won't be called in this case). 1154 * won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
762 kfree(mvm->nvm_sections[i].data); 762 kfree(mvm->nvm_sections[i].data);
763 763
764 iwl_free_fw_paging(mvm);
765
766 iwl_mvm_tof_clean(mvm); 764 iwl_mvm_tof_clean(mvm);
767 765
768 ieee80211_free_hw(mvm->hw); 766 ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 05b968506836..79d7cd7d461e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
479 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, 479 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
480 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, 480 {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
481 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, 481 {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
482 {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
483 {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
484 {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
485 {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
486 {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
487 {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
488 {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
489 {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
482 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, 490 {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
483 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, 491 {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
492 {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
493 {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
484 494
485/* 9000 Series */ 495/* 9000 Series */
486 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, 496 {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
732 */ 732 */
733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
734 if (val & (BIT(1) | BIT(17))) { 734 if (val & (BIT(1) | BIT(17))) {
735 IWL_INFO(trans, 735 IWL_DEBUG_INFO(trans,
736 "can't access the RSA semaphore it is write protected\n"); 736 "can't access the RSA semaphore it is write protected\n");
737 return 0; 737 return 0;
738 } 738 }
739 739
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
2490 2490
2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
2493 rtldm->thermalvalue, thermal_value); 2493 rtldm->thermalvalue, thermal_value);
2494 /*Record last Power Tracking Thermal Value*/ 2494 /*Record last Power Tracking Thermal Value*/
2495 rtldm->thermalvalue = thermal_value; 2495 rtldm->thermalvalue = thermal_value;
2496 } 2496 }
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb593600..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
1204{ 1204{
1205 struct btt *btt = bdev->bd_disk->private_data; 1205 struct btt *btt = bdev->bd_disk->private_data;
1206 1206
1207 btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector); 1207 btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
1208 page_endio(page, rw & WRITE, 0); 1208 page_endio(page, rw & WRITE, 0);
1209 return 0; 1209 return 0;
1210} 1210}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index fc82743aefb6..19f822d7f652 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
407 [ND_CMD_IMPLEMENTED] = { }, 407 [ND_CMD_IMPLEMENTED] = { },
408 [ND_CMD_SMART] = { 408 [ND_CMD_SMART] = {
409 .out_num = 2, 409 .out_num = 2,
410 .out_sizes = { 4, 8, }, 410 .out_sizes = { 4, 128, },
411 }, 411 },
412 [ND_CMD_SMART_THRESHOLD] = { 412 [ND_CMD_SMART_THRESHOLD] = {
413 .out_num = 2, 413 .out_num = 2,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 79646d0c3277..182a93fe3712 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
417 set_badblock(bb, start_sector, num_sectors); 417 set_badblock(bb, start_sector, num_sectors);
418} 418}
419 419
420static void namespace_add_poison(struct list_head *poison_list, 420static void badblocks_populate(struct list_head *poison_list,
421 struct badblocks *bb, struct resource *res) 421 struct badblocks *bb, const struct resource *res)
422{ 422{
423 struct nd_poison *pl; 423 struct nd_poison *pl;
424 424
@@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list,
460} 460}
461 461
462/** 462/**
463 * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks 463 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
464 * @ndns: the namespace containing poison ranges 464 * @region: parent region of the range to interrogate
465 * @bb: badblocks instance to populate 465 * @bb: badblocks instance to populate
466 * @offset: offset at the start of the namespace before 'sector 0' 466 * @res: resource range to consider
467 * 467 *
468 * The poison list generated during NFIT initialization may contain multiple, 468 * The poison list generated during bus initialization may contain
469 * possibly overlapping ranges in the SPA (System Physical Address) space. 469 * multiple, possibly overlapping physical address ranges. Compare each
470 * Compare each of these ranges to the namespace currently being initialized, 470 * of these ranges to the resource range currently being initialized,
471 * and add badblocks to the gendisk for all matching sub-ranges 471 * and add badblocks entries for all matching sub-ranges
472 */ 472 */
473void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, 473void nvdimm_badblocks_populate(struct nd_region *nd_region,
474 struct badblocks *bb, resource_size_t offset) 474 struct badblocks *bb, const struct resource *res)
475{ 475{
476 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
477 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
478 struct nvdimm_bus *nvdimm_bus; 476 struct nvdimm_bus *nvdimm_bus;
479 struct list_head *poison_list; 477 struct list_head *poison_list;
480 struct resource res = {
481 .start = nsio->res.start + offset,
482 .end = nsio->res.end,
483 };
484 478
485 nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent); 479 if (!is_nd_pmem(&nd_region->dev)) {
480 dev_WARN_ONCE(&nd_region->dev, 1,
481 "%s only valid for pmem regions\n", __func__);
482 return;
483 }
484 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
486 poison_list = &nvdimm_bus->poison_list; 485 poison_list = &nvdimm_bus->poison_list;
487 486
488 nvdimm_bus_lock(&nvdimm_bus->dev); 487 nvdimm_bus_lock(&nvdimm_bus->dev);
489 namespace_add_poison(poison_list, bb, &res); 488 badblocks_populate(poison_list, bb, res);
490 nvdimm_bus_unlock(&nvdimm_bus->dev); 489 nvdimm_bus_unlock(&nvdimm_bus->dev);
491} 490}
492EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison); 491EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
493 492
494static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) 493static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
495{ 494{
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1799bd97a9ce..875c524fafb0 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
266int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns); 266int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
267const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, 267const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
268 char *name); 268 char *name);
269void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, 269void nvdimm_badblocks_populate(struct nd_region *nd_region,
270 struct badblocks *bb, resource_size_t offset); 270 struct badblocks *bb, const struct resource *res);
271int nd_blk_region_init(struct nd_region *nd_region); 271int nd_blk_region_init(struct nd_region *nd_region);
272void __nd_iostat_start(struct bio *bio, unsigned long *start); 272void __nd_iostat_start(struct bio *bio, unsigned long *start);
273static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) 273static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 254d3bc13f70..e071e214feba 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
376 } else { 376 } else {
377 /* from init we validate */ 377 /* from init we validate */
378 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) 378 if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
379 return -EINVAL; 379 return -ENODEV;
380 } 380 }
381 381
382 if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) { 382 if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ca5721c306bb..5101f3ab4f29 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -99,10 +99,24 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
99 if (unlikely(bad_pmem)) 99 if (unlikely(bad_pmem))
100 rc = -EIO; 100 rc = -EIO;
101 else { 101 else {
102 memcpy_from_pmem(mem + off, pmem_addr, len); 102 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
103 flush_dcache_page(page); 103 flush_dcache_page(page);
104 } 104 }
105 } else { 105 } else {
106 /*
107 * Note that we write the data both before and after
108 * clearing poison. The write before clear poison
109 * handles situations where the latest written data is
110 * preserved and the clear poison operation simply marks
111 * the address range as valid without changing the data.
112 * In this case application software can assume that an
113 * interrupted write will either return the new good
114 * data or an error.
115 *
116 * However, if pmem_clear_poison() leaves the data in an
117 * indeterminate state we need to perform the write
118 * after clear poison.
119 */
106 flush_dcache_page(page); 120 flush_dcache_page(page);
107 memcpy_to_pmem(pmem_addr, mem + off, len); 121 memcpy_to_pmem(pmem_addr, mem + off, len);
108 if (unlikely(bad_pmem)) { 122 if (unlikely(bad_pmem)) {
@@ -151,7 +165,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
151 struct pmem_device *pmem = bdev->bd_disk->private_data; 165 struct pmem_device *pmem = bdev->bd_disk->private_data;
152 int rc; 166 int rc;
153 167
154 rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); 168 rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
155 if (rw & WRITE) 169 if (rw & WRITE)
156 wmb_pmem(); 170 wmb_pmem();
157 171
@@ -244,7 +258,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
244static int pmem_attach_disk(struct device *dev, 258static int pmem_attach_disk(struct device *dev,
245 struct nd_namespace_common *ndns, struct pmem_device *pmem) 259 struct nd_namespace_common *ndns, struct pmem_device *pmem)
246{ 260{
261 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
247 int nid = dev_to_node(dev); 262 int nid = dev_to_node(dev);
263 struct resource bb_res;
248 struct gendisk *disk; 264 struct gendisk *disk;
249 265
250 blk_queue_make_request(pmem->pmem_queue, pmem_make_request); 266 blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +287,17 @@ static int pmem_attach_disk(struct device *dev,
271 devm_exit_badblocks(dev, &pmem->bb); 287 devm_exit_badblocks(dev, &pmem->bb);
272 if (devm_init_badblocks(dev, &pmem->bb)) 288 if (devm_init_badblocks(dev, &pmem->bb))
273 return -ENOMEM; 289 return -ENOMEM;
274 nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); 290 bb_res.start = nsio->res.start + pmem->data_offset;
275 291 bb_res.end = nsio->res.end;
292 if (is_nd_pfn(dev)) {
293 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
294 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
295
296 bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
297 bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
298 }
299 nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
300 &bb_res);
276 disk->bb = &pmem->bb; 301 disk->bb = &pmem->bb;
277 add_disk(disk); 302 add_disk(disk);
278 revalidate_disk(disk); 303 revalidate_disk(disk);
@@ -295,7 +320,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
295 320
296 if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align))) 321 if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
297 return -EIO; 322 return -EIO;
298 memcpy_from_pmem(buf, pmem->virt_addr + offset, size); 323 return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
299 } else { 324 } else {
300 memcpy_to_pmem(pmem->virt_addr + offset, buf, size); 325 memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
301 wmb_pmem(); 326 wmb_pmem();
@@ -372,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
372 */ 397 */
373 start += start_pad; 398 start += start_pad;
374 npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; 399 npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
375 if (nd_pfn->mode == PFN_MODE_PMEM) 400 if (nd_pfn->mode == PFN_MODE_PMEM) {
376 offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align) 401 unsigned long memmap_size;
402
403 /*
404 * vmemmap_populate_hugepages() allocates the memmap array in
405 * HPAGE_SIZE chunks.
406 */
407 memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
408 offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
377 - start; 409 - start;
378 else if (nd_pfn->mode == PFN_MODE_RAM) 410 } else if (nd_pfn->mode == PFN_MODE_RAM)
379 offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; 411 offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
380 else 412 else
381 goto err; 413 goto err;
@@ -553,7 +585,7 @@ static int nd_pmem_probe(struct device *dev)
553 ndns->rw_bytes = pmem_rw_bytes; 585 ndns->rw_bytes = pmem_rw_bytes;
554 if (devm_init_badblocks(dev, &pmem->bb)) 586 if (devm_init_badblocks(dev, &pmem->bb))
555 return -ENOMEM; 587 return -ENOMEM;
556 nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); 588 nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
557 589
558 if (is_nd_btt(dev)) { 590 if (is_nd_btt(dev)) {
559 /* btt allocates its own request_queue */ 591 /* btt allocates its own request_queue */
@@ -595,14 +627,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
595{ 627{
596 struct pmem_device *pmem = dev_get_drvdata(dev); 628 struct pmem_device *pmem = dev_get_drvdata(dev);
597 struct nd_namespace_common *ndns = pmem->ndns; 629 struct nd_namespace_common *ndns = pmem->ndns;
630 struct nd_region *nd_region = to_nd_region(dev->parent);
631 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
632 struct resource res = {
633 .start = nsio->res.start + pmem->data_offset,
634 .end = nsio->res.end,
635 };
598 636
599 if (event != NVDIMM_REVALIDATE_POISON) 637 if (event != NVDIMM_REVALIDATE_POISON)
600 return; 638 return;
601 639
602 if (is_nd_btt(dev)) 640 if (is_nd_pfn(dev)) {
603 nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); 641 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
604 else 642 struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
605 nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); 643
644 res.start += __le32_to_cpu(pfn_sb->start_pad);
645 res.end -= __le32_to_cpu(pfn_sb->end_trunc);
646 }
647
648 nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
606} 649}
607 650
608MODULE_ALIAS("pmem"); 651MODULE_ALIAS("pmem");
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda303efb..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1478 if (result > 0) { 1478 if (result > 0) {
1479 dev_err(dev->ctrl.device, 1479 dev_err(dev->ctrl.device,
1480 "Could not set queue count (%d)\n", result); 1480 "Could not set queue count (%d)\n", result);
1481 nr_io_queues = 0; 1481 return 0;
1482 result = 0;
1483 } 1482 }
1484 1483
1485 if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { 1484 if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1513 * If we enable msix early due to not intx, disable it again before 1512 * If we enable msix early due to not intx, disable it again before
1514 * setting up the full range we need. 1513 * setting up the full range we need.
1515 */ 1514 */
1516 if (!pdev->irq) 1515 if (pdev->msi_enabled)
1516 pci_disable_msi(pdev);
1517 else if (pdev->msix_enabled)
1517 pci_disable_msix(pdev); 1518 pci_disable_msix(pdev);
1518 1519
1519 for (i = 0; i < nr_io_queues; i++) 1520 for (i = 0; i < nr_io_queues; i++)
@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1696 if (pci_enable_device_mem(pdev)) 1697 if (pci_enable_device_mem(pdev))
1697 return result; 1698 return result;
1698 1699
1699 dev->entry[0].vector = pdev->irq;
1700 pci_set_master(pdev); 1700 pci_set_master(pdev);
1701 1701
1702 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 1702 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1709 } 1709 }
1710 1710
1711 /* 1711 /*
1712 * Some devices don't advertse INTx interrupts, pre-enable a single 1712 * Some devices and/or platforms don't advertise or work with INTx
1713 * MSIX vec for setup. We'll adjust this later. 1713 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
1714 * adjust this later.
1714 */ 1715 */
1715 if (!pdev->irq) { 1716 if (pci_enable_msix(pdev, dev->entry, 1)) {
1716 result = pci_enable_msix(pdev, dev->entry, 1); 1717 pci_enable_msi(pdev);
1717 if (result < 0) 1718 dev->entry[0].vector = pdev->irq;
1718 goto disable; 1719 }
1720
1721 if (!dev->entry[0].vector) {
1722 result = -ENODEV;
1723 goto disable;
1719 } 1724 }
1720 1725
1721 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 1726 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
1859 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1864 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1860 nvme_dev_disable(dev, false); 1865 nvme_dev_disable(dev, false);
1861 1866
1867 if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
1868 goto out;
1869
1862 set_bit(NVME_CTRL_RESETTING, &dev->flags); 1870 set_bit(NVME_CTRL_RESETTING, &dev->flags);
1863 1871
1864 result = nvme_pci_enable(dev); 1872 result = nvme_pci_enable(dev);
@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
2078{ 2086{
2079 struct nvme_dev *dev = pci_get_drvdata(pdev); 2087 struct nvme_dev *dev = pci_get_drvdata(pdev);
2080 2088
2081 del_timer_sync(&dev->watchdog_timer);
2082
2083 set_bit(NVME_CTRL_REMOVING, &dev->flags); 2089 set_bit(NVME_CTRL_REMOVING, &dev->flags);
2084 pci_set_drvdata(pdev, NULL); 2090 pci_set_drvdata(pdev, NULL);
2085 flush_work(&dev->async_work); 2091 flush_work(&dev->async_work);
2092 flush_work(&dev->reset_work);
2086 flush_work(&dev->scan_work); 2093 flush_work(&dev->scan_work);
2087 nvme_remove_namespaces(&dev->ctrl); 2094 nvme_remove_namespaces(&dev->ctrl);
2088 nvme_uninit_ctrl(&dev->ctrl); 2095 nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8ba19bba3156..2bb3c5799ac4 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
94 if (ret) 94 if (ret)
95 goto close_banks; 95 goto close_banks;
96 96
97 while (val_size) { 97 while (val_size >= reg_size) {
98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { 98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
99 /* fill up non-data register */ 99 /* fill up non-data register */
100 *buf = 0; 100 *buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
103 } 103 }
104 104
105 buf++; 105 buf++;
106 val_size--; 106 val_size -= reg_size;
107 offset += reg_size; 107 offset += reg_size;
108 } 108 }
109 109
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
239{ 239{
240 struct inode *root_inode; 240 struct inode *root_inode;
241 241
242 sb->s_blocksize = PAGE_CACHE_SIZE; 242 sb->s_blocksize = PAGE_SIZE;
243 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 243 sb->s_blocksize_bits = PAGE_SHIFT;
244 sb->s_magic = OPROFILEFS_MAGIC; 244 sb->s_magic = OPROFILEFS_MAGIC;
245 sb->s_op = &s_ops; 245 sb->s_op = &s_ops;
246 sb->s_time_gran = 1; 246 sb->s_time_gran = 1;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
275} 275}
276EXPORT_SYMBOL(pci_write_vpd); 276EXPORT_SYMBOL(pci_write_vpd);
277 277
278/**
279 * pci_set_vpd_size - Set size of Vital Product Data space
280 * @dev: pci device struct
281 * @len: size of vpd space
282 */
283int pci_set_vpd_size(struct pci_dev *dev, size_t len)
284{
285 if (!dev->vpd || !dev->vpd->ops)
286 return -ENODEV;
287 return dev->vpd->ops->set_size(dev, len);
288}
289EXPORT_SYMBOL(pci_set_vpd_size);
290
278#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) 291#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
279 292
280/** 293/**
@@ -498,9 +511,23 @@ out:
498 return ret ? ret : count; 511 return ret ? ret : count;
499} 512}
500 513
514static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
515{
516 struct pci_vpd *vpd = dev->vpd;
517
518 if (len == 0 || len > PCI_VPD_MAX_SIZE)
519 return -EIO;
520
521 vpd->valid = 1;
522 vpd->len = len;
523
524 return 0;
525}
526
501static const struct pci_vpd_ops pci_vpd_ops = { 527static const struct pci_vpd_ops pci_vpd_ops = {
502 .read = pci_vpd_read, 528 .read = pci_vpd_read,
503 .write = pci_vpd_write, 529 .write = pci_vpd_write,
530 .set_size = pci_vpd_set_size,
504}; 531};
505 532
506static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, 533static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
533 return ret; 560 return ret;
534} 561}
535 562
563static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
564{
565 struct pci_dev *tdev = pci_get_slot(dev->bus,
566 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
567 int ret;
568
569 if (!tdev)
570 return -ENODEV;
571
572 ret = pci_set_vpd_size(tdev, len);
573 pci_dev_put(tdev);
574 return ret;
575}
576
536static const struct pci_vpd_ops pci_vpd_f0_ops = { 577static const struct pci_vpd_ops pci_vpd_f0_ops = {
537 .read = pci_vpd_f0_read, 578 .read = pci_vpd_f0_read,
538 .write = pci_vpd_f0_write, 579 .write = pci_vpd_f0_write,
580 .set_size = pci_vpd_f0_set_size,
539}; 581};
540 582
541int pci_vpd_init(struct pci_dev *dev) 583int pci_vpd_init(struct pci_dev *dev)
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
32#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) 32#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
33 33
34struct imx6_pcie { 34struct imx6_pcie {
35 struct gpio_desc *reset_gpio; 35 int reset_gpio;
36 struct clk *pcie_bus; 36 struct clk *pcie_bus;
37 struct clk *pcie_phy; 37 struct clk *pcie_phy;
38 struct clk *pcie; 38 struct clk *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
309 usleep_range(200, 500); 309 usleep_range(200, 500);
310 310
311 /* Some boards don't have PCIe reset GPIO. */ 311 /* Some boards don't have PCIe reset GPIO. */
312 if (imx6_pcie->reset_gpio) { 312 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
313 gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); 313 gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
314 msleep(100); 314 msleep(100);
315 gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); 315 gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
316 } 316 }
317 return 0; 317 return 0;
318 318
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
523{ 523{
524 struct imx6_pcie *imx6_pcie; 524 struct imx6_pcie *imx6_pcie;
525 struct pcie_port *pp; 525 struct pcie_port *pp;
526 struct device_node *np = pdev->dev.of_node;
526 struct resource *dbi_base; 527 struct resource *dbi_base;
527 struct device_node *node = pdev->dev.of_node; 528 struct device_node *node = pdev->dev.of_node;
528 int ret; 529 int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
544 return PTR_ERR(pp->dbi_base); 545 return PTR_ERR(pp->dbi_base);
545 546
546 /* Fetch GPIOs */ 547 /* Fetch GPIOs */
547 imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", 548 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
548 GPIOD_OUT_LOW); 549 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
550 ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
551 GPIOF_OUT_INIT_LOW, "PCIe reset");
552 if (ret) {
553 dev_err(&pdev->dev, "unable to get reset gpio\n");
554 return ret;
555 }
556 }
549 557
550 /* Fetch clocks */ 558 /* Fetch clocks */
551 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); 559 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
636 u8 *data = (u8 *) buf; 636 u8 *data = (u8 *) buf;
637 637
638 /* Several chips lock up trying to read undefined config space */ 638 /* Several chips lock up trying to read undefined config space */
639 if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0) 639 if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
640 size = dev->cfg_size; 640 size = dev->cfg_size;
641 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 641 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
642 size = 128; 642 size = 128;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
97struct pci_vpd_ops { 97struct pci_vpd_ops {
98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
100 int (*set_size)(struct pci_dev *dev, size_t len);
100}; 101};
101 102
102struct pci_vpd { 103struct pci_vpd {
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
56 int stschg_irq; /* card-status-change irq */ 56 int stschg_irq; /* card-status-change irq */
57 int card_irq; /* card irq */ 57 int card_irq; /* card irq */
58 int eject_irq; /* db1200/pb1200 have these */ 58 int eject_irq; /* db1200/pb1200 have these */
59 int insert_gpio; /* db1000 carddetect gpio */
59 60
60#define BOARD_TYPE_DEFAULT 0 /* most boards */ 61#define BOARD_TYPE_DEFAULT 0 /* most boards */
61#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */ 62#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
83/* carddetect gpio: low-active */ 84/* carddetect gpio: low-active */
84static int db1000_card_inserted(struct db1x_pcmcia_sock *sock) 85static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
85{ 86{
86 return !gpio_get_value(irq_to_gpio(sock->insert_irq)); 87 return !gpio_get_value(sock->insert_gpio);
87} 88}
88 89
89static int db1x_card_inserted(struct db1x_pcmcia_sock *sock) 90static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
457 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card"); 458 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
458 sock->card_irq = r ? r->start : 0; 459 sock->card_irq = r ? r->start : 0;
459 460
460 /* insert: irq which triggers on card insertion/ejection */ 461 /* insert: irq which triggers on card insertion/ejection
462 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
463 */
461 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert"); 464 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
462 sock->insert_irq = r ? r->start : -1; 465 sock->insert_irq = r ? r->start : -1;
466 if (sock->board_type == BOARD_TYPE_DEFAULT) {
467 sock->insert_gpio = r ? r->start : -1;
468 sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
469 }
463 470
464 /* stschg: irq which trigger on card status change (optional) */ 471 /* stschg: irq which trigger on card status change (optional) */
465 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg"); 472 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5a8a11..f70090897fdf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
737 break; 737 break;
738 case CPU_PM_EXIT: 738 case CPU_PM_EXIT:
739 case CPU_PM_ENTER_FAILED: 739 case CPU_PM_ENTER_FAILED:
740 /* Restore and enable the counter */ 740 /*
741 armpmu_start(event, PERF_EF_RELOAD); 741 * Restore and enable the counter.
742 * armpmu_start() indirectly calls
743 *
744 * perf_event_update_userpage()
745 *
746 * that requires RCU read locking to be functional,
747 * wrap the call within RCU_NONIDLE to make the
748 * RCU subsystem aware this cpu is not idle from
749 * an RCU perspective for the armpmu_start() call
750 * duration.
751 */
752 RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
742 break; 753 break;
743 default: 754 default:
744 break; 755 break;
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02e6bee..793ecb6d87bc 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
86 if (!np) 86 if (!np)
87 return -ENODEV; 87 return -ENODEV;
88 88
89 if (!dev->parent || !dev->parent->of_node)
90 return -ENODEV;
91
89 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
90 if (IS_ERR(dp)) 93 if (IS_ERR(dp))
91 return -ENOMEM; 94 return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
104 return ret; 107 return ret;
105 } 108 }
106 109
107 dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 110 dp->grf = syscon_node_to_regmap(dev->parent->of_node);
108 if (IS_ERR(dp->grf)) { 111 if (IS_ERR(dp->grf)) {
109 dev_err(dev, "rk3288-dp needs rockchip,grf property\n"); 112 dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
110 return PTR_ERR(dp->grf); 113 return PTR_ERR(dp->grf);
111 } 114 }
112 115
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c27195f..6ebcf3e41c46 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
176 struct regmap *grf; 176 struct regmap *grf;
177 unsigned int reg_offset; 177 unsigned int reg_offset;
178 178
179 grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 179 if (!dev->parent || !dev->parent->of_node)
180 return -ENODEV;
181
182 grf = syscon_node_to_regmap(dev->parent->of_node);
180 if (IS_ERR(grf)) { 183 if (IS_ERR(grf)) {
181 dev_err(dev, "Missing rockchip,grf property\n"); 184 dev_err(dev, "Missing rockchip,grf property\n");
182 return PTR_ERR(grf); 185 return PTR_ERR(grf);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe1219d76d..fc8cbf611723 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@ config PINCTRL_IMX
2 bool 2 bool
3 select PINMUX 3 select PINMUX
4 select PINCONF 4 select PINCONF
5 select REGMAP
5 6
6config PINCTRL_IMX1_CORE 7config PINCTRL_IMX1_CORE
7 bool 8 bool
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 46210512d8ec..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
762 762
763 if (of_property_read_bool(dev_np, "fsl,input-sel")) { 763 if (of_property_read_bool(dev_np, "fsl,input-sel")) {
764 np = of_parse_phandle(dev_np, "fsl,input-sel", 0); 764 np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
765 if (np) { 765 if (!np) {
766 ipctl->input_sel_base = of_iomap(np, 0);
767 if (IS_ERR(ipctl->input_sel_base)) {
768 of_node_put(np);
769 dev_err(&pdev->dev,
770 "iomuxc input select base address not found\n");
771 return PTR_ERR(ipctl->input_sel_base);
772 }
773 } else {
774 dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n"); 766 dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
775 return -EINVAL; 767 return -EINVAL;
776 } 768 }
769
770 ipctl->input_sel_base = of_iomap(np, 0);
777 of_node_put(np); 771 of_node_put(np);
772 if (!ipctl->input_sel_base) {
773 dev_err(&pdev->dev,
774 "iomuxc input select base address not found\n");
775 return -ENOMEM;
776 }
778 } 777 }
779 778
780 imx_pinctrl_desc.name = dev_name(&pdev->dev); 779 imx_pinctrl_desc.name = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b467c25..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
665 spin_unlock(&pctrl->lock); 665 spin_unlock(&pctrl->lock);
666} 666}
667 667
668static void intel_gpio_irq_enable(struct irq_data *d)
669{
670 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
671 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
672 const struct intel_community *community;
673 unsigned pin = irqd_to_hwirq(d);
674 unsigned long flags;
675
676 spin_lock_irqsave(&pctrl->lock, flags);
677
678 community = intel_get_community(pctrl, pin);
679 if (community) {
680 unsigned padno = pin_to_padno(community, pin);
681 unsigned gpp_size = community->gpp_size;
682 unsigned gpp_offset = padno % gpp_size;
683 unsigned gpp = padno / gpp_size;
684 u32 value;
685
686 /* Clear interrupt status first to avoid unexpected interrupt */
687 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
688
689 value = readl(community->regs + community->ie_offset + gpp * 4);
690 value |= BIT(gpp_offset);
691 writel(value, community->regs + community->ie_offset + gpp * 4);
692 }
693
694 spin_unlock_irqrestore(&pctrl->lock, flags);
695}
696
668static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) 697static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
669{ 698{
670 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 699 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
741 value |= PADCFG0_RXINV; 770 value |= PADCFG0_RXINV;
742 } else if (type & IRQ_TYPE_EDGE_RISING) { 771 } else if (type & IRQ_TYPE_EDGE_RISING) {
743 value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT; 772 value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
744 } else if (type & IRQ_TYPE_LEVEL_LOW) { 773 } else if (type & IRQ_TYPE_LEVEL_MASK) {
745 value |= PADCFG0_RXINV; 774 if (type & IRQ_TYPE_LEVEL_LOW)
775 value |= PADCFG0_RXINV;
746 } else { 776 } else {
747 value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT; 777 value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
748 } 778 }
@@ -852,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
852 882
853static struct irq_chip intel_gpio_irqchip = { 883static struct irq_chip intel_gpio_irqchip = {
854 .name = "intel-gpio", 884 .name = "intel-gpio",
885 .irq_enable = intel_gpio_irq_enable,
855 .irq_ack = intel_gpio_irq_ack, 886 .irq_ack = intel_gpio_irq_ack,
856 .irq_mask = intel_gpio_irq_mask, 887 .irq_mask = intel_gpio_irq_mask,
857 .irq_unmask = intel_gpio_irq_unmask, 888 .irq_unmask = intel_gpio_irq_unmask,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7964a7..6ab8c3ccdeea 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent); 1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
1005 int eint_num, virq, eint_offset; 1005 int eint_num, virq, eint_offset;
1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc; 1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
1007 static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256}; 1007 static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
1008 128000, 256000};
1008 const struct mtk_desc_pin *pin; 1009 const struct mtk_desc_pin *pin;
1009 struct irq_data *d; 1010 struct irq_data *d;
1010 1011
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
1022 if (!mtk_eint_can_en_debounce(pctl, eint_num)) 1023 if (!mtk_eint_can_en_debounce(pctl, eint_num))
1023 return -ENOSYS; 1024 return -ENOSYS;
1024 1025
1025 dbnc = ARRAY_SIZE(dbnc_arr); 1026 dbnc = ARRAY_SIZE(debounce_time);
1026 for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) { 1027 for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
1027 if (debounce <= dbnc_arr[i]) { 1028 if (debounce <= debounce_time[i]) {
1028 dbnc = i; 1029 dbnc = i;
1029 break; 1030 break;
1030 } 1031 }
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
990 int val; 990 int val;
991 991
992 if (pull) 992 if (pull)
993 pullidx = data_out ? 1 : 2; 993 pullidx = data_out ? 2 : 1;
994 994
995 seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s", 995 seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
996 gpio, 996 gpio,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
469 "mfio83", 469 "mfio83",
470}; 470};
471 471
472static const char * const pistachio_sys_pll_lock_groups[] = { 472static const char * const pistachio_audio_pll_lock_groups[] = {
473 "mfio84", 473 "mfio84",
474}; 474};
475 475
476static const char * const pistachio_wifi_pll_lock_groups[] = { 476static const char * const pistachio_rpu_v_pll_lock_groups[] = {
477 "mfio85", 477 "mfio85",
478}; 478};
479 479
480static const char * const pistachio_bt_pll_lock_groups[] = { 480static const char * const pistachio_rpu_l_pll_lock_groups[] = {
481 "mfio86", 481 "mfio86",
482}; 482};
483 483
484static const char * const pistachio_rpu_v_pll_lock_groups[] = { 484static const char * const pistachio_sys_pll_lock_groups[] = {
485 "mfio87", 485 "mfio87",
486}; 486};
487 487
488static const char * const pistachio_rpu_l_pll_lock_groups[] = { 488static const char * const pistachio_wifi_pll_lock_groups[] = {
489 "mfio88", 489 "mfio88",
490}; 490};
491 491
492static const char * const pistachio_audio_pll_lock_groups[] = { 492static const char * const pistachio_bt_pll_lock_groups[] = {
493 "mfio89", 493 "mfio89",
494}; 494};
495 495
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
559 PISTACHIO_FUNCTION_DREQ4, 559 PISTACHIO_FUNCTION_DREQ4,
560 PISTACHIO_FUNCTION_DREQ5, 560 PISTACHIO_FUNCTION_DREQ5,
561 PISTACHIO_FUNCTION_MIPS_PLL_LOCK, 561 PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
562 PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
563 PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
564 PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
562 PISTACHIO_FUNCTION_SYS_PLL_LOCK, 565 PISTACHIO_FUNCTION_SYS_PLL_LOCK,
563 PISTACHIO_FUNCTION_WIFI_PLL_LOCK, 566 PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
564 PISTACHIO_FUNCTION_BT_PLL_LOCK, 567 PISTACHIO_FUNCTION_BT_PLL_LOCK,
565 PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
566 PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
567 PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
568 PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND, 568 PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
569 PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND, 569 PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
570 PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND, 570 PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
620 FUNCTION(dreq4), 620 FUNCTION(dreq4),
621 FUNCTION(dreq5), 621 FUNCTION(dreq5),
622 FUNCTION(mips_pll_lock), 622 FUNCTION(mips_pll_lock),
623 FUNCTION(audio_pll_lock),
624 FUNCTION(rpu_v_pll_lock),
625 FUNCTION(rpu_l_pll_lock),
623 FUNCTION(sys_pll_lock), 626 FUNCTION(sys_pll_lock),
624 FUNCTION(wifi_pll_lock), 627 FUNCTION(wifi_pll_lock),
625 FUNCTION(bt_pll_lock), 628 FUNCTION(bt_pll_lock),
626 FUNCTION(rpu_v_pll_lock),
627 FUNCTION(rpu_l_pll_lock),
628 FUNCTION(audio_pll_lock),
629 FUNCTION(debug_raw_cca_ind), 629 FUNCTION(debug_raw_cca_ind),
630 FUNCTION(debug_ed_sec20_cca_ind), 630 FUNCTION(debug_ed_sec20_cca_ind),
631 FUNCTION(debug_ed_sec40_cca_ind), 631 FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d56ad40..cf9bafa10acf 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
1280 1280
1281 /* Parse pins in each row from LSB */ 1281 /* Parse pins in each row from LSB */
1282 while (mask) { 1282 while (mask) {
1283 bit_pos = ffs(mask); 1283 bit_pos = __ffs(mask);
1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin; 1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
1285 mask_pos = ((pcs->fmask) << (bit_pos - 1)); 1285 mask_pos = ((pcs->fmask) << bit_pos);
1286 val_pos = val & mask_pos; 1286 val_pos = val & mask_pos;
1287 submask = mask & mask_pos; 1287 submask = mask & mask_pos;
1288 1288
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask", 1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask",
1853 &pcs->fmask); 1853 &pcs->fmask);
1854 if (!ret) { 1854 if (!ret) {
1855 pcs->fshift = ffs(pcs->fmask) - 1; 1855 pcs->fshift = __ffs(pcs->fmask);
1856 pcs->fmax = pcs->fmask >> pcs->fshift; 1856 pcs->fmax = pcs->fmask >> pcs->fshift;
1857 } else { 1857 } else {
1858 /* If mask property doesn't exist, function mux is invalid. */ 1858 /* If mask property doesn't exist, function mux is invalid. */
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
1573 return 0; 1573 return 0;
1574} 1574}
1575 1575
1576/*
1577 * gpiolib gpiod_to_irq callback function.
1578 * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
1579 */
1580static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
1581{
1582 struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
1583 int i;
1584
1585 for (i = 0; i < info->num_exin; i++)
1586 if (info->exin[i] == offset)
1587 return ltq_eiu_get_irq(i);
1588
1589 return -1;
1590}
1591
1576static struct gpio_chip xway_chip = { 1592static struct gpio_chip xway_chip = {
1577 .label = "gpio-xway", 1593 .label = "gpio-xway",
1578 .direction_input = xway_gpio_dir_in, 1594 .direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
1581 .set = xway_gpio_set, 1597 .set = xway_gpio_set,
1582 .request = gpiochip_generic_request, 1598 .request = gpiochip_generic_request,
1583 .free = gpiochip_generic_free, 1599 .free = gpiochip_generic_free,
1600 .to_irq = xway_gpio_to_irq,
1584 .base = -1, 1601 .base = -1,
1585}; 1602};
1586 1603
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ced6ce6..b68ae424cee2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99);
237 .pins = gpio##id##_pins, \ 237 .pins = gpio##id##_pins, \
238 .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \ 238 .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
239 .funcs = (int[]){ \ 239 .funcs = (int[]){ \
240 qca_mux_NA, /* gpio mode */ \ 240 qca_mux_gpio, /* gpio mode */ \
241 qca_mux_##f1, \ 241 qca_mux_##f1, \
242 qca_mux_##f2, \ 242 qca_mux_##f2, \
243 qca_mux_##f3, \ 243 qca_mux_##f3, \
@@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99);
254 qca_mux_##f14 \ 254 qca_mux_##f14 \
255 }, \ 255 }, \
256 .nfuncs = 15, \ 256 .nfuncs = 15, \
257 .ctl_reg = 0x1000 + 0x10 * id, \ 257 .ctl_reg = 0x0 + 0x1000 * id, \
258 .io_reg = 0x1004 + 0x10 * id, \ 258 .io_reg = 0x4 + 0x1000 * id, \
259 .intr_cfg_reg = 0x1008 + 0x10 * id, \ 259 .intr_cfg_reg = 0x8 + 0x1000 * id, \
260 .intr_status_reg = 0x100c + 0x10 * id, \ 260 .intr_status_reg = 0xc + 0x1000 * id, \
261 .intr_target_reg = 0x400 + 0x4 * id, \ 261 .intr_target_reg = 0x8 + 0x1000 * id, \
262 .mux_bit = 2, \ 262 .mux_bit = 2, \
263 .pull_bit = 0, \ 263 .pull_bit = 0, \
264 .drv_bit = 6, \ 264 .drv_bit = 6, \
@@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
414 .nfunctions = ARRAY_SIZE(ipq4019_functions), 414 .nfunctions = ARRAY_SIZE(ipq4019_functions),
415 .groups = ipq4019_groups, 415 .groups = ipq4019_groups,
416 .ngroups = ARRAY_SIZE(ipq4019_groups), 416 .ngroups = ARRAY_SIZE(ipq4019_groups),
417 .ngpios = 70, 417 .ngpios = 100,
418}; 418};
419 419
420static int ipq4019_pinctrl_probe(struct platform_device *pdev) 420static int ipq4019_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f0c60b..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
546 return ret; 546 return ret;
547 } 547 }
548 548
549 pinctrl_provide_dummies(); 549 /* Enable dummy states for those platforms without pinctrl support */
550 if (!of_have_populated_dt())
551 pinctrl_provide_dummies();
550 552
551 ret = sh_pfc_init_ranges(pfc); 553 ret = sh_pfc_init_ranges(pfc);
552 if (ret < 0) 554 if (ret < 0)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
485 .pins = sun8i_a33_pins, 485 .pins = sun8i_a33_pins,
486 .npins = ARRAY_SIZE(sun8i_a33_pins), 486 .npins = ARRAY_SIZE(sun8i_a33_pins),
487 .irq_banks = 2, 487 .irq_banks = 2,
488 .irq_bank_base = 1,
488}; 489};
489 490
490static int sun8i_a33_pinctrl_probe(struct platform_device *pdev) 491static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfabb1af..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
579static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) 579static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
580{ 580{
581 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 581 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
582 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 582 u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
583 u8 index = sunxi_irq_cfg_offset(d->hwirq); 583 u8 index = sunxi_irq_cfg_offset(d->hwirq);
584 unsigned long flags; 584 unsigned long flags;
585 u32 regval; 585 u32 regval;
@@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
626static void sunxi_pinctrl_irq_ack(struct irq_data *d) 626static void sunxi_pinctrl_irq_ack(struct irq_data *d)
627{ 627{
628 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 628 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
629 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 629 u32 status_reg = sunxi_irq_status_reg(d->hwirq,
630 pctl->desc->irq_bank_base);
630 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 631 u8 status_idx = sunxi_irq_status_offset(d->hwirq);
631 632
632 /* Clear the IRQ */ 633 /* Clear the IRQ */
@@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
636static void sunxi_pinctrl_irq_mask(struct irq_data *d) 637static void sunxi_pinctrl_irq_mask(struct irq_data *d)
637{ 638{
638 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 639 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
639 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 640 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
640 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 641 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
641 unsigned long flags; 642 unsigned long flags;
642 u32 val; 643 u32 val;
@@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
653static void sunxi_pinctrl_irq_unmask(struct irq_data *d) 654static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
654{ 655{
655 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 656 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
656 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 657 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
657 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 658 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
658 unsigned long flags; 659 unsigned long flags;
659 u32 val; 660 u32 val;
@@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
745 if (bank == pctl->desc->irq_banks) 746 if (bank == pctl->desc->irq_banks)
746 return; 747 return;
747 748
748 reg = sunxi_irq_status_reg_from_bank(bank); 749 reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
749 val = readl(pctl->membase + reg); 750 val = readl(pctl->membase + reg);
750 751
751 if (val) { 752 if (val) {
@@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
1024 1025
1025 for (i = 0; i < pctl->desc->irq_banks; i++) { 1026 for (i = 0; i < pctl->desc->irq_banks; i++) {
1026 /* Mask and clear all IRQs before registering a handler */ 1027 /* Mask and clear all IRQs before registering a handler */
1027 writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i)); 1028 writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
1029 pctl->desc->irq_bank_base));
1028 writel(0xffffffff, 1030 writel(0xffffffff,
1029 pctl->membase + sunxi_irq_status_reg_from_bank(i)); 1031 pctl->membase + sunxi_irq_status_reg_from_bank(i,
1032 pctl->desc->irq_bank_base));
1030 1033
1031 irq_set_chained_handler_and_data(pctl->irq[i], 1034 irq_set_chained_handler_and_data(pctl->irq[i],
1032 sunxi_pinctrl_irq_handler, 1035 sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
97 int npins; 97 int npins;
98 unsigned pin_base; 98 unsigned pin_base;
99 unsigned irq_banks; 99 unsigned irq_banks;
100 unsigned irq_bank_base;
100 bool irq_read_needs_mux; 101 bool irq_read_needs_mux;
101}; 102};
102 103
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
233 return pin_num * PULL_PINS_BITS; 234 return pin_num * PULL_PINS_BITS;
234} 235}
235 236
236static inline u32 sunxi_irq_cfg_reg(u16 irq) 237static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
237{ 238{
238 u8 bank = irq / IRQ_PER_BANK; 239 u8 bank = irq / IRQ_PER_BANK;
239 u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04; 240 u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
240 241
241 return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg; 242 return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
242} 243}
243 244
244static inline u32 sunxi_irq_cfg_offset(u16 irq) 245static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
247 return irq_num * IRQ_CFG_IRQ_BITS; 248 return irq_num * IRQ_CFG_IRQ_BITS;
248} 249}
249 250
250static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank) 251static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
251{ 252{
252 return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE; 253 return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
253} 254}
254 255
255static inline u32 sunxi_irq_ctrl_reg(u16 irq) 256static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
256{ 257{
257 u8 bank = irq / IRQ_PER_BANK; 258 u8 bank = irq / IRQ_PER_BANK;
258 259
259 return sunxi_irq_ctrl_reg_from_bank(bank); 260 return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
260} 261}
261 262
262static inline u32 sunxi_irq_ctrl_offset(u16 irq) 263static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
265 return irq_num * IRQ_CTRL_IRQ_BITS; 266 return irq_num * IRQ_CTRL_IRQ_BITS;
266} 267}
267 268
268static inline u32 sunxi_irq_status_reg_from_bank(u8 bank) 269static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
269{ 270{
270 return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE; 271 return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
271} 272}
272 273
273static inline u32 sunxi_irq_status_reg(u16 irq) 274static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
274{ 275{
275 u8 bank = irq / IRQ_PER_BANK; 276 u8 bank = irq / IRQ_PER_BANK;
276 277
277 return sunxi_irq_status_reg_from_bank(bank); 278 return sunxi_irq_status_reg_from_bank(bank, bank_base);
278} 279}
279 280
280static inline u32 sunxi_irq_status_offset(u16 irq) 281static inline u32 sunxi_irq_status_offset(u16 irq)
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 9973cebb4d6f..07462d79d040 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,8 +309,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
309 * much memory to the process. 309 * much memory to the process.
310 */ 310 */
311 down_read(&current->mm->mmap_sem); 311 down_read(&current->mm->mmap_sem);
312 ret = get_user_pages(current, current->mm, address, 1, 312 ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
313 !is_write, 0, &page, NULL);
314 up_read(&current->mm->mmap_sem); 313 up_read(&current->mm->mmap_sem);
315 if (ret < 0) 314 if (ret < 0)
316 break; 315 break;
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
127 arg0.integer.value = reg; 127 arg0.integer.value = reg;
128 128
129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret); 129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
130 if (ACPI_FAILURE(status))
131 return -EINVAL;
130 *ret = lret; 132 *ret = lret;
131 return (status != AE_OK) ? -EINVAL : 0; 133 return 0;
132} 134}
133 135
134/** 136/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
173DEFINE_CONV(normal, 1, 2, 3); 175DEFINE_CONV(normal, 1, 2, 3);
174DEFINE_CONV(y_inverted, 1, -2, 3); 176DEFINE_CONV(y_inverted, 1, -2, 3);
175DEFINE_CONV(x_inverted, -1, 2, 3); 177DEFINE_CONV(x_inverted, -1, 2, 3);
178DEFINE_CONV(x_inverted_usd, -1, 2, -3);
176DEFINE_CONV(z_inverted, 1, 2, -3); 179DEFINE_CONV(z_inverted, 1, 2, -3);
177DEFINE_CONV(xy_swap, 2, 1, 3); 180DEFINE_CONV(xy_swap, 2, 1, 3);
178DEFINE_CONV(xy_rotated_left, -2, 1, 3); 181DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
236 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 239 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
237 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), 240 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
238 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), 241 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
242 AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
239 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 243 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
240 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 244 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
241 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 245 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
91} 91}
92 92
93static const struct dev_pm_ops intel_hid_pl_pm_ops = { 93static const struct dev_pm_ops intel_hid_pl_pm_ops = {
94 .freeze = intel_hid_pl_suspend_handler,
95 .restore = intel_hid_pl_resume_handler,
94 .suspend = intel_hid_pl_suspend_handler, 96 .suspend = intel_hid_pl_suspend_handler,
95 .resume = intel_hid_pl_resume_handler, 97 .resume = intel_hid_pl_resume_handler,
96}; 98};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
687 ipcdev.acpi_io_size = size; 687 ipcdev.acpi_io_size = size;
688 dev_info(&pdev->dev, "io res: %pR\n", res); 688 dev_info(&pdev->dev, "io res: %pR\n", res);
689 689
690 /* This is index 0 to cover BIOS data register */
691 punit_res = punit_res_array; 690 punit_res = punit_res_array;
691 /* This is index 0 to cover BIOS data register */
692 res = platform_get_resource(pdev, IORESOURCE_MEM, 692 res = platform_get_resource(pdev, IORESOURCE_MEM,
693 PLAT_RESOURCE_BIOS_DATA_INDEX); 693 PLAT_RESOURCE_BIOS_DATA_INDEX);
694 if (!res) { 694 if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
698 *punit_res = *res; 698 *punit_res = *res;
699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res); 699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
700 700
701 /* This is index 1 to cover BIOS interface register */
701 res = platform_get_resource(pdev, IORESOURCE_MEM, 702 res = platform_get_resource(pdev, IORESOURCE_MEM,
702 PLAT_RESOURCE_BIOS_IFACE_INDEX); 703 PLAT_RESOURCE_BIOS_IFACE_INDEX);
703 if (!res) { 704 if (!res) {
704 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n"); 705 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
705 return -ENXIO; 706 return -ENXIO;
706 } 707 }
707 /* This is index 1 to cover BIOS interface register */
708 *++punit_res = *res; 708 *++punit_res = *res;
709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res); 709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
710 710
711 /* This is index 2 to cover ISP data register, optional */
711 res = platform_get_resource(pdev, IORESOURCE_MEM, 712 res = platform_get_resource(pdev, IORESOURCE_MEM,
712 PLAT_RESOURCE_ISP_DATA_INDEX); 713 PLAT_RESOURCE_ISP_DATA_INDEX);
713 if (!res) { 714 ++punit_res;
714 dev_err(&pdev->dev, "Failed to get res of punit ISP data\n"); 715 if (res) {
715 return -ENXIO; 716 *punit_res = *res;
717 dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
716 } 718 }
717 /* This is index 2 to cover ISP data register */
718 *++punit_res = *res;
719 dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
720 719
720 /* This is index 3 to cover ISP interface register, optional */
721 res = platform_get_resource(pdev, IORESOURCE_MEM, 721 res = platform_get_resource(pdev, IORESOURCE_MEM,
722 PLAT_RESOURCE_ISP_IFACE_INDEX); 722 PLAT_RESOURCE_ISP_IFACE_INDEX);
723 if (!res) { 723 ++punit_res;
724 dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n"); 724 if (res) {
725 return -ENXIO; 725 *punit_res = *res;
726 dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
726 } 727 }
727 /* This is index 3 to cover ISP interface register */
728 *++punit_res = *res;
729 dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
730 728
729 /* This is index 4 to cover GTD data register, optional */
731 res = platform_get_resource(pdev, IORESOURCE_MEM, 730 res = platform_get_resource(pdev, IORESOURCE_MEM,
732 PLAT_RESOURCE_GTD_DATA_INDEX); 731 PLAT_RESOURCE_GTD_DATA_INDEX);
733 if (!res) { 732 ++punit_res;
734 dev_err(&pdev->dev, "Failed to get res of punit GTD data\n"); 733 if (res) {
735 return -ENXIO; 734 *punit_res = *res;
735 dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
736 } 736 }
737 /* This is index 4 to cover GTD data register */
738 *++punit_res = *res;
739 dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
740 737
738 /* This is index 5 to cover GTD interface register, optional */
741 res = platform_get_resource(pdev, IORESOURCE_MEM, 739 res = platform_get_resource(pdev, IORESOURCE_MEM,
742 PLAT_RESOURCE_GTD_IFACE_INDEX); 740 PLAT_RESOURCE_GTD_IFACE_INDEX);
743 if (!res) { 741 ++punit_res;
744 dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n"); 742 if (res) {
745 return -ENXIO; 743 *punit_res = *res;
744 dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
746 } 745 }
747 /* This is index 5 to cover GTD interface register */
748 *++punit_res = *res;
749 dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
750 746
751 res = platform_get_resource(pdev, IORESOURCE_MEM, 747 res = platform_get_resource(pdev, IORESOURCE_MEM,
752 PLAT_RESOURCE_IPC_INDEX); 748 PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
227 struct resource *res; 227 struct resource *res;
228 void __iomem *addr; 228 void __iomem *addr;
229 229
230 /*
231 * The following resources are required
232 * - BIOS_IPC BASE_DATA
233 * - BIOS_IPC BASE_IFACE
234 */
230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 235 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
231 addr = devm_ioremap_resource(&pdev->dev, res); 236 addr = devm_ioremap_resource(&pdev->dev, res);
232 if (IS_ERR(addr)) 237 if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
239 return PTR_ERR(addr); 244 return PTR_ERR(addr);
240 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr; 245 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
241 246
247 /*
248 * The following resources are optional
249 * - ISPDRIVER_IPC BASE_DATA
250 * - ISPDRIVER_IPC BASE_IFACE
251 * - GTDRIVER_IPC BASE_DATA
252 * - GTDRIVER_IPC BASE_IFACE
253 */
242 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 254 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
243 addr = devm_ioremap_resource(&pdev->dev, res); 255 if (res) {
244 if (IS_ERR(addr)) 256 addr = devm_ioremap_resource(&pdev->dev, res);
245 return PTR_ERR(addr); 257 if (!IS_ERR(addr))
246 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 258 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
259 }
247 260
248 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 261 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
249 addr = devm_ioremap_resource(&pdev->dev, res); 262 if (res) {
250 if (IS_ERR(addr)) 263 addr = devm_ioremap_resource(&pdev->dev, res);
251 return PTR_ERR(addr); 264 if (!IS_ERR(addr))
252 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 265 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
266 }
253 267
254 res = platform_get_resource(pdev, IORESOURCE_MEM, 4); 268 res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
255 addr = devm_ioremap_resource(&pdev->dev, res); 269 if (res) {
256 if (IS_ERR(addr)) 270 addr = devm_ioremap_resource(&pdev->dev, res);
257 return PTR_ERR(addr); 271 if (!IS_ERR(addr))
258 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 272 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
273 }
259 274
260 res = platform_get_resource(pdev, IORESOURCE_MEM, 5); 275 res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
261 addr = devm_ioremap_resource(&pdev->dev, res); 276 if (res) {
262 if (IS_ERR(addr)) 277 addr = devm_ioremap_resource(&pdev->dev, res);
263 return PTR_ERR(addr); 278 if (!IS_ERR(addr))
264 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 279 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
280 }
265 281
266 return 0; 282 return 0;
267} 283}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
659static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) 659static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
660{ 660{
661 u32 telem_ctrl = 0; 661 u32 telem_ctrl = 0;
662 int ret; 662 int ret = 0;
663 663
664 mutex_lock(&(telm_conf->telem_lock)); 664 mutex_lock(&(telm_conf->telem_lock));
665 if (ioss_period) { 665 if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
7972 fan_update_desired_level(s); 7972 fan_update_desired_level(s);
7973 mutex_unlock(&fan_mutex); 7973 mutex_unlock(&fan_mutex);
7974 7974
7975 if (rc)
7976 return rc;
7975 if (status) 7977 if (status)
7976 *status = s; 7978 *status = s;
7977 7979
7978 return rc; 7980 return 0;
7979} 7981}
7980 7982
7981static int fan_get_speed(unsigned int *speed) 7983static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
135/* Field definitions */ 135/* Field definitions */
136#define HCI_ACCEL_MASK 0x7fff 136#define HCI_ACCEL_MASK 0x7fff
137#define HCI_HOTKEY_DISABLE 0x0b 137#define HCI_HOTKEY_DISABLE 0x0b
138#define HCI_HOTKEY_ENABLE 0x01 138#define HCI_HOTKEY_ENABLE 0x09
139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
140#define HCI_LCD_BRIGHTNESS_BITS 3 140#define HCI_LCD_BRIGHTNESS_BITS 3
141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS) 141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cdfd01f0adb8..8fad0a7044d3 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
1091 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ 1091 RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1092 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */ 1092 RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
1093 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1093 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1094 RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
1094 RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */ 1095 RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
1095 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */ 1096 RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
1096 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */ 1097 RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
392 392
393 .max_register = FTM_PWMLOAD, 393 .max_register = FTM_PWMLOAD,
394 .volatile_reg = fsl_pwm_volatile_reg, 394 .volatile_reg = fsl_pwm_volatile_reg,
395 .cache_type = REGCACHE_RBTREE, 395 .cache_type = REGCACHE_FLAT,
396}; 396};
397 397
398static int fsl_pwm_probe(struct platform_device *pdev) 398static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9607bc826460..e165b7ce29d7 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
126 struct list_head node; 126 struct list_head node;
127 struct mport_dev *md; 127 struct mport_dev *md;
128 enum rio_mport_map_dir dir; 128 enum rio_mport_map_dir dir;
129 u32 rioid; 129 u16 rioid;
130 u64 rio_addr; 130 u64 rio_addr;
131 dma_addr_t phys_addr; /* for mmap */ 131 dma_addr_t phys_addr; /* for mmap */
132 void *virt_addr; /* kernel address, for dma_free_coherent */ 132 void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
137 137
138struct rio_mport_dma_map { 138struct rio_mport_dma_map {
139 int valid; 139 int valid;
140 uint64_t length; 140 u64 length;
141 void *vaddr; 141 void *vaddr;
142 dma_addr_t paddr; 142 dma_addr_t paddr;
143}; 143};
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
208 struct kfifo event_fifo; 208 struct kfifo event_fifo;
209 wait_queue_head_t event_rx_wait; 209 wait_queue_head_t event_rx_wait;
210 spinlock_t fifo_lock; 210 spinlock_t fifo_lock;
211 unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 211 u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
212#ifdef CONFIG_RAPIDIO_DMA_ENGINE 212#ifdef CONFIG_RAPIDIO_DMA_ENGINE
213 struct dma_chan *dmach; 213 struct dma_chan *dmach;
214 struct list_head async_list; 214 struct list_head async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
276 return -EFAULT; 276 return -EFAULT;
277 277
278 if ((maint_io.offset % 4) || 278 if ((maint_io.offset % 4) ||
279 (maint_io.length == 0) || (maint_io.length % 4)) 279 (maint_io.length == 0) || (maint_io.length % 4) ||
280 (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
280 return -EINVAL; 281 return -EINVAL;
281 282
282 buffer = vmalloc(maint_io.length); 283 buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
298 offset += 4; 299 offset += 4;
299 } 300 }
300 301
301 if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) 302 if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
303 buffer, maint_io.length)))
302 ret = -EFAULT; 304 ret = -EFAULT;
303out: 305out:
304 vfree(buffer); 306 vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
319 return -EFAULT; 321 return -EFAULT;
320 322
321 if ((maint_io.offset % 4) || 323 if ((maint_io.offset % 4) ||
322 (maint_io.length == 0) || (maint_io.length % 4)) 324 (maint_io.length == 0) || (maint_io.length % 4) ||
325 (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
323 return -EINVAL; 326 return -EINVAL;
324 327
325 buffer = vmalloc(maint_io.length); 328 buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
327 return -ENOMEM; 330 return -ENOMEM;
328 length = maint_io.length; 331 length = maint_io.length;
329 332
330 if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { 333 if (unlikely(copy_from_user(buffer,
334 (void __user *)(uintptr_t)maint_io.buffer, length))) {
331 ret = -EFAULT; 335 ret = -EFAULT;
332 goto out; 336 goto out;
333 } 337 }
@@ -360,7 +364,7 @@ out:
360 */ 364 */
361static int 365static int
362rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, 366rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
363 u32 rioid, u64 raddr, u32 size, 367 u16 rioid, u64 raddr, u32 size,
364 dma_addr_t *paddr) 368 dma_addr_t *paddr)
365{ 369{
366 struct rio_mport *mport = md->mport; 370 struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
369 373
370 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); 374 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
371 375
372 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 376 map = kzalloc(sizeof(*map), GFP_KERNEL);
373 if (map == NULL) 377 if (map == NULL)
374 return -ENOMEM; 378 return -ENOMEM;
375 379
@@ -394,7 +398,7 @@ err_map_outb:
394 398
395static int 399static int
396rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, 400rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
397 u32 rioid, u64 raddr, u32 size, 401 u16 rioid, u64 raddr, u32 size,
398 dma_addr_t *paddr) 402 dma_addr_t *paddr)
399{ 403{
400 struct rio_mport_mapping *map; 404 struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
433 dma_addr_t paddr; 437 dma_addr_t paddr;
434 int ret; 438 int ret;
435 439
436 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 440 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
437 return -EFAULT; 441 return -EFAULT;
438 442
439 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", 443 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
448 452
449 map.handle = paddr; 453 map.handle = paddr;
450 454
451 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) 455 if (unlikely(copy_to_user(arg, &map, sizeof(map))))
452 return -EFAULT; 456 return -EFAULT;
453 return 0; 457 return 0;
454} 458}
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
469 if (!md->mport->ops->unmap_outb) 473 if (!md->mport->ops->unmap_outb)
470 return -EPROTONOSUPPORT; 474 return -EPROTONOSUPPORT;
471 475
472 if (copy_from_user(&handle, arg, sizeof(u64))) 476 if (copy_from_user(&handle, arg, sizeof(handle)))
473 return -EFAULT; 477 return -EFAULT;
474 478
475 rmcd_debug(OBW, "h=0x%llx", handle); 479 rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
498static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) 502static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
499{ 503{
500 struct mport_dev *md = priv->md; 504 struct mport_dev *md = priv->md;
501 uint16_t hdid; 505 u16 hdid;
502 506
503 if (copy_from_user(&hdid, arg, sizeof(uint16_t))) 507 if (copy_from_user(&hdid, arg, sizeof(hdid)))
504 return -EFAULT; 508 return -EFAULT;
505 509
506 md->mport->host_deviceid = hdid; 510 md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
520static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) 524static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
521{ 525{
522 struct mport_dev *md = priv->md; 526 struct mport_dev *md = priv->md;
523 uint32_t comptag; 527 u32 comptag;
524 528
525 if (copy_from_user(&comptag, arg, sizeof(uint32_t))) 529 if (copy_from_user(&comptag, arg, sizeof(comptag)))
526 return -EFAULT; 530 return -EFAULT;
527 531
528 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); 532 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
837 * @xfer: data transfer descriptor structure 841 * @xfer: data transfer descriptor structure
838 */ 842 */
839static int 843static int
840rio_dma_transfer(struct file *filp, uint32_t transfer_mode, 844rio_dma_transfer(struct file *filp, u32 transfer_mode,
841 enum rio_transfer_sync sync, enum dma_data_direction dir, 845 enum rio_transfer_sync sync, enum dma_data_direction dir,
842 struct rio_transfer_io *xfer) 846 struct rio_transfer_io *xfer)
843{ 847{
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
875 unsigned long offset; 879 unsigned long offset;
876 long pinned; 880 long pinned;
877 881
878 offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; 882 offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
879 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; 883 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
880 884
881 page_list = kmalloc_array(nr_pages, 885 page_list = kmalloc_array(nr_pages,
@@ -886,7 +890,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
886 } 890 }
887 891
888 down_read(&current->mm->mmap_sem); 892 down_read(&current->mm->mmap_sem);
889 pinned = get_user_pages(current, current->mm, 893 pinned = get_user_pages(
890 (unsigned long)xfer->loc_addr & PAGE_MASK, 894 (unsigned long)xfer->loc_addr & PAGE_MASK,
891 nr_pages, dir == DMA_FROM_DEVICE, 0, 895 nr_pages, dir == DMA_FROM_DEVICE, 0,
892 page_list, NULL); 896 page_list, NULL);
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
1015 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) 1019 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
1016 return -EFAULT; 1020 return -EFAULT;
1017 1021
1018 if (transaction.count != 1) 1022 if (transaction.count != 1) /* only single transfer for now */
1019 return -EINVAL; 1023 return -EINVAL;
1020 1024
1021 if ((transaction.transfer_mode & 1025 if ((transaction.transfer_mode &
1022 priv->md->properties.transfer_mode) == 0) 1026 priv->md->properties.transfer_mode) == 0)
1023 return -ENODEV; 1027 return -ENODEV;
1024 1028
1025 transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); 1029 transfer = vmalloc(transaction.count * sizeof(*transfer));
1026 if (!transfer) 1030 if (!transfer)
1027 return -ENOMEM; 1031 return -ENOMEM;
1028 1032
1029 if (unlikely(copy_from_user(transfer, transaction.block, 1033 if (unlikely(copy_from_user(transfer,
1030 transaction.count * sizeof(struct rio_transfer_io)))) { 1034 (void __user *)(uintptr_t)transaction.block,
1035 transaction.count * sizeof(*transfer)))) {
1031 ret = -EFAULT; 1036 ret = -EFAULT;
1032 goto out_free; 1037 goto out_free;
1033 } 1038 }
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
1038 ret = rio_dma_transfer(filp, transaction.transfer_mode, 1043 ret = rio_dma_transfer(filp, transaction.transfer_mode,
1039 transaction.sync, dir, &transfer[i]); 1044 transaction.sync, dir, &transfer[i]);
1040 1045
1041 if (unlikely(copy_to_user(transaction.block, transfer, 1046 if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
1042 transaction.count * sizeof(struct rio_transfer_io)))) 1047 transfer,
1048 transaction.count * sizeof(*transfer))))
1043 ret = -EFAULT; 1049 ret = -EFAULT;
1044 1050
1045out_free: 1051out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
1129} 1135}
1130 1136
1131static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, 1137static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
1132 uint64_t size, struct rio_mport_mapping **mapping) 1138 u64 size, struct rio_mport_mapping **mapping)
1133{ 1139{
1134 struct rio_mport_mapping *map; 1140 struct rio_mport_mapping *map;
1135 1141
1136 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1142 map = kzalloc(sizeof(*map), GFP_KERNEL);
1137 if (map == NULL) 1143 if (map == NULL)
1138 return -ENOMEM; 1144 return -ENOMEM;
1139 1145
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1165 struct rio_mport_mapping *mapping = NULL; 1171 struct rio_mport_mapping *mapping = NULL;
1166 int ret; 1172 int ret;
1167 1173
1168 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) 1174 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1169 return -EFAULT; 1175 return -EFAULT;
1170 1176
1171 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); 1177 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1174 1180
1175 map.dma_handle = mapping->phys_addr; 1181 map.dma_handle = mapping->phys_addr;
1176 1182
1177 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { 1183 if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1178 mutex_lock(&md->buf_mutex); 1184 mutex_lock(&md->buf_mutex);
1179 kref_put(&mapping->ref, mport_release_mapping); 1185 kref_put(&mapping->ref, mport_release_mapping);
1180 mutex_unlock(&md->buf_mutex); 1186 mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
1192 int ret = -EFAULT; 1198 int ret = -EFAULT;
1193 struct rio_mport_mapping *map, *_map; 1199 struct rio_mport_mapping *map, *_map;
1194 1200
1195 if (copy_from_user(&handle, arg, sizeof(u64))) 1201 if (copy_from_user(&handle, arg, sizeof(handle)))
1196 return -EFAULT; 1202 return -EFAULT;
1197 rmcd_debug(EXIT, "filp=%p", filp); 1203 rmcd_debug(EXIT, "filp=%p", filp);
1198 1204
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
1242 1248
1243static int 1249static int
1244rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, 1250rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
1245 u64 raddr, u32 size, 1251 u64 raddr, u64 size,
1246 struct rio_mport_mapping **mapping) 1252 struct rio_mport_mapping **mapping)
1247{ 1253{
1248 struct rio_mport *mport = md->mport; 1254 struct rio_mport *mport = md->mport;
1249 struct rio_mport_mapping *map; 1255 struct rio_mport_mapping *map;
1250 int ret; 1256 int ret;
1251 1257
1252 map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1258 /* rio_map_inb_region() accepts u32 size */
1259 if (size > 0xffffffff)
1260 return -EINVAL;
1261
1262 map = kzalloc(sizeof(*map), GFP_KERNEL);
1253 if (map == NULL) 1263 if (map == NULL)
1254 return -ENOMEM; 1264 return -ENOMEM;
1255 1265
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
1262 1272
1263 if (raddr == RIO_MAP_ANY_ADDR) 1273 if (raddr == RIO_MAP_ANY_ADDR)
1264 raddr = map->phys_addr; 1274 raddr = map->phys_addr;
1265 ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); 1275 ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
1266 if (ret < 0) 1276 if (ret < 0)
1267 goto err_map_inb; 1277 goto err_map_inb;
1268 1278
@@ -1288,7 +1298,7 @@ err_dma_alloc:
1288 1298
1289static int 1299static int
1290rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, 1300rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
1291 u64 raddr, u32 size, 1301 u64 raddr, u64 size,
1292 struct rio_mport_mapping **mapping) 1302 struct rio_mport_mapping **mapping)
1293{ 1303{
1294 struct rio_mport_mapping *map; 1304 struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1331 1341
1332 if (!md->mport->ops->map_inb) 1342 if (!md->mport->ops->map_inb)
1333 return -EPROTONOSUPPORT; 1343 return -EPROTONOSUPPORT;
1334 if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 1344 if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1335 return -EFAULT; 1345 return -EFAULT;
1336 1346
1337 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); 1347 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1344 map.handle = mapping->phys_addr; 1354 map.handle = mapping->phys_addr;
1345 map.rio_addr = mapping->rio_addr; 1355 map.rio_addr = mapping->rio_addr;
1346 1356
1347 if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { 1357 if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1348 /* Delete mapping if it was created by this request */ 1358 /* Delete mapping if it was created by this request */
1349 if (ret == 0 && mapping->filp == filp) { 1359 if (ret == 0 && mapping->filp == filp) {
1350 mutex_lock(&md->buf_mutex); 1360 mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1375 if (!md->mport->ops->unmap_inb) 1385 if (!md->mport->ops->unmap_inb)
1376 return -EPROTONOSUPPORT; 1386 return -EPROTONOSUPPORT;
1377 1387
1378 if (copy_from_user(&handle, arg, sizeof(u64))) 1388 if (copy_from_user(&handle, arg, sizeof(handle)))
1379 return -EFAULT; 1389 return -EFAULT;
1380 1390
1381 mutex_lock(&md->buf_mutex); 1391 mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1401static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) 1411static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
1402{ 1412{
1403 struct mport_dev *md = priv->md; 1413 struct mport_dev *md = priv->md;
1404 uint32_t port_idx = md->mport->index; 1414 u32 port_idx = md->mport->index;
1405 1415
1406 rmcd_debug(MPORT, "port_index=%d", port_idx); 1416 rmcd_debug(MPORT, "port_index=%d", port_idx);
1407 1417
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
1451 handled = 0; 1461 handled = 0;
1452 spin_lock(&data->db_lock); 1462 spin_lock(&data->db_lock);
1453 list_for_each_entry(db_filter, &data->doorbells, data_node) { 1463 list_for_each_entry(db_filter, &data->doorbells, data_node) {
1454 if (((db_filter->filter.rioid == 0xffffffff || 1464 if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
1455 db_filter->filter.rioid == src)) && 1465 db_filter->filter.rioid == src)) &&
1456 info >= db_filter->filter.low && 1466 info >= db_filter->filter.low &&
1457 info <= db_filter->filter.high) { 1467 info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
1525 if (copy_from_user(&filter, arg, sizeof(filter))) 1535 if (copy_from_user(&filter, arg, sizeof(filter)))
1526 return -EFAULT; 1536 return -EFAULT;
1527 1537
1538 if (filter.low > filter.high)
1539 return -EINVAL;
1540
1528 spin_lock_irqsave(&priv->md->db_lock, flags); 1541 spin_lock_irqsave(&priv->md->db_lock, flags);
1529 list_for_each_entry(db_filter, &priv->db_filters, priv_node) { 1542 list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
1530 if (db_filter->filter.rioid == filter.rioid && 1543 if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1737 return -EEXIST; 1750 return -EEXIST;
1738 } 1751 }
1739 1752
1740 size = sizeof(struct rio_dev); 1753 size = sizeof(*rdev);
1741 mport = md->mport; 1754 mport = md->mport;
1742 destid = (u16)dev_info.destid; 1755 destid = dev_info.destid;
1743 hopcount = (u8)dev_info.hopcount; 1756 hopcount = dev_info.hopcount;
1744 1757
1745 if (rio_mport_read_config_32(mport, destid, hopcount, 1758 if (rio_mport_read_config_32(mport, destid, hopcount,
1746 RIO_PEF_CAR, &rval)) 1759 RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1872 do { 1885 do {
1873 rdev = rio_get_comptag(dev_info.comptag, rdev); 1886 rdev = rio_get_comptag(dev_info.comptag, rdev);
1874 if (rdev && rdev->dev.parent == &mport->net->dev && 1887 if (rdev && rdev->dev.parent == &mport->net->dev &&
1875 rdev->destid == (u16)dev_info.destid && 1888 rdev->destid == dev_info.destid &&
1876 rdev->hopcount == (u8)dev_info.hopcount) 1889 rdev->hopcount == dev_info.hopcount)
1877 break; 1890 break;
1878 } while (rdev); 1891 } while (rdev);
1879 } 1892 }
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
2146 return maint_port_idx_get(data, (void __user *)arg); 2159 return maint_port_idx_get(data, (void __user *)arg);
2147 case RIO_MPORT_GET_PROPERTIES: 2160 case RIO_MPORT_GET_PROPERTIES:
2148 md->properties.hdid = md->mport->host_deviceid; 2161 md->properties.hdid = md->mport->host_deviceid;
2149 if (copy_to_user((void __user *)arg, &(data->md->properties), 2162 if (copy_to_user((void __user *)arg, &(md->properties),
2150 sizeof(data->md->properties))) 2163 sizeof(md->properties)))
2151 return -EFAULT; 2164 return -EFAULT;
2152 return 0; 2165 return 0;
2153 case RIO_ENABLE_DOORBELL_RANGE: 2166 case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
2159 case RIO_DISABLE_PORTWRITE_RANGE: 2172 case RIO_DISABLE_PORTWRITE_RANGE:
2160 return rio_mport_remove_pw_filter(data, (void __user *)arg); 2173 return rio_mport_remove_pw_filter(data, (void __user *)arg);
2161 case RIO_SET_EVENT_MASK: 2174 case RIO_SET_EVENT_MASK:
2162 data->event_mask = arg; 2175 data->event_mask = (u32)arg;
2163 return 0; 2176 return 0;
2164 case RIO_GET_EVENT_MASK: 2177 case RIO_GET_EVENT_MASK:
2165 if (copy_to_user((void __user *)arg, &data->event_mask, 2178 if (copy_to_user((void __user *)arg, &data->event_mask,
2166 sizeof(data->event_mask))) 2179 sizeof(u32)))
2167 return -EFAULT; 2180 return -EFAULT;
2168 return 0; 2181 return 0;
2169 case RIO_MAP_OUTBOUND: 2182 case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
2374 return -EINVAL; 2387 return -EINVAL;
2375 2388
2376 ret = rio_mport_send_doorbell(mport, 2389 ret = rio_mport_send_doorbell(mport,
2377 (u16)event.u.doorbell.rioid, 2390 event.u.doorbell.rioid,
2378 event.u.doorbell.payload); 2391 event.u.doorbell.payload);
2379 if (ret < 0) 2392 if (ret < 0)
2380 return ret; 2393 return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2421 struct mport_dev *md; 2434 struct mport_dev *md;
2422 struct rio_mport_attr attr; 2435 struct rio_mport_attr attr;
2423 2436
2424 md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); 2437 md = kzalloc(sizeof(*md), GFP_KERNEL);
2425 if (!md) { 2438 if (!md) {
2426 rmcd_error("Unable allocate a device object"); 2439 rmcd_error("Unable allocate a device object");
2427 return NULL; 2440 return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2470 /* The transfer_mode property will be returned through mport query 2483 /* The transfer_mode property will be returned through mport query
2471 * interface 2484 * interface
2472 */ 2485 */
2473#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ 2486#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2474 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; 2487 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2475#else 2488#else
2476 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; 2489 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
@@ -2669,9 +2682,9 @@ static int __init mport_init(void)
2669 2682
2670 /* Create device class needed by udev */ 2683 /* Create device class needed by udev */
2671 dev_class = class_create(THIS_MODULE, DRV_NAME); 2684 dev_class = class_create(THIS_MODULE, DRV_NAME);
2672 if (!dev_class) { 2685 if (IS_ERR(dev_class)) {
2673 rmcd_error("Unable to create " DRV_NAME " class"); 2686 rmcd_error("Unable to create " DRV_NAME " class");
2674 return -EINVAL; 2687 return PTR_ERR(dev_class);
2675 } 2688 }
2676 2689
2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); 2690 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 6bb04d453247..6f056caa8a56 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -189,9 +189,9 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
189 } 189 }
190 190
191 ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); 191 ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
192 if (!ddata->boot_base) { 192 if (IS_ERR(ddata->boot_base)) {
193 dev_err(dev, "Boot base not found\n"); 193 dev_err(dev, "Boot base not found\n");
194 return -EINVAL; 194 return PTR_ERR(ddata->boot_base);
195 } 195 }
196 196
197 err = of_property_read_u32_index(np, "st,syscfg", 1, 197 err = of_property_read_u32_index(np, "st,syscfg", 1,
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
863 * A user-initiated temperature conversion is not started by this function, 863 * A user-initiated temperature conversion is not started by this function,
864 * so the temperature is updated once every 64 seconds. 864 * so the temperature is updated once every 64 seconds.
865 */ 865 */
866static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC) 866static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
867{ 867{
868 struct ds1307 *ds1307 = dev_get_drvdata(dev); 868 struct ds1307 *ds1307 = dev_get_drvdata(dev);
869 u8 temp_buf[2]; 869 u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
892 struct device_attribute *attr, char *buf) 892 struct device_attribute *attr, char *buf)
893{ 893{
894 int ret; 894 int ret;
895 s16 temp; 895 s32 temp;
896 896
897 ret = ds3231_hwmon_read_temp(dev, &temp); 897 ret = ds3231_hwmon_read_temp(dev, &temp);
898 if (ret) 898 if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
1531 return PTR_ERR(ds1307->rtc); 1531 return PTR_ERR(ds1307->rtc);
1532 } 1532 }
1533 1533
1534 if (ds1307_can_wakeup_device) { 1534 if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
1535 /* Disable request for an IRQ */ 1535 /* Disable request for an IRQ */
1536 want_irq = false; 1536 want_irq = false;
1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n"); 1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 17ad5749e91d..1e560188dd13 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -317,17 +317,17 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
317 struct alias_pav_group *group; 317 struct alias_pav_group *group;
318 struct dasd_uid uid; 318 struct dasd_uid uid;
319 319
320 spin_lock(get_ccwdev_lock(device->cdev));
320 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type; 321 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
321 private->uid.base_unit_addr = 322 private->uid.base_unit_addr =
322 lcu->uac->unit[private->uid.real_unit_addr].base_ua; 323 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
323 uid = private->uid; 324 uid = private->uid;
324 325 spin_unlock(get_ccwdev_lock(device->cdev));
325 /* if we have no PAV anyway, we don't need to bother with PAV groups */ 326 /* if we have no PAV anyway, we don't need to bother with PAV groups */
326 if (lcu->pav == NO_PAV) { 327 if (lcu->pav == NO_PAV) {
327 list_move(&device->alias_list, &lcu->active_devices); 328 list_move(&device->alias_list, &lcu->active_devices);
328 return 0; 329 return 0;
329 } 330 }
330
331 group = _find_group(lcu, &uid); 331 group = _find_group(lcu, &uid);
332 if (!group) { 332 if (!group) {
333 group = kzalloc(sizeof(*group), GFP_ATOMIC); 333 group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -397,130 +397,6 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
397 return 0; 397 return 0;
398} 398}
399 399
400/*
401 * This function tries to lock all devices on an lcu via trylock
402 * return NULL on success otherwise return first failed device
403 */
404static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
405 struct dasd_device *pos)
406
407{
408 struct alias_pav_group *pavgroup;
409 struct dasd_device *device;
410
411 list_for_each_entry(device, &lcu->active_devices, alias_list) {
412 if (device == pos)
413 continue;
414 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
415 return device;
416 }
417 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
418 if (device == pos)
419 continue;
420 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
421 return device;
422 }
423 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
424 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
425 if (device == pos)
426 continue;
427 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
428 return device;
429 }
430 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
431 if (device == pos)
432 continue;
433 if (!spin_trylock(get_ccwdev_lock(device->cdev)))
434 return device;
435 }
436 }
437 return NULL;
438}
439
440/*
441 * unlock all devices except the one that is specified as pos
442 * stop if enddev is specified and reached
443 */
444static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
445 struct dasd_device *pos,
446 struct dasd_device *enddev)
447
448{
449 struct alias_pav_group *pavgroup;
450 struct dasd_device *device;
451
452 list_for_each_entry(device, &lcu->active_devices, alias_list) {
453 if (device == pos)
454 continue;
455 if (device == enddev)
456 return;
457 spin_unlock(get_ccwdev_lock(device->cdev));
458 }
459 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
460 if (device == pos)
461 continue;
462 if (device == enddev)
463 return;
464 spin_unlock(get_ccwdev_lock(device->cdev));
465 }
466 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
467 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
468 if (device == pos)
469 continue;
470 if (device == enddev)
471 return;
472 spin_unlock(get_ccwdev_lock(device->cdev));
473 }
474 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
475 if (device == pos)
476 continue;
477 if (device == enddev)
478 return;
479 spin_unlock(get_ccwdev_lock(device->cdev));
480 }
481 }
482}
483
484/*
485 * this function is needed because the locking order
486 * device lock -> lcu lock
487 * needs to be assured when iterating over devices in an LCU
488 *
489 * if a device is specified in pos then the device lock is already hold
490 */
491static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
492 struct dasd_device *pos,
493 unsigned long *flags)
494{
495 struct dasd_device *failed;
496
497 do {
498 spin_lock_irqsave(&lcu->lock, *flags);
499 failed = _trylock_all_devices_on_lcu(lcu, pos);
500 if (failed) {
501 _unlock_all_devices_on_lcu(lcu, pos, failed);
502 spin_unlock_irqrestore(&lcu->lock, *flags);
503 cpu_relax();
504 }
505 } while (failed);
506}
507
508static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
509 struct dasd_device *pos)
510{
511 struct dasd_device *failed;
512
513 do {
514 spin_lock(&lcu->lock);
515 failed = _trylock_all_devices_on_lcu(lcu, pos);
516 if (failed) {
517 _unlock_all_devices_on_lcu(lcu, pos, failed);
518 spin_unlock(&lcu->lock);
519 cpu_relax();
520 }
521 } while (failed);
522}
523
524static int read_unit_address_configuration(struct dasd_device *device, 400static int read_unit_address_configuration(struct dasd_device *device,
525 struct alias_lcu *lcu) 401 struct alias_lcu *lcu)
526{ 402{
@@ -615,7 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
615 if (rc) 491 if (rc)
616 return rc; 492 return rc;
617 493
618 _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); 494 spin_lock_irqsave(&lcu->lock, flags);
619 lcu->pav = NO_PAV; 495 lcu->pav = NO_PAV;
620 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { 496 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
621 switch (lcu->uac->unit[i].ua_type) { 497 switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
634 alias_list) { 510 alias_list) {
635 _add_device_to_lcu(lcu, device, refdev); 511 _add_device_to_lcu(lcu, device, refdev);
636 } 512 }
637 _unlock_all_devices_on_lcu(lcu, NULL, NULL);
638 spin_unlock_irqrestore(&lcu->lock, flags); 513 spin_unlock_irqrestore(&lcu->lock, flags);
639 return 0; 514 return 0;
640} 515}
@@ -722,8 +597,7 @@ int dasd_alias_add_device(struct dasd_device *device)
722 597
723 lcu = private->lcu; 598 lcu = private->lcu;
724 rc = 0; 599 rc = 0;
725 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 600 spin_lock_irqsave(&lcu->lock, flags);
726 spin_lock(&lcu->lock);
727 if (!(lcu->flags & UPDATE_PENDING)) { 601 if (!(lcu->flags & UPDATE_PENDING)) {
728 rc = _add_device_to_lcu(lcu, device, device); 602 rc = _add_device_to_lcu(lcu, device, device);
729 if (rc) 603 if (rc)
@@ -733,8 +607,7 @@ int dasd_alias_add_device(struct dasd_device *device)
733 list_move(&device->alias_list, &lcu->active_devices); 607 list_move(&device->alias_list, &lcu->active_devices);
734 _schedule_lcu_update(lcu, device); 608 _schedule_lcu_update(lcu, device);
735 } 609 }
736 spin_unlock(&lcu->lock); 610 spin_unlock_irqrestore(&lcu->lock, flags);
737 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
738 return rc; 611 return rc;
739} 612}
740 613
@@ -933,15 +806,27 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
933 struct alias_pav_group *pavgroup; 806 struct alias_pav_group *pavgroup;
934 struct dasd_device *device; 807 struct dasd_device *device;
935 808
936 list_for_each_entry(device, &lcu->active_devices, alias_list) 809 list_for_each_entry(device, &lcu->active_devices, alias_list) {
810 spin_lock(get_ccwdev_lock(device->cdev));
937 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 811 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
938 list_for_each_entry(device, &lcu->inactive_devices, alias_list) 812 spin_unlock(get_ccwdev_lock(device->cdev));
813 }
814 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
815 spin_lock(get_ccwdev_lock(device->cdev));
939 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 816 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
817 spin_unlock(get_ccwdev_lock(device->cdev));
818 }
940 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 819 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
941 list_for_each_entry(device, &pavgroup->baselist, alias_list) 820 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
821 spin_lock(get_ccwdev_lock(device->cdev));
942 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 822 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
943 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) 823 spin_unlock(get_ccwdev_lock(device->cdev));
824 }
825 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
826 spin_lock(get_ccwdev_lock(device->cdev));
944 dasd_device_set_stop_bits(device, DASD_STOPPED_SU); 827 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
828 spin_unlock(get_ccwdev_lock(device->cdev));
829 }
945 } 830 }
946} 831}
947 832
@@ -950,15 +835,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
950 struct alias_pav_group *pavgroup; 835 struct alias_pav_group *pavgroup;
951 struct dasd_device *device; 836 struct dasd_device *device;
952 837
953 list_for_each_entry(device, &lcu->active_devices, alias_list) 838 list_for_each_entry(device, &lcu->active_devices, alias_list) {
839 spin_lock(get_ccwdev_lock(device->cdev));
954 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 840 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
955 list_for_each_entry(device, &lcu->inactive_devices, alias_list) 841 spin_unlock(get_ccwdev_lock(device->cdev));
842 }
843 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
844 spin_lock(get_ccwdev_lock(device->cdev));
956 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 845 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
846 spin_unlock(get_ccwdev_lock(device->cdev));
847 }
957 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 848 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
958 list_for_each_entry(device, &pavgroup->baselist, alias_list) 849 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
850 spin_lock(get_ccwdev_lock(device->cdev));
959 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 851 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
960 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) 852 spin_unlock(get_ccwdev_lock(device->cdev));
853 }
854 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
855 spin_lock(get_ccwdev_lock(device->cdev));
961 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU); 856 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
857 spin_unlock(get_ccwdev_lock(device->cdev));
858 }
962 } 859 }
963} 860}
964 861
@@ -984,48 +881,32 @@ static void summary_unit_check_handling_work(struct work_struct *work)
984 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 881 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
985 reset_summary_unit_check(lcu, device, suc_data->reason); 882 reset_summary_unit_check(lcu, device, suc_data->reason);
986 883
987 _trylock_and_lock_lcu_irqsave(lcu, NULL, &flags); 884 spin_lock_irqsave(&lcu->lock, flags);
988 _unstop_all_devices_on_lcu(lcu); 885 _unstop_all_devices_on_lcu(lcu);
989 _restart_all_base_devices_on_lcu(lcu); 886 _restart_all_base_devices_on_lcu(lcu);
990 /* 3. read new alias configuration */ 887 /* 3. read new alias configuration */
991 _schedule_lcu_update(lcu, device); 888 _schedule_lcu_update(lcu, device);
992 lcu->suc_data.device = NULL; 889 lcu->suc_data.device = NULL;
993 dasd_put_device(device); 890 dasd_put_device(device);
994 _unlock_all_devices_on_lcu(lcu, NULL, NULL);
995 spin_unlock_irqrestore(&lcu->lock, flags); 891 spin_unlock_irqrestore(&lcu->lock, flags);
996} 892}
997 893
998/* 894void dasd_alias_handle_summary_unit_check(struct work_struct *work)
999 * note: this will be called from int handler context (cdev locked)
1000 */
1001void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1002 struct irb *irb)
1003{ 895{
896 struct dasd_device *device = container_of(work, struct dasd_device,
897 suc_work);
1004 struct dasd_eckd_private *private = device->private; 898 struct dasd_eckd_private *private = device->private;
1005 struct alias_lcu *lcu; 899 struct alias_lcu *lcu;
1006 char reason; 900 unsigned long flags;
1007 char *sense;
1008
1009 sense = dasd_get_sense(irb);
1010 if (sense) {
1011 reason = sense[8];
1012 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
1013 "eckd handle summary unit check: reason", reason);
1014 } else {
1015 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1016 "eckd handle summary unit check:"
1017 " no reason code available");
1018 return;
1019 }
1020 901
1021 lcu = private->lcu; 902 lcu = private->lcu;
1022 if (!lcu) { 903 if (!lcu) {
1023 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 904 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1024 "device not ready to handle summary" 905 "device not ready to handle summary"
1025 " unit check (no lcu structure)"); 906 " unit check (no lcu structure)");
1026 return; 907 goto out;
1027 } 908 }
1028 _trylock_and_lock_lcu(lcu, device); 909 spin_lock_irqsave(&lcu->lock, flags);
1029 /* If this device is about to be removed just return and wait for 910 /* If this device is about to be removed just return and wait for
1030 * the next interrupt on a different device 911 * the next interrupt on a different device
1031 */ 912 */
@@ -1033,27 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1033 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 914 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1034 "device is in offline processing," 915 "device is in offline processing,"
1035 " don't do summary unit check handling"); 916 " don't do summary unit check handling");
1036 _unlock_all_devices_on_lcu(lcu, device, NULL); 917 goto out_unlock;
1037 spin_unlock(&lcu->lock);
1038 return;
1039 } 918 }
1040 if (lcu->suc_data.device) { 919 if (lcu->suc_data.device) {
1041 /* already scheduled or running */ 920 /* already scheduled or running */
1042 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 921 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1043 "previous instance of summary unit check worker" 922 "previous instance of summary unit check worker"
1044 " still pending"); 923 " still pending");
1045 _unlock_all_devices_on_lcu(lcu, device, NULL); 924 goto out_unlock;
1046 spin_unlock(&lcu->lock);
1047 return ;
1048 } 925 }
1049 _stop_all_devices_on_lcu(lcu); 926 _stop_all_devices_on_lcu(lcu);
1050 /* prepare for lcu_update */ 927 /* prepare for lcu_update */
1051 private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING; 928 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
1052 lcu->suc_data.reason = reason; 929 lcu->suc_data.reason = private->suc_reason;
1053 lcu->suc_data.device = device; 930 lcu->suc_data.device = device;
1054 dasd_get_device(device); 931 dasd_get_device(device);
1055 _unlock_all_devices_on_lcu(lcu, device, NULL);
1056 spin_unlock(&lcu->lock);
1057 if (!schedule_work(&lcu->suc_data.worker)) 932 if (!schedule_work(&lcu->suc_data.worker))
1058 dasd_put_device(device); 933 dasd_put_device(device);
934out_unlock:
935 spin_unlock_irqrestore(&lcu->lock, flags);
936out:
937 clear_bit(DASD_FLAG_SUC, &device->flags);
938 dasd_put_device(device);
1059}; 939};
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 75c032dcf173..c1b4ae55e129 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1682,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1682 1682
1683 /* setup work queue for validate server*/ 1683 /* setup work queue for validate server*/
1684 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); 1684 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1685 /* setup work queue for summary unit check */
1686 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
1685 1687
1686 if (!ccw_device_is_pathgroup(device->cdev)) { 1688 if (!ccw_device_is_pathgroup(device->cdev)) {
1687 dev_warn(&device->cdev->dev, 1689 dev_warn(&device->cdev->dev,
@@ -2549,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2549 device->state == DASD_STATE_ONLINE && 2551 device->state == DASD_STATE_ONLINE &&
2550 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2552 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2551 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 2553 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
2552 /*
2553 * the state change could be caused by an alias
2554 * reassignment remove device from alias handling
2555 * to prevent new requests from being scheduled on
2556 * the wrong alias device
2557 */
2558 dasd_alias_remove_device(device);
2559
2560 /* schedule worker to reload device */ 2554 /* schedule worker to reload device */
2561 dasd_reload_device(device); 2555 dasd_reload_device(device);
2562 } 2556 }
@@ -2571,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2571 /* summary unit check */ 2565 /* summary unit check */
2572 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 2566 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
2573 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2567 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
2574 dasd_alias_handle_summary_unit_check(device, irb); 2568 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
2569 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2570 "eckd suc: device already notified");
2571 return;
2572 }
2573 sense = dasd_get_sense(irb);
2574 if (!sense) {
2575 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2576 "eckd suc: no reason code available");
2577 clear_bit(DASD_FLAG_SUC, &device->flags);
2578 return;
2579
2580 }
2581 private->suc_reason = sense[8];
2582 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
2583 "eckd handle summary unit check: reason",
2584 private->suc_reason);
2585 dasd_get_device(device);
2586 if (!schedule_work(&device->suc_work))
2587 dasd_put_device(device);
2588
2575 return; 2589 return;
2576 } 2590 }
2577 2591
@@ -4495,6 +4509,12 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
4495 struct dasd_uid uid; 4509 struct dasd_uid uid;
4496 unsigned long flags; 4510 unsigned long flags;
4497 4511
4512 /*
4513 * remove device from alias handling to prevent new requests
4514 * from being scheduled on the wrong alias device
4515 */
4516 dasd_alias_remove_device(device);
4517
4498 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4518 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
4499 old_base = private->uid.base_unit_addr; 4519 old_base = private->uid.base_unit_addr;
4500 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 4520 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index f8f91ee652d3..6d9a6d3517cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -525,6 +525,7 @@ struct dasd_eckd_private {
525 int count; 525 int count;
526 526
527 u32 fcx_max_data; 527 u32 fcx_max_data;
528 char suc_reason;
528}; 529};
529 530
530 531
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
534int dasd_alias_add_device(struct dasd_device *); 535int dasd_alias_add_device(struct dasd_device *);
535int dasd_alias_remove_device(struct dasd_device *); 536int dasd_alias_remove_device(struct dasd_device *);
536struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 537struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
537void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); 538void dasd_alias_handle_summary_unit_check(struct work_struct *);
538void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 539void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
539void dasd_alias_lcu_setup_complete(struct dasd_device *); 540void dasd_alias_lcu_setup_complete(struct dasd_device *);
540void dasd_alias_wait_for_lcu_setup(struct dasd_device *); 541void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8de29be32a56..0f0add932e7a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -470,6 +470,7 @@ struct dasd_device {
470 struct work_struct restore_device; 470 struct work_struct restore_device;
471 struct work_struct reload_device; 471 struct work_struct reload_device;
472 struct work_struct kick_validate; 472 struct work_struct kick_validate;
473 struct work_struct suc_work;
473 struct timer_list timer; 474 struct timer_list timer;
474 475
475 debug_info_t *debug_area; 476 debug_info_t *debug_area;
@@ -542,6 +543,7 @@ struct dasd_attention_data {
542#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */ 543#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
543#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */ 544#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
544#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */ 545#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
546#define DASD_FLAG_SUC 14 /* unhandled summary unit check */
545 547
546#define DASD_SLEEPON_START_TAG ((void *) 1) 548#define DASD_SLEEPON_START_TAG ((void *) 1)
547#define DASD_SLEEPON_END_TAG ((void *) 2) 549#define DASD_SLEEPON_END_TAG ((void *) 2)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
756 blk_cleanup_queue(dev_info->dcssblk_queue); 756 blk_cleanup_queue(dev_info->dcssblk_queue);
757 dev_info->gd->queue = NULL; 757 dev_info->gd->queue = NULL;
758 put_disk(dev_info->gd); 758 put_disk(dev_info->gd);
759 device_unregister(&dev_info->dev);
760 759
761 /* unload all related segments */ 760 /* unload all related segments */
762 list_for_each_entry(entry, &dev_info->seg_list, lh) 761 list_for_each_entry(entry, &dev_info->seg_list, lh)
763 segment_unload(entry->segment_name); 762 segment_unload(entry->segment_name);
764 763
765 put_device(&dev_info->dev);
766 up_write(&dcssblk_devices_sem); 764 up_write(&dcssblk_devices_sem);
767 765
766 device_unregister(&dev_info->dev);
767 put_device(&dev_info->dev);
768
768 rc = count; 769 rc = count;
769out_buf: 770out_buf:
770 kfree(local_buf); 771 kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
303 if (req->cmd_type != REQ_TYPE_FS) { 303 if (req->cmd_type != REQ_TYPE_FS) {
304 blk_start_request(req); 304 blk_start_request(req);
305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); 305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
306 blk_end_request_all(req, -EIO); 306 __blk_end_request_all(req, -EIO);
307 continue; 307 continue;
308 } 308 }
309 309
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
56{ 56{
57 struct sclp_ctl_sccb ctl_sccb; 57 struct sclp_ctl_sccb ctl_sccb;
58 struct sccb_header *sccb; 58 struct sccb_header *sccb;
59 unsigned long copied;
59 int rc; 60 int rc;
60 61
61 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) 62 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
65 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 66 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
66 if (!sccb) 67 if (!sccb)
67 return -ENOMEM; 68 return -ENOMEM;
68 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { 69 copied = PAGE_SIZE -
70 copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
71 if (offsetof(struct sccb_header, length) +
72 sizeof(sccb->length) > copied || sccb->length > copied) {
69 rc = -EFAULT; 73 rc = -EFAULT;
70 goto out_free; 74 goto out_free;
71 } 75 }
72 if (sccb->length > PAGE_SIZE || sccb->length < 8) 76 if (sccb->length < 8) {
73 return -EINVAL; 77 rc = -EINVAL;
74 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
75 rc = -EFAULT;
76 goto out_free; 78 goto out_free;
77 } 79 }
78 rc = sclp_sync_request(ctl_sccb.cmdw, sccb); 80 rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 21a67ed047e8..ff6caab8cc8b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev)
452 else if (depth < 2) 452 else if (depth < 2)
453 depth = 2; 453 depth = 2;
454 scsi_change_queue_depth(sdev, depth); 454 scsi_change_queue_depth(sdev, depth);
455 } else 455 } else {
456 scsi_change_queue_depth(sdev, 1); 456 scsi_change_queue_depth(sdev, 1);
457 457
458 sdev->tagged_supported = 1; 458 sdev->tagged_supported = 1;
459 }
459 460
460 return 0; 461 return 0;
461} 462}
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af4e984..ead83a24bcd1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
688{ 688{
689 struct flowi6 fl; 689 struct flowi6 fl;
690 690
691 memset(&fl, 0, sizeof(fl));
691 if (saddr) 692 if (saddr)
692 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 693 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
693 if (daddr) 694 if (daddr)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 35968bdb4866..8fb9643fe6e3 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
289 atomic64_set(&afu->room, room); 289 atomic64_set(&afu->room, room);
290 if (room) 290 if (room)
291 goto write_rrin; 291 goto write_rrin;
292 udelay(nretry); 292 udelay(1 << nretry);
293 } while (nretry++ < MC_ROOM_RETRY_CNT); 293 } while (nretry++ < MC_ROOM_RETRY_CNT);
294 294
295 pr_err("%s: no cmd_room to send reset\n", __func__); 295 pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
303 if (rrin != 0x1) 303 if (rrin != 0x1)
304 break; 304 break;
305 /* Double delay each time */ 305 /* Double delay each time */
306 udelay(2 << nretry); 306 udelay(1 << nretry);
307 } while (nretry++ < MC_ROOM_RETRY_CNT); 307 } while (nretry++ < MC_ROOM_RETRY_CNT);
308} 308}
309 309
@@ -338,7 +338,7 @@ retry:
338 atomic64_set(&afu->room, room); 338 atomic64_set(&afu->room, room);
339 if (room) 339 if (room)
340 goto write_ioarrin; 340 goto write_ioarrin;
341 udelay(nretry); 341 udelay(1 << nretry);
342 } while (nretry++ < MC_ROOM_RETRY_CNT); 342 } while (nretry++ < MC_ROOM_RETRY_CNT);
343 343
344 dev_err(dev, "%s: no cmd_room to send 0x%X\n", 344 dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
352 * afu->room. 352 * afu->room.
353 */ 353 */
354 if (nretry++ < MC_ROOM_RETRY_CNT) { 354 if (nretry++ < MC_ROOM_RETRY_CNT) {
355 udelay(nretry); 355 udelay(1 << nretry);
356 goto retry; 356 goto retry;
357 } 357 }
358 358
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
683} 683}
684 684
685/** 685/**
686 * term_mc() - terminates the master context 686 * term_intr() - disables all AFU interrupts
687 * @cfg: Internal structure associated with the host. 687 * @cfg: Internal structure associated with the host.
688 * @level: Depth of allocation, where to begin waterfall tear down. 688 * @level: Depth of allocation, where to begin waterfall tear down.
689 * 689 *
690 * Safe to call with AFU/MC in partially allocated/initialized state. 690 * Safe to call with AFU/MC in partially allocated/initialized state.
691 */ 691 */
692static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) 692static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
693{ 693{
694 int rc = 0;
695 struct afu *afu = cfg->afu; 694 struct afu *afu = cfg->afu;
696 struct device *dev = &cfg->dev->dev; 695 struct device *dev = &cfg->dev->dev;
697 696
698 if (!afu || !cfg->mcctx) { 697 if (!afu || !cfg->mcctx) {
699 dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n", 698 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
700 __func__);
701 return; 699 return;
702 } 700 }
703 701
704 switch (level) { 702 switch (level) {
705 case UNDO_START:
706 rc = cxl_stop_context(cfg->mcctx);
707 BUG_ON(rc);
708 case UNMAP_THREE: 703 case UNMAP_THREE:
709 cxl_unmap_afu_irq(cfg->mcctx, 3, afu); 704 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
710 case UNMAP_TWO: 705 case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
713 cxl_unmap_afu_irq(cfg->mcctx, 1, afu); 708 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
714 case FREE_IRQ: 709 case FREE_IRQ:
715 cxl_free_afu_irqs(cfg->mcctx); 710 cxl_free_afu_irqs(cfg->mcctx);
716 case RELEASE_CONTEXT: 711 /* fall through */
717 cfg->mcctx = NULL; 712 case UNDO_NOOP:
713 /* No action required */
714 break;
715 }
716}
717
718/**
719 * term_mc() - terminates the master context
720 * @cfg: Internal structure associated with the host.
721 * @level: Depth of allocation, where to begin waterfall tear down.
722 *
723 * Safe to call with AFU/MC in partially allocated/initialized state.
724 */
725static void term_mc(struct cxlflash_cfg *cfg)
726{
727 int rc = 0;
728 struct afu *afu = cfg->afu;
729 struct device *dev = &cfg->dev->dev;
730
731 if (!afu || !cfg->mcctx) {
732 dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
733 return;
718 } 734 }
735
736 rc = cxl_stop_context(cfg->mcctx);
737 WARN_ON(rc);
738 cfg->mcctx = NULL;
719} 739}
720 740
721/** 741/**
@@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
726 */ 746 */
727static void term_afu(struct cxlflash_cfg *cfg) 747static void term_afu(struct cxlflash_cfg *cfg)
728{ 748{
749 /*
750 * Tear down is carefully orchestrated to ensure
751 * no interrupts can come in when the problem state
752 * area is unmapped.
753 *
754 * 1) Disable all AFU interrupts
755 * 2) Unmap the problem state area
756 * 3) Stop the master context
757 */
758 term_intr(cfg, UNMAP_THREE);
729 if (cfg->afu) 759 if (cfg->afu)
730 stop_afu(cfg); 760 stop_afu(cfg);
731 761
732 term_mc(cfg, UNDO_START); 762 term_mc(cfg);
733 763
734 pr_debug("%s: returning\n", __func__); 764 pr_debug("%s: returning\n", __func__);
735} 765}
@@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
1597} 1627}
1598 1628
1599/** 1629/**
1600 * init_mc() - create and register as the master context 1630 * init_intr() - setup interrupt handlers for the master context
1601 * @cfg: Internal structure associated with the host. 1631 * @cfg: Internal structure associated with the host.
1602 * 1632 *
1603 * Return: 0 on success, -errno on failure 1633 * Return: 0 on success, -errno on failure
1604 */ 1634 */
1605static int init_mc(struct cxlflash_cfg *cfg) 1635static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1636 struct cxl_context *ctx)
1606{ 1637{
1607 struct cxl_context *ctx;
1608 struct device *dev = &cfg->dev->dev;
1609 struct afu *afu = cfg->afu; 1638 struct afu *afu = cfg->afu;
1639 struct device *dev = &cfg->dev->dev;
1610 int rc = 0; 1640 int rc = 0;
1611 enum undo_level level; 1641 enum undo_level level = UNDO_NOOP;
1612
1613 ctx = cxl_get_context(cfg->dev);
1614 if (unlikely(!ctx))
1615 return -ENOMEM;
1616 cfg->mcctx = ctx;
1617
1618 /* Set it up as a master with the CXL */
1619 cxl_set_master(ctx);
1620
1621 /* During initialization reset the AFU to start from a clean slate */
1622 rc = cxl_afu_reset(cfg->mcctx);
1623 if (unlikely(rc)) {
1624 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1625 __func__, rc);
1626 level = RELEASE_CONTEXT;
1627 goto out;
1628 }
1629 1642
1630 rc = cxl_allocate_afu_irqs(ctx, 3); 1643 rc = cxl_allocate_afu_irqs(ctx, 3);
1631 if (unlikely(rc)) { 1644 if (unlikely(rc)) {
1632 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", 1645 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1633 __func__, rc); 1646 __func__, rc);
1634 level = RELEASE_CONTEXT; 1647 level = UNDO_NOOP;
1635 goto out; 1648 goto out;
1636 } 1649 }
1637 1650
@@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
1661 level = UNMAP_TWO; 1674 level = UNMAP_TWO;
1662 goto out; 1675 goto out;
1663 } 1676 }
1677out:
1678 return level;
1679}
1664 1680
1665 rc = 0; 1681/**
1682 * init_mc() - create and register as the master context
1683 * @cfg: Internal structure associated with the host.
1684 *
1685 * Return: 0 on success, -errno on failure
1686 */
1687static int init_mc(struct cxlflash_cfg *cfg)
1688{
1689 struct cxl_context *ctx;
1690 struct device *dev = &cfg->dev->dev;
1691 int rc = 0;
1692 enum undo_level level;
1693
1694 ctx = cxl_get_context(cfg->dev);
1695 if (unlikely(!ctx)) {
1696 rc = -ENOMEM;
1697 goto ret;
1698 }
1699 cfg->mcctx = ctx;
1700
1701 /* Set it up as a master with the CXL */
1702 cxl_set_master(ctx);
1703
1704 /* During initialization reset the AFU to start from a clean slate */
1705 rc = cxl_afu_reset(cfg->mcctx);
1706 if (unlikely(rc)) {
1707 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1708 __func__, rc);
1709 goto ret;
1710 }
1711
1712 level = init_intr(cfg, ctx);
1713 if (unlikely(level)) {
1714 dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
1715 __func__, rc);
1716 goto out;
1717 }
1666 1718
1667 /* This performs the equivalent of the CXL_IOCTL_START_WORK. 1719 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1668 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process 1720 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@ ret:
1678 pr_debug("%s: returning rc=%d\n", __func__, rc); 1730 pr_debug("%s: returning rc=%d\n", __func__, rc);
1679 return rc; 1731 return rc;
1680out: 1732out:
1681 term_mc(cfg, level); 1733 term_intr(cfg, level);
1682 goto ret; 1734 goto ret;
1683} 1735}
1684 1736
@@ -1751,7 +1803,8 @@ out:
1751err2: 1803err2:
1752 kref_put(&afu->mapcount, afu_unmap); 1804 kref_put(&afu->mapcount, afu_unmap);
1753err1: 1805err1:
1754 term_mc(cfg, UNDO_START); 1806 term_intr(cfg, UNMAP_THREE);
1807 term_mc(cfg);
1755 goto out; 1808 goto out;
1756} 1809}
1757 1810
@@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2488 if (unlikely(rc)) 2541 if (unlikely(rc))
2489 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", 2542 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2490 __func__, rc); 2543 __func__, rc);
2491 stop_afu(cfg); 2544 term_afu(cfg);
2492 term_mc(cfg, UNDO_START);
2493 return PCI_ERS_RESULT_NEED_RESET; 2545 return PCI_ERS_RESULT_NEED_RESET;
2494 case pci_channel_io_perm_failure: 2546 case pci_channel_io_perm_failure:
2495 cfg->state = STATE_FAILTERM; 2547 cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed422c7f4..eb9d8f730b38 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
79#define WWPN_BUF_LEN (WWPN_LEN + 1) 79#define WWPN_BUF_LEN (WWPN_LEN + 1)
80 80
81enum undo_level { 81enum undo_level {
82 RELEASE_CONTEXT = 0, 82 UNDO_NOOP = 0,
83 FREE_IRQ, 83 FREE_IRQ,
84 UNMAP_ONE, 84 UNMAP_ONE,
85 UNMAP_TWO, 85 UNMAP_TWO,
86 UNMAP_THREE, 86 UNMAP_THREE
87 UNDO_START
88}; 87};
89 88
90struct dev_dependent_vals { 89struct dev_dependent_vals {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a404a41e871c..8eaed0522aa3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev)
1112 h->sdev = NULL; 1112 h->sdev = NULL;
1113 spin_unlock(&h->pg_lock); 1113 spin_unlock(&h->pg_lock);
1114 if (pg) { 1114 if (pg) {
1115 spin_lock(&pg->lock); 1115 spin_lock_irq(&pg->lock);
1116 list_del_rcu(&h->node); 1116 list_del_rcu(&h->node);
1117 spin_unlock(&pg->lock); 1117 spin_unlock_irq(&pg->lock);
1118 kref_put(&pg->kref, release_port_group); 1118 kref_put(&pg->kref, release_port_group);
1119 } 1119 }
1120 sdev->handler_data = NULL; 1120 sdev->handler_data = NULL;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4db5fb3239a..8c44b9c424af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5030static int 5030static int
5031_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 5031_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5032{ 5032{
5033 int r, i; 5033 int r, i, index;
5034 unsigned long flags; 5034 unsigned long flags;
5035 u32 reply_address; 5035 u32 reply_address;
5036 u16 smid; 5036 u16 smid;
@@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5039 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; 5039 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
5040 u8 hide_flag; 5040 u8 hide_flag;
5041 struct adapter_reply_queue *reply_q; 5041 struct adapter_reply_queue *reply_q;
5042 long reply_post_free; 5042 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
5043 u32 reply_post_free_sz, index = 0;
5044 5043
5045 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5044 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5046 __func__)); 5045 __func__));
@@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5124 _base_assign_reply_queues(ioc); 5123 _base_assign_reply_queues(ioc);
5125 5124
5126 /* initialize Reply Post Free Queue */ 5125 /* initialize Reply Post Free Queue */
5127 reply_post_free_sz = ioc->reply_post_queue_depth * 5126 index = 0;
5128 sizeof(Mpi2DefaultReplyDescriptor_t); 5127 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
5129 reply_post_free = (long)ioc->reply_post[index].reply_post_free;
5130 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 5128 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5129 /*
5130 * If RDPQ is enabled, switch to the next allocation.
5131 * Otherwise advance within the contiguous region.
5132 */
5133 if (ioc->rdpq_array_enable) {
5134 reply_q->reply_post_free =
5135 ioc->reply_post[index++].reply_post_free;
5136 } else {
5137 reply_q->reply_post_free = reply_post_free_contig;
5138 reply_post_free_contig += ioc->reply_post_queue_depth;
5139 }
5140
5131 reply_q->reply_post_host_index = 0; 5141 reply_q->reply_post_host_index = 0;
5132 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
5133 reply_post_free;
5134 for (i = 0; i < ioc->reply_post_queue_depth; i++) 5142 for (i = 0; i < ioc->reply_post_queue_depth; i++)
5135 reply_q->reply_post_free[i].Words = 5143 reply_q->reply_post_free[i].Words =
5136 cpu_to_le64(ULLONG_MAX); 5144 cpu_to_le64(ULLONG_MAX);
5137 if (!_base_is_controller_msix_enabled(ioc)) 5145 if (!_base_is_controller_msix_enabled(ioc))
5138 goto skip_init_reply_post_free_queue; 5146 goto skip_init_reply_post_free_queue;
5139 /*
5140 * If RDPQ is enabled, switch to the next allocation.
5141 * Otherwise advance within the contiguous region.
5142 */
5143 if (ioc->rdpq_array_enable)
5144 reply_post_free = (long)
5145 ioc->reply_post[++index].reply_post_free;
5146 else
5147 reply_post_free += reply_post_free_sz;
5148 } 5147 }
5149 skip_init_reply_post_free_queue: 5148 skip_init_reply_post_free_queue:
5150 5149
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b93fcc..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
784 int pg83_supported = 0; 784 int pg83_supported = 0;
785 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL; 785 unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
786 786
787 if (sdev->skip_vpd_pages) 787 if (!scsi_device_supports_vpd(sdev))
788 return; 788 return;
789
789retry_pg0: 790retry_pg0:
790 vpd_buf = kmalloc(vpd_len, GFP_KERNEL); 791 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
791 if (!vpd_buf) 792 if (!vpd_buf)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 92ffd2406f97..2b642b145be1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
81 return name; 81 return name;
82} 82}
83 83
84#ifdef CONFIG_SCSI_DH
84static const struct { 85static const struct {
85 unsigned char value; 86 unsigned char value;
86 char *name; 87 char *name;
@@ -94,7 +95,7 @@ static const struct {
94 { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, 95 { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
95}; 96};
96 97
97const char *scsi_access_state_name(unsigned char state) 98static const char *scsi_access_state_name(unsigned char state)
98{ 99{
99 int i; 100 int i;
100 char *name = NULL; 101 char *name = NULL;
@@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state)
107 } 108 }
108 return name; 109 return name;
109} 110}
111#endif
110 112
111static int check_set(unsigned long long *val, char *src) 113static int check_set(unsigned long long *val, char *src)
112{ 114{
@@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
226} 228}
227 229
228/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ 230/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
229struct device_attribute dev_attr_hstate = 231static struct device_attribute dev_attr_hstate =
230 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); 232 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
231 233
232static ssize_t 234static ssize_t
@@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
401 NULL 403 NULL
402}; 404};
403 405
404struct attribute_group scsi_shost_attr_group = { 406static struct attribute_group scsi_shost_attr_group = {
405 .attrs = scsi_sysfs_shost_attrs, 407 .attrs = scsi_sysfs_shost_attrs,
406}; 408};
407 409
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457ac9cdb..f52b74cf8d1e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1275 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1275 struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
1276 struct scsi_device *sdp = sdkp->device; 1276 struct scsi_device *sdp = sdkp->device;
1277 struct Scsi_Host *host = sdp->host; 1277 struct Scsi_Host *host = sdp->host;
1278 sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
1278 int diskinfo[4]; 1279 int diskinfo[4];
1279 1280
1280 /* default to most commonly used values */ 1281 /* default to most commonly used values */
1281 diskinfo[0] = 0x40; /* 1 << 6 */ 1282 diskinfo[0] = 0x40; /* 1 << 6 */
1282 diskinfo[1] = 0x20; /* 1 << 5 */ 1283 diskinfo[1] = 0x20; /* 1 << 5 */
1283 diskinfo[2] = sdkp->capacity >> 11; 1284 diskinfo[2] = capacity >> 11;
1284 1285
1285 /* override with calculated, extended default, or driver values */ 1286 /* override with calculated, extended default, or driver values */
1286 if (host->hostt->bios_param) 1287 if (host->hostt->bios_param)
1287 host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo); 1288 host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
1288 else 1289 else
1289 scsicam_bios_param(bdev, sdkp->capacity, diskinfo); 1290 scsicam_bios_param(bdev, capacity, diskinfo);
1290 1291
1291 geo->heads = diskinfo[0]; 1292 geo->heads = diskinfo[0];
1292 geo->sectors = diskinfo[1]; 1293 geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
2337 if (sdkp->capacity > 0xffffffff) 2338 if (sdkp->capacity > 0xffffffff)
2338 sdp->use_16_for_rw = 1; 2339 sdp->use_16_for_rw = 1;
2339 2340
2340 /* Rescale capacity to 512-byte units */
2341 if (sector_size == 4096)
2342 sdkp->capacity <<= 3;
2343 else if (sector_size == 2048)
2344 sdkp->capacity <<= 2;
2345 else if (sector_size == 1024)
2346 sdkp->capacity <<= 1;
2347
2348 blk_queue_physical_block_size(sdp->request_queue, 2341 blk_queue_physical_block_size(sdp->request_queue,
2349 sdkp->physical_block_size); 2342 sdkp->physical_block_size);
2350 sdkp->device->sector_size = sector_size; 2343 sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
2795 sdkp->ws10 = 1; 2788 sdkp->ws10 = 1;
2796} 2789}
2797 2790
2798static int sd_try_extended_inquiry(struct scsi_device *sdp)
2799{
2800 /* Attempt VPD inquiry if the device blacklist explicitly calls
2801 * for it.
2802 */
2803 if (sdp->try_vpd_pages)
2804 return 1;
2805 /*
2806 * Although VPD inquiries can go to SCSI-2 type devices,
2807 * some USB ones crash on receiving them, and the pages
2808 * we currently ask for are for SPC-3 and beyond
2809 */
2810 if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
2811 return 1;
2812 return 0;
2813}
2814
2815static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
2816{
2817 return blocks << (ilog2(sdev->sector_size) - 9);
2818}
2819
2820/** 2791/**
2821 * sd_revalidate_disk - called the first time a new disk is seen, 2792 * sd_revalidate_disk - called the first time a new disk is seen,
2822 * performs disk spin up, read_capacity, etc. 2793 * performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2856 if (sdkp->media_present) { 2827 if (sdkp->media_present) {
2857 sd_read_capacity(sdkp, buffer); 2828 sd_read_capacity(sdkp, buffer);
2858 2829
2859 if (sd_try_extended_inquiry(sdp)) { 2830 if (scsi_device_supports_vpd(sdp)) {
2860 sd_read_block_provisioning(sdkp); 2831 sd_read_block_provisioning(sdkp);
2861 sd_read_block_limits(sdkp); 2832 sd_read_block_limits(sdkp);
2862 sd_read_block_characteristics(sdkp); 2833 sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2891 if (sdkp->opt_xfer_blocks && 2862 if (sdkp->opt_xfer_blocks &&
2892 sdkp->opt_xfer_blocks <= dev_max && 2863 sdkp->opt_xfer_blocks <= dev_max &&
2893 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && 2864 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2894 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) 2865 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
2895 rw_max = q->limits.io_opt = 2866 rw_max = q->limits.io_opt =
2896 sdkp->opt_xfer_blocks * sdp->sector_size; 2867 sdkp->opt_xfer_blocks * sdp->sector_size;
2897 else 2868 else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
2900 /* Combine with controller limits */ 2871 /* Combine with controller limits */
2901 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 2872 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
2902 2873
2903 set_capacity(disk, sdkp->capacity); 2874 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
2904 sd_config_write_same(sdkp); 2875 sd_config_write_same(sdkp);
2905 kfree(buffer); 2876 kfree(buffer);
2906 2877
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
65 struct device dev; 65 struct device dev;
66 struct gendisk *disk; 66 struct gendisk *disk;
67 atomic_t openers; 67 atomic_t openers;
68 sector_t capacity; /* size in 512-byte sectors */ 68 sector_t capacity; /* size in logical blocks */
69 u32 max_xfer_blocks; 69 u32 max_xfer_blocks;
70 u32 opt_xfer_blocks; 70 u32 opt_xfer_blocks;
71 u32 max_ws_blocks; 71 u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
146 return 0; 146 return 0;
147} 147}
148 148
149static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
150{
151 return blocks << (ilog2(sdev->sector_size) - 9);
152}
153
149/* 154/*
150 * A DIF-capable target device can be formatted with different 155 * A DIF-capable target device can be formatted with different
151 * protection schemes. Currently 0 through 3 are defined: 156 * protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138ddf94..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4941 out_unmap: 4941 out_unmap:
4942 if (res > 0) { 4942 if (res > 0) {
4943 for (j=0; j < res; j++) 4943 for (j=0; j < res; j++)
4944 page_cache_release(pages[j]); 4944 put_page(pages[j]);
4945 res = 0; 4945 res = 0;
4946 } 4946 }
4947 kfree(pages); 4947 kfree(pages);
@@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
4963 /* FIXME: cache flush missing for rw==READ 4963 /* FIXME: cache flush missing for rw==READ
4964 * FIXME: call the correct reference counting function 4964 * FIXME: call the correct reference counting function
4965 */ 4965 */
4966 page_cache_release(page); 4966 put_page(page);
4967 } 4967 }
4968 kfree(STbp->mapped_pages); 4968 kfree(STbp->mapped_pages);
4969 STbp->mapped_pages = NULL; 4969 STbp->mapped_pages = NULL;
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c71e67..837effe19907 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
491 genpd->dev_ops.active_wakeup = scpsys_active_wakeup; 491 genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
492 492
493 /* 493 /*
494 * With CONFIG_PM disabled turn on all domains to make the 494 * Initially turn on all domains to make the domains usable
495 * hardware usable. 495 * with !CONFIG_PM and to get the hardware in sync with the
496 * software. The unused domains will be switched off during
497 * late_init time.
496 */ 498 */
497 if (!IS_ENABLED(CONFIG_PM)) 499 genpd->power_on(genpd);
498 genpd->power_on(genpd);
499 500
500 pm_genpd_init(genpd, NULL, true); 501 pm_genpd_init(genpd, NULL, false);
501 } 502 }
502 503
503 /* 504 /*
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be87c38..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
211 struct spi_transfer *transfer) 211 struct spi_transfer *transfer)
212{ 212{
213 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 213 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
214 unsigned int bpw = transfer->bits_per_word; 214 unsigned int bpw;
215 215
216 if (!master->dma_rx) 216 if (!master->dma_rx)
217 return false; 217 return false;
218 218
219 if (!transfer)
220 return false;
221
222 bpw = transfer->bits_per_word;
219 if (!bpw) 223 if (!bpw)
220 bpw = spi->bits_per_word; 224 bpw = spi->bits_per_word;
221 225
@@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
333static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, 337static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
334 struct spi_imx_config *config) 338 struct spi_imx_config *config)
335{ 339{
336 u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0; 340 u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
337 u32 clk = config->speed_hz, delay, reg; 341 u32 clk = config->speed_hz, delay, reg;
342 u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
338 343
339 /* 344 /*
340 * The hardware seems to have a race condition when changing modes. The 345 * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
358 363
359 if (config->mode & SPI_CPHA) 364 if (config->mode & SPI_CPHA)
360 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs); 365 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
366 else
367 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
361 368
362 if (config->mode & SPI_CPOL) { 369 if (config->mode & SPI_CPOL) {
363 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs); 370 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
364 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs); 371 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
372 } else {
373 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
374 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
365 } 375 }
366 if (config->mode & SPI_CS_HIGH) 376 if (config->mode & SPI_CS_HIGH)
367 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs); 377 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
378 else
379 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
368 380
369 if (spi_imx->usedma) 381 if (spi_imx->usedma)
370 ctrl |= MX51_ECSPI_CTRL_SMC; 382 ctrl |= MX51_ECSPI_CTRL_SMC;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
423 423
424 if (mcspi_dma->dma_tx) { 424 if (mcspi_dma->dma_tx) {
425 struct dma_async_tx_descriptor *tx; 425 struct dma_async_tx_descriptor *tx;
426 struct scatterlist sg;
427 426
428 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); 427 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
429 428
430 sg_init_table(&sg, 1); 429 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
431 sg_dma_address(&sg) = xfer->tx_dma; 430 xfer->tx_sg.nents, DMA_MEM_TO_DEV,
432 sg_dma_len(&sg) = xfer->len; 431 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
433
434 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
435 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
436 if (tx) { 432 if (tx) {
437 tx->callback = omap2_mcspi_tx_callback; 433 tx->callback = omap2_mcspi_tx_callback;
438 tx->callback_param = spi; 434 tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
478 474
479 if (mcspi_dma->dma_rx) { 475 if (mcspi_dma->dma_rx) {
480 struct dma_async_tx_descriptor *tx; 476 struct dma_async_tx_descriptor *tx;
481 struct scatterlist sg;
482 477
483 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); 478 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
484 479
485 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) 480 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
486 dma_count -= es; 481 dma_count -= es;
487 482
488 sg_init_table(&sg, 1); 483 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
489 sg_dma_address(&sg) = xfer->rx_dma; 484 xfer->rx_sg.nents, DMA_DEV_TO_MEM,
490 sg_dma_len(&sg) = dma_count; 485 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
491
492 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
493 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
494 DMA_CTRL_ACK);
495 if (tx) { 486 if (tx) {
496 tx->callback = omap2_mcspi_rx_callback; 487 tx->callback = omap2_mcspi_rx_callback;
497 tx->callback_param = spi; 488 tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
505 omap2_mcspi_set_dma_req(spi, 1, 1); 496 omap2_mcspi_set_dma_req(spi, 1, 1);
506 497
507 wait_for_completion(&mcspi_dma->dma_rx_completion); 498 wait_for_completion(&mcspi_dma->dma_rx_completion);
508 dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
509 DMA_FROM_DEVICE);
510 499
511 if (mcspi->fifo_depth > 0) 500 if (mcspi->fifo_depth > 0)
512 return count; 501 return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
619 608
620 if (tx != NULL) { 609 if (tx != NULL) {
621 wait_for_completion(&mcspi_dma->dma_tx_completion); 610 wait_for_completion(&mcspi_dma->dma_tx_completion);
622 dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
623 DMA_TO_DEVICE);
624 611
625 if (mcspi->fifo_depth > 0) { 612 if (mcspi->fifo_depth > 0) {
626 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; 613 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
1087 gpio_free(spi->cs_gpio); 1074 gpio_free(spi->cs_gpio);
1088} 1075}
1089 1076
1077static bool omap2_mcspi_can_dma(struct spi_master *master,
1078 struct spi_device *spi,
1079 struct spi_transfer *xfer)
1080{
1081 if (xfer->len < DMA_MIN_BYTES)
1082 return false;
1083
1084 return true;
1085}
1086
1090static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, 1087static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1091 struct spi_device *spi, struct spi_transfer *t) 1088 struct spi_device *spi, struct spi_transfer *t)
1092{ 1089{
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
1268 return -EINVAL; 1265 return -EINVAL;
1269 } 1266 }
1270 1267
1271 if (len < DMA_MIN_BYTES)
1272 goto skip_dma_map;
1273
1274 if (mcspi_dma->dma_tx && tx_buf != NULL) {
1275 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1276 len, DMA_TO_DEVICE);
1277 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1278 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1279 'T', len);
1280 return -EINVAL;
1281 }
1282 }
1283 if (mcspi_dma->dma_rx && rx_buf != NULL) {
1284 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1285 DMA_FROM_DEVICE);
1286 if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1287 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1288 'R', len);
1289 if (tx_buf != NULL)
1290 dma_unmap_single(mcspi->dev, t->tx_dma,
1291 len, DMA_TO_DEVICE);
1292 return -EINVAL;
1293 }
1294 }
1295
1296skip_dma_map:
1297 return omap2_mcspi_work_one(mcspi, spi, t); 1268 return omap2_mcspi_work_one(mcspi, spi, t);
1298} 1269}
1299 1270
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1377 master->transfer_one = omap2_mcspi_transfer_one; 1348 master->transfer_one = omap2_mcspi_transfer_one;
1378 master->set_cs = omap2_mcspi_set_cs; 1349 master->set_cs = omap2_mcspi_set_cs;
1379 master->cleanup = omap2_mcspi_cleanup; 1350 master->cleanup = omap2_mcspi_cleanup;
1351 master->can_dma = omap2_mcspi_can_dma;
1380 master->dev.of_node = node; 1352 master->dev.of_node = node;
1381 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; 1353 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
1382 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; 1354 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a4020f6f..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
534 if (WARN_ON(rs->speed > MAX_SCLK_OUT)) 534 if (WARN_ON(rs->speed > MAX_SCLK_OUT))
535 rs->speed = MAX_SCLK_OUT; 535 rs->speed = MAX_SCLK_OUT;
536 536
537 /* the minimum divsor is 2 */ 537 /* the minimum divisor is 2 */
538 if (rs->max_freq < 2 * rs->speed) { 538 if (rs->max_freq < 2 * rs->speed) {
539 clk_set_rate(rs->spiclk, 2 * rs->speed); 539 clk_set_rate(rs->spiclk, 2 * rs->speed);
540 rs->max_freq = clk_get_rate(rs->spiclk); 540 rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev)
730 master->transfer_one = rockchip_spi_transfer_one; 730 master->transfer_one = rockchip_spi_transfer_one;
731 master->handle_err = rockchip_spi_handle_err; 731 master->handle_err = rockchip_spi_handle_err;
732 732
733 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); 733 rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
734 if (IS_ERR_OR_NULL(rs->dma_tx.ch)) { 734 if (IS_ERR(rs->dma_tx.ch)) {
735 /* Check tx to see if we need defer probing driver */ 735 /* Check tx to see if we need defer probing driver */
736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { 736 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
737 ret = -EPROBE_DEFER; 737 ret = -EPROBE_DEFER;
738 goto err_get_fifo_len; 738 goto err_get_fifo_len;
739 } 739 }
740 dev_warn(rs->dev, "Failed to request TX DMA channel\n"); 740 dev_warn(rs->dev, "Failed to request TX DMA channel\n");
741 rs->dma_tx.ch = NULL;
741 } 742 }
742 743
743 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); 744 rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
744 if (!rs->dma_rx.ch) { 745 if (IS_ERR(rs->dma_rx.ch)) {
745 if (rs->dma_tx.ch) { 746 if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
746 dma_release_channel(rs->dma_tx.ch); 747 dma_release_channel(rs->dma_tx.ch);
747 rs->dma_tx.ch = NULL; 748 rs->dma_tx.ch = NULL;
749 ret = -EPROBE_DEFER;
750 goto err_get_fifo_len;
748 } 751 }
749 dev_warn(rs->dev, "Failed to request RX DMA channel\n"); 752 dev_warn(rs->dev, "Failed to request RX DMA channel\n");
753 rs->dma_rx.ch = NULL;
750 } 754 }
751 755
752 if (rs->dma_tx.ch && rs->dma_rx.ch) { 756 if (rs->dma_tx.ch && rs->dma_rx.ch) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f90d799..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
1209 struct spi_master *master = 1209 struct spi_master *master =
1210 container_of(work, struct spi_master, pump_messages); 1210 container_of(work, struct spi_master, pump_messages);
1211 1211
1212 __spi_pump_messages(master, true, false); 1212 __spi_pump_messages(master, true, master->bus_lock_flag);
1213} 1213}
1214 1214
1215static int spi_init_queue(struct spi_master *master) 1215static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2853 */ 2853 */
2854int spi_sync(struct spi_device *spi, struct spi_message *message) 2854int spi_sync(struct spi_device *spi, struct spi_message *message)
2855{ 2855{
2856 return __spi_sync(spi, message, 0); 2856 return __spi_sync(spi, message, spi->master->bus_lock_flag);
2857} 2857}
2858EXPORT_SYMBOL_GPL(spi_sync); 2858EXPORT_SYMBOL_GPL(spi_sync);
2859 2859
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index cf84581287b9..5bac28a3944e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig"
30 30
31source "drivers/staging/comedi/Kconfig" 31source "drivers/staging/comedi/Kconfig"
32 32
33source "drivers/staging/olpc_dcon/Kconfig"
34
33source "drivers/staging/rtl8192u/Kconfig" 35source "drivers/staging/rtl8192u/Kconfig"
34 36
35source "drivers/staging/rtl8192e/Kconfig" 37source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7d6448d20464..a954242b0f2c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@ obj-y += media/
4obj-$(CONFIG_SLICOSS) += slicoss/ 4obj-$(CONFIG_SLICOSS) += slicoss/
5obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 5obj-$(CONFIG_PRISM2_USB) += wlan-ng/
6obj-$(CONFIG_COMEDI) += comedi/ 6obj-$(CONFIG_COMEDI) += comedi/
7obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
7obj-$(CONFIG_RTL8192U) += rtl8192u/ 8obj-$(CONFIG_RTL8192U) += rtl8192u/
8obj-$(CONFIG_RTL8192E) += rtl8192e/ 9obj-$(CONFIG_RTL8192E) += rtl8192e/
9obj-$(CONFIG_R8712U) += rtl8712/ 10obj-$(CONFIG_R8712U) += rtl8712/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index dab486261154..13335437c69c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
88} while (0) 88} while (0)
89 89
90#ifndef LIBCFS_VMALLOC_SIZE 90#ifndef LIBCFS_VMALLOC_SIZE
91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ 91#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
92#endif 92#endif
93 93
94#define LIBCFS_ALLOC_PRE(size, mask) \ 94#define LIBCFS_ALLOC_PRE(size, mask) \
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5ec8..837eb22749c3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
57#include "../libcfs_cpu.h" 57#include "../libcfs_cpu.h"
58#endif 58#endif
59 59
60#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1)) 60#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
61#define page_index(p) ((p)->index) 61#define page_index(p) ((p)->index)
62 62
63#define memory_pressure_get() (current->flags & PF_MEMALLOC) 63#define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
67#if BITS_PER_LONG == 32 67#if BITS_PER_LONG == 32
68/* limit to lowmem on 32-bit systems */ 68/* limit to lowmem on 32-bit systems */
69#define NUM_CACHEPAGES \ 69#define NUM_CACHEPAGES \
70 min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) 70 min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
71#else 71#else
72#define NUM_CACHEPAGES totalram_pages 72#define NUM_CACHEPAGES totalram_pages
73#endif 73#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
514 /** 514 /**
515 * Starting offset of the fragment within the page. Note that the 515 * Starting offset of the fragment within the page. Note that the
516 * end of the fragment must not pass the end of the page; i.e., 516 * end of the fragment must not pass the end of the page; i.e.,
517 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. 517 * kiov_len + kiov_offset <= PAGE_SIZE.
518 */ 518 */
519 unsigned int kiov_offset; 519 unsigned int kiov_offset;
520} lnet_kiov_t; 520} lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 3e1f24e77f64..d4ce06d0aeeb 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
291 291
292 for (nob = i = 0; i < niov; i++) { 292 for (nob = i = 0; i < niov; i++) {
293 if ((kiov[i].kiov_offset && i > 0) || 293 if ((kiov[i].kiov_offset && i > 0) ||
294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) 294 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
295 return NULL; 295 return NULL;
296 296
297 pages[i] = kiov[i].kiov_page; 297 pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e5102fe06..c3d628bac5b8 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
517 max = TCD_MAX_PAGES; 517 max = TCD_MAX_PAGES;
518 } else { 518 } else {
519 max = max / num_possible_cpus(); 519 max = max / num_possible_cpus();
520 max <<= (20 - PAGE_CACHE_SHIFT); 520 max <<= (20 - PAGE_SHIFT);
521 } 521 }
522 rc = cfs_tracefile_init(max); 522 rc = cfs_tracefile_init(max);
523 523
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index ec3bc04bd89f..244eb89eef68 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
182 if (tcd->tcd_cur_pages > 0) { 182 if (tcd->tcd_cur_pages > 0) {
183 __LASSERT(!list_empty(&tcd->tcd_pages)); 183 __LASSERT(!list_empty(&tcd->tcd_pages));
184 tage = cfs_tage_from_list(tcd->tcd_pages.prev); 184 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
185 if (tage->used + len <= PAGE_CACHE_SIZE) 185 if (tage->used + len <= PAGE_SIZE)
186 return tage; 186 return tage;
187 } 187 }
188 188
@@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
260 * from here: this will lead to infinite recursion. 260 * from here: this will lead to infinite recursion.
261 */ 261 */
262 262
263 if (len > PAGE_CACHE_SIZE) { 263 if (len > PAGE_SIZE) {
264 pr_err("cowardly refusing to write %lu bytes in a page\n", len); 264 pr_err("cowardly refusing to write %lu bytes in a page\n", len);
265 return NULL; 265 return NULL;
266 } 266 }
@@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
349 for (i = 0; i < 2; i++) { 349 for (i = 0; i < 2; i++) {
350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1); 350 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
351 if (!tage) { 351 if (!tage) {
352 if (needed + known_size > PAGE_CACHE_SIZE) 352 if (needed + known_size > PAGE_SIZE)
353 mask |= D_ERROR; 353 mask |= D_ERROR;
354 354
355 cfs_trace_put_tcd(tcd); 355 cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
360 string_buf = (char *)page_address(tage->page) + 360 string_buf = (char *)page_address(tage->page) +
361 tage->used + known_size; 361 tage->used + known_size;
362 362
363 max_nob = PAGE_CACHE_SIZE - tage->used - known_size; 363 max_nob = PAGE_SIZE - tage->used - known_size;
364 if (max_nob <= 0) { 364 if (max_nob <= 0) {
365 printk(KERN_EMERG "negative max_nob: %d\n", 365 printk(KERN_EMERG "negative max_nob: %d\n",
366 max_nob); 366 max_nob);
@@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
424 __LASSERT(debug_buf == string_buf); 424 __LASSERT(debug_buf == string_buf);
425 425
426 tage->used += needed; 426 tage->used += needed;
427 __LASSERT(tage->used <= PAGE_CACHE_SIZE); 427 __LASSERT(tage->used <= PAGE_SIZE);
428 428
429console: 429console:
430 if ((mask & libcfs_printk) == 0) { 430 if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
835 835
836int cfs_trace_allocate_string_buffer(char **str, int nob) 836int cfs_trace_allocate_string_buffer(char **str, int nob)
837{ 837{
838 if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ 838 if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
839 return -EINVAL; 839 return -EINVAL;
840 840
841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); 841 *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
951 } 951 }
952 952
953 mb /= num_possible_cpus(); 953 mb /= num_possible_cpus();
954 pages = mb << (20 - PAGE_CACHE_SHIFT); 954 pages = mb << (20 - PAGE_SHIFT);
955 955
956 cfs_tracefile_write_lock(); 956 cfs_tracefile_write_lock();
957 957
@@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
977 977
978 cfs_tracefile_read_unlock(); 978 cfs_tracefile_read_unlock();
979 979
980 return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; 980 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
981} 981}
982 982
983static int tracefiled(void *arg) 983static int tracefiled(void *arg)
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f9044dd3..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
87extern int libcfs_panic_in_progress; 87extern int libcfs_panic_in_progress;
88int cfs_trace_max_debug_mb(void); 88int cfs_trace_max_debug_mb(void);
89 89
90#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 90#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
91#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 91#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
92#define CFS_TRACEFILE_SIZE (500 << 20) 92#define CFS_TRACEFILE_SIZE (500 << 20)
93 93
@@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
96/* 96/*
97 * Private declare for tracefile 97 * Private declare for tracefile
98 */ 98 */
99#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) 99#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
100#define TCD_STOCK_PAGES (TCD_MAX_PAGES) 100#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
101 101
102#define CFS_TRACEFILE_SIZE (500 << 20) 102#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@ do { \
257do { \ 257do { \
258 __LASSERT(tage); \ 258 __LASSERT(tage); \
259 __LASSERT(tage->page); \ 259 __LASSERT(tage->page); \
260 __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ 260 __LASSERT(tage->used <= PAGE_SIZE); \
261 __LASSERT(page_count(tage->page) > 0); \ 261 __LASSERT(page_count(tage->page) > 0); \
262} while (0) 262} while (0)
263 263
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f99f90..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
139 for (i = 0; i < (int)niov; i++) { 139 for (i = 0; i < (int)niov; i++) {
140 /* We take the page pointer on trust */ 140 /* We take the page pointer on trust */
141 if (lmd->md_iov.kiov[i].kiov_offset + 141 if (lmd->md_iov.kiov[i].kiov_offset +
142 lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) 142 lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
143 return -EINVAL; /* invalid length */ 143 return -EINVAL; /* invalid length */
144 144
145 total_length += lmd->md_iov.kiov[i].kiov_len; 145 total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 0009a8de77d5..f19aa9320e34 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
549 if (len <= frag_len) { 549 if (len <= frag_len) {
550 dst->kiov_len = len; 550 dst->kiov_len = len;
551 LASSERT(dst->kiov_offset + dst->kiov_len 551 LASSERT(dst->kiov_offset + dst->kiov_len
552 <= PAGE_CACHE_SIZE); 552 <= PAGE_SIZE);
553 return niov; 553 return niov;
554 } 554 }
555 555
556 dst->kiov_len = frag_len; 556 dst->kiov_len = frag_len;
557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); 557 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
558 558
559 len -= frag_len; 559 len -= frag_len;
560 dst++; 560 dst++;
@@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
887 rbp = &the_lnet.ln_rtrpools[cpt][0]; 887 rbp = &the_lnet.ln_rtrpools[cpt][0];
888 888
889 LASSERT(msg->msg_len <= LNET_MTU); 889 LASSERT(msg->msg_len <= LNET_MTU);
890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { 890 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
891 rbp++; 891 rbp++;
892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); 892 LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
893 } 893 }
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c2753dd63..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
166 nalloc = 16; /* first guess at max interfaces */ 166 nalloc = 16; /* first guess at max interfaces */
167 toobig = 0; 167 toobig = 0;
168 for (;;) { 168 for (;;) {
169 if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { 169 if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
170 toobig = 1; 170 toobig = 1;
171 nalloc = PAGE_CACHE_SIZE / sizeof(*ifr); 171 nalloc = PAGE_SIZE / sizeof(*ifr);
172 CWARN("Too many interfaces: only enumerating first %d\n", 172 CWARN("Too many interfaces: only enumerating first %d\n",
173 nalloc); 173 nalloc);
174 } 174 }
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf9d58f..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
27#define LNET_NRB_SMALL_PAGES 1 27#define LNET_NRB_SMALL_PAGES 1
28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ 28#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) 29#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ 30#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
31 PAGE_CACHE_SHIFT) 31 PAGE_SHIFT)
32 32
33static char *forwarding = ""; 33static char *forwarding = "";
34module_param(forwarding, charp, 0444); 34module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1338 return NULL; 1338 return NULL;
1339 } 1339 }
1340 1340
1341 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; 1341 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1342 rb->rb_kiov[i].kiov_offset = 0; 1342 rb->rb_kiov[i].kiov_offset = 0;
1343 rb->rb_kiov[i].kiov_page = page; 1343 rb->rb_kiov[i].kiov_page = page;
1344 } 1344 }
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index eebc92412061..dcb6e506f592 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
90 * NB: this is not going to work for variable page size, 90 * NB: this is not going to work for variable page size,
91 * but we have to keep it for compatibility 91 * but we have to keep it for compatibility
92 */ 92 */
93 len = npg * PAGE_CACHE_SIZE; 93 len = npg * PAGE_SIZE;
94 94
95 } else { 95 } else {
96 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 96 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
104 opc = breq->blk_opc; 104 opc = breq->blk_opc;
105 flags = breq->blk_flags; 105 flags = breq->blk_flags;
106 len = breq->blk_len; 106 len = breq->blk_len;
107 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 107 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
108 } 108 }
109 109
110 if (npg > LNET_MAX_IOV || npg <= 0) 110 if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
167 167
168 if (pattern == LST_BRW_CHECK_SIMPLE) { 168 if (pattern == LST_BRW_CHECK_SIMPLE) {
169 memcpy(addr, &magic, BRW_MSIZE); 169 memcpy(addr, &magic, BRW_MSIZE);
170 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 170 addr += PAGE_SIZE - BRW_MSIZE;
171 memcpy(addr, &magic, BRW_MSIZE); 171 memcpy(addr, &magic, BRW_MSIZE);
172 return; 172 return;
173 } 173 }
174 174
175 if (pattern == LST_BRW_CHECK_FULL) { 175 if (pattern == LST_BRW_CHECK_FULL) {
176 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) 176 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
177 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); 177 memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
178 return; 178 return;
179 } 179 }
@@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
198 if (data != magic) 198 if (data != magic)
199 goto bad_data; 199 goto bad_data;
200 200
201 addr += PAGE_CACHE_SIZE - BRW_MSIZE; 201 addr += PAGE_SIZE - BRW_MSIZE;
202 data = *((__u64 *)addr); 202 data = *((__u64 *)addr);
203 if (data != magic) 203 if (data != magic)
204 goto bad_data; 204 goto bad_data;
@@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
207 } 207 }
208 208
209 if (pattern == LST_BRW_CHECK_FULL) { 209 if (pattern == LST_BRW_CHECK_FULL) {
210 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { 210 for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
211 data = *(((__u64 *)addr) + i); 211 data = *(((__u64 *)addr) + i);
212 if (data != magic) 212 if (data != magic)
213 goto bad_data; 213 goto bad_data;
@@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
278 opc = breq->blk_opc; 278 opc = breq->blk_opc;
279 flags = breq->blk_flags; 279 flags = breq->blk_flags;
280 npg = breq->blk_npg; 280 npg = breq->blk_npg;
281 len = npg * PAGE_CACHE_SIZE; 281 len = npg * PAGE_SIZE;
282 282
283 } else { 283 } else {
284 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; 284 test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
292 opc = breq->blk_opc; 292 opc = breq->blk_opc;
293 flags = breq->blk_flags; 293 flags = breq->blk_flags;
294 len = breq->blk_len; 294 len = breq->blk_len;
295 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 295 npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
296 } 296 }
297 297
298 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); 298 rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
463 reply->brw_status = EINVAL; 463 reply->brw_status = EINVAL;
464 return 0; 464 return 0;
465 } 465 }
466 npg = reqst->brw_len >> PAGE_CACHE_SHIFT; 466 npg = reqst->brw_len >> PAGE_SHIFT;
467 467
468 } else { 468 } else {
469 npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 469 npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
470 } 470 }
471 471
472 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; 472 replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5c7cb72eac9a..79ee6c0bf7c1 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
743 if (args->lstio_tes_param && 743 if (args->lstio_tes_param &&
744 (args->lstio_tes_param_len <= 0 || 744 (args->lstio_tes_param_len <= 0 ||
745 args->lstio_tes_param_len > 745 args->lstio_tes_param_len >
746 PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) 746 PAGE_SIZE - sizeof(lstcon_test_t)))
747 return -EINVAL; 747 return -EINVAL;
748 748
749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); 749 LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
819 819
820 opc = data->ioc_u32[0]; 820 opc = data->ioc_u32[0];
821 821
822 if (data->ioc_plen1 > PAGE_CACHE_SIZE) 822 if (data->ioc_plen1 > PAGE_SIZE)
823 return -EINVAL; 823 return -EINVAL;
824 824
825 LIBCFS_ALLOC(buf, data->ioc_plen1); 825 LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index bcd78888f9cc..35a227d0c657 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
786 test_bulk_req_t *brq = &req->tsr_u.bulk_v0; 786 test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
787 787
788 brq->blk_opc = param->blk_opc; 788 brq->blk_opc = param->blk_opc;
789 brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / 789 brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
790 PAGE_CACHE_SIZE; 790 PAGE_SIZE;
791 brq->blk_flags = param->blk_flags; 791 brq->blk_flags = param->blk_flags;
792 792
793 return 0; 793 return 0;
@@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
822 if (transop == LST_TRANS_TSBCLIADD) { 822 if (transop == LST_TRANS_TSBCLIADD) {
823 npg = sfw_id_pages(test->tes_span); 823 npg = sfw_id_pages(test->tes_span);
824 nob = !(feats & LST_FEAT_BULK_LEN) ? 824 nob = !(feats & LST_FEAT_BULK_LEN) ?
825 npg * PAGE_CACHE_SIZE : 825 npg * PAGE_SIZE :
826 sizeof(lnet_process_id_packed_t) * test->tes_span; 826 sizeof(lnet_process_id_packed_t) * test->tes_span;
827 } 827 }
828 828
@@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
851 LASSERT(nob > 0); 851 LASSERT(nob > 0);
852 852
853 len = !(feats & LST_FEAT_BULK_LEN) ? 853 len = !(feats & LST_FEAT_BULK_LEN) ?
854 PAGE_CACHE_SIZE : 854 PAGE_SIZE :
855 min_t(int, nob, PAGE_CACHE_SIZE); 855 min_t(int, nob, PAGE_SIZE);
856 nob -= len; 856 nob -= len;
857 857
858 bulk->bk_iovs[i].kiov_offset = 0; 858 bulk->bk_iovs[i].kiov_offset = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 926c3970c498..e2c532399366 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
1161 int len; 1161 int len;
1162 1162
1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { 1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
1164 len = npg * PAGE_CACHE_SIZE; 1164 len = npg * PAGE_SIZE;
1165 1165
1166 } else { 1166 } else {
1167 len = sizeof(lnet_process_id_packed_t) * 1167 len = sizeof(lnet_process_id_packed_t) *
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 69be7d6f48fa..7d7748d96332 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
90static int 90static int
91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) 91srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
92{ 92{
93 nob = min_t(int, nob, PAGE_CACHE_SIZE); 93 nob = min_t(int, nob, PAGE_SIZE);
94 94
95 LASSERT(nob > 0); 95 LASSERT(nob > 0);
96 LASSERT(i >= 0 && i < bk->bk_niov); 96 LASSERT(i >= 0 && i < bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 288522d4d7b9..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
390 } tsi_u; 390 } tsi_u;
391} sfw_test_instance_t; 391} sfw_test_instance_t;
392 392
393/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at 393/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
394 * the end of pages are not used */ 394 * pages are not used */
395#define SFW_MAX_CONCUR LST_MAX_CONCUR 395#define SFW_MAX_CONCUR LST_MAX_CONCUR
396#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) 396#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) 397#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) 398#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
399 399
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
52 return; 52 return;
53 53
54 if (PagePrivate(page)) 54 if (PagePrivate(page))
55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); 55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
56 56
57 cancel_dirty_page(page); 57 cancel_dirty_page(page);
58 ClearPageMappedToDisk(page); 58 ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b13a305..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
1118 { \ 1118 { \
1119 type *value; \ 1119 type *value; \
1120 \ 1120 \
1121 CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ 1121 CLASSERT(PAGE_SIZE >= sizeof (*value)); \
1122 \ 1122 \
1123 value = kzalloc(sizeof(*value), GFP_NOFS); \ 1123 value = kzalloc(sizeof(*value), GFP_NOFS); \
1124 if (!value) \ 1124 if (!value) \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index da8bc6eadd13..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
1022 * MDS_READPAGE page size 1022 * MDS_READPAGE page size
1023 * 1023 *
1024 * This is the directory page size packed in MDS_READPAGE RPC. 1024 * This is the directory page size packed in MDS_READPAGE RPC.
1025 * It's different than PAGE_CACHE_SIZE because the client needs to 1025 * It's different than PAGE_SIZE because the client needs to
1026 * access the struct lu_dirpage header packed at the beginning of 1026 * access the struct lu_dirpage header packed at the beginning of
1027 * the "page" and without this there isn't any way to know find the 1027 * the "page" and without this there isn't any way to know find the
1028 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. 1028 * lu_dirpage header is if client and server PAGE_SIZE differ.
1029 */ 1029 */
1030#define LU_PAGE_SHIFT 12 1030#define LU_PAGE_SHIFT 12
1031#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) 1031#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1032#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) 1032#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1033 1033
1034#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) 1034#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
1035 1035
1036/** @} lu_dir */ 1036/** @} lu_dir */
1037 1037
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f3bef2..af77eb359c43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
155 if (cli->cl_max_mds_easize < body->max_mdsize) { 155 if (cli->cl_max_mds_easize < body->max_mdsize) {
156 cli->cl_max_mds_easize = body->max_mdsize; 156 cli->cl_max_mds_easize = body->max_mdsize;
157 cli->cl_default_mds_easize = 157 cli->cl_default_mds_easize =
158 min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); 158 min_t(__u32, body->max_mdsize, PAGE_SIZE);
159 } 159 }
160 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { 160 if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
161 cli->cl_max_mds_cookiesize = body->max_cookiesize; 161 cli->cl_max_mds_cookiesize = body->max_cookiesize;
162 cli->cl_default_mds_cookiesize = 162 cli->cl_default_mds_cookiesize =
163 min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); 163 min_t(__u32, body->max_cookiesize, PAGE_SIZE);
164 } 164 }
165 } 165 }
166} 166}
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18b7d15..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
99 */ 99 */
100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) 100#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) 101#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 102#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
103 103
104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) 104#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 105#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 106#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE 107#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) 108#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) 109#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
110 110
111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */ 111/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) 112# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two" 113# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
114# endif 114# endif
115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) 115# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" 116# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
117# endif 117# endif
118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) 118# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
119# error "PTLRPC_MAX_BRW_SIZE too big" 119# error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4a0f2e8b19f6..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@ struct client_obd {
272 int cl_grant_shrink_interval; /* seconds */ 272 int cl_grant_shrink_interval; /* seconds */
273 273
274 /* A chunk is an optimal size used by osc_extent to determine 274 /* A chunk is an optimal size used by osc_extent to determine
275 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) 275 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
276 */ 276 */
277 int cl_chunkbits; 277 int cl_chunkbits;
278 int cl_chunk; 278 int cl_chunk;
@@ -1318,7 +1318,7 @@ bad_format:
1318 1318
1319static inline int cli_brw_size(struct obd_device *obd) 1319static inline int cli_brw_size(struct obd_device *obd)
1320{ 1320{
1321 return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 1321 return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
1322} 1322}
1323 1323
1324#endif /* __OBD_H */ 1324#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa67b6..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@ extern char obd_jobid_var[];
500 500
501#ifdef POISON_BULK 501#ifdef POISON_BULK
502#define POISON_PAGE(page, val) do { \ 502#define POISON_PAGE(page, val) do { \
503 memset(kmap(page), val, PAGE_CACHE_SIZE); \ 503 memset(kmap(page), val, PAGE_SIZE); \
504 kunmap(page); \ 504 kunmap(page); \
505} while (0) 505} while (0)
506#else 506#else
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index aced41ab93a1..96141d17d07f 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
758 * --bug 17336 758 * --bug 17336
759 */ 759 */
760 loff_t size = cl_isize_read(inode); 760 loff_t size = cl_isize_read(inode);
761 loff_t cur_index = start >> PAGE_CACHE_SHIFT; 761 loff_t cur_index = start >> PAGE_SHIFT;
762 loff_t size_index = (size - 1) >> 762 loff_t size_index = (size - 1) >>
763 PAGE_CACHE_SHIFT; 763 PAGE_SHIFT;
764 764
765 if ((size == 0 && cur_index != 0) || 765 if ((size == 0 && cur_index != 0) ||
766 size_index < cur_index) 766 size_index < cur_index)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b586d5a88d00..7dd7df59aa1f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
307 cli->cl_avail_grant = 0; 307 cli->cl_avail_grant = 0;
308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */ 308 /* FIXME: Should limit this for the sum of all cl_dirty_max. */
309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; 309 cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
310 if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) 310 if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
311 cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); 311 cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
312 INIT_LIST_HEAD(&cli->cl_cache_waiters); 312 INIT_LIST_HEAD(&cli->cl_cache_waiters);
313 INIT_LIST_HEAD(&cli->cl_loi_ready_list); 313 INIT_LIST_HEAD(&cli->cl_loi_ready_list);
314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); 314 INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
353 * In the future this should likely be increased. LU-1431 353 * In the future this should likely be increased. LU-1431
354 */ 354 */
355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, 355 cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
356 LNET_MTU >> PAGE_CACHE_SHIFT); 356 LNET_MTU >> PAGE_SHIFT);
357 357
358 if (!strcmp(name, LUSTRE_MDC_NAME)) { 358 if (!strcmp(name, LUSTRE_MDC_NAME)) {
359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; 359 cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
360 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { 360 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
361 cli->cl_max_rpcs_in_flight = 2; 361 cli->cl_max_rpcs_in_flight = 2;
362 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { 362 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
363 cli->cl_max_rpcs_in_flight = 3; 363 cli->cl_max_rpcs_in_flight = 3;
364 } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { 364 } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
365 cli->cl_max_rpcs_in_flight = 4; 365 cli->cl_max_rpcs_in_flight = 4;
366 } else { 366 } else {
367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; 367 cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b050203..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
107/* 107/*
108 * 50 ldlm locks for 1MB of RAM. 108 * 50 ldlm locks for 1MB of RAM.
109 */ 109 */
110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) 110#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
111 111
112/* 112/*
113 * Maximal possible grant step plan in %. 113 * Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c7904a96f9af..74e193e52cd6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
546{ 546{
547 int avail; 547 int avail;
548 548
549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; 549 avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
550 if (likely(avail >= 0)) 550 if (likely(avail >= 0))
551 avail /= (int)sizeof(struct lustre_handle); 551 avail /= (int)sizeof(struct lustre_handle);
552 else 552 else
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4e0a3e583330..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
134 * a header lu_dirpage which describes the start/end hash, and whether this 134 * a header lu_dirpage which describes the start/end hash, and whether this
135 * page is empty (contains no dir entry) or hash collide with next page. 135 * page is empty (contains no dir entry) or hash collide with next page.
136 * After client receives reply, several pages will be integrated into dir page 136 * After client receives reply, several pages will be integrated into dir page
137 * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the 137 * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
138 * lu_dirpage for this integrated page will be adjusted. See 138 * for this integrated page will be adjusted. See lmv_adjust_dirpages().
139 * lmv_adjust_dirpages().
140 * 139 *
141 */ 140 */
142 141
@@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
153 struct page **page_pool; 152 struct page **page_pool;
154 struct page *page; 153 struct page *page;
155 struct lu_dirpage *dp; 154 struct lu_dirpage *dp;
156 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; 155 int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
157 int nrdpgs = 0; /* number of pages read actually */ 156 int nrdpgs = 0; /* number of pages read actually */
158 int npages; 157 int npages;
159 int i; 158 int i;
@@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
193 if (body->valid & OBD_MD_FLSIZE) 192 if (body->valid & OBD_MD_FLSIZE)
194 cl_isize_write(inode, body->size); 193 cl_isize_write(inode, body->size);
195 194
196 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) 195 nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
197 >> PAGE_CACHE_SHIFT; 196 >> PAGE_SHIFT;
198 SetPageUptodate(page0); 197 SetPageUptodate(page0);
199 } 198 }
200 unlock_page(page0); 199 unlock_page(page0);
@@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
209 page = page_pool[i]; 208 page = page_pool[i];
210 209
211 if (rc < 0 || i >= nrdpgs) { 210 if (rc < 0 || i >= nrdpgs) {
212 page_cache_release(page); 211 put_page(page);
213 continue; 212 continue;
214 } 213 }
215 214
@@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
230 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", 229 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
231 offset, ret); 230 offset, ret);
232 } 231 }
233 page_cache_release(page); 232 put_page(page);
234 } 233 }
235 234
236 if (page_pool != &page0) 235 if (page_pool != &page0)
@@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
247 truncate_complete_page(page->mapping, page); 246 truncate_complete_page(page->mapping, page);
248 unlock_page(page); 247 unlock_page(page);
249 } 248 }
250 page_cache_release(page); 249 put_page(page);
251} 250}
252 251
253/* 252/*
@@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
273 if (found > 0 && !radix_tree_exceptional_entry(page)) { 272 if (found > 0 && !radix_tree_exceptional_entry(page)) {
274 struct lu_dirpage *dp; 273 struct lu_dirpage *dp;
275 274
276 page_cache_get(page); 275 get_page(page);
277 spin_unlock_irq(&mapping->tree_lock); 276 spin_unlock_irq(&mapping->tree_lock);
278 /* 277 /*
279 * In contrast to find_lock_page() we are sure that directory 278 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
313 page = NULL; 312 page = NULL;
314 } 313 }
315 } else { 314 } else {
316 page_cache_release(page); 315 put_page(page);
317 page = ERR_PTR(-EIO); 316 page = ERR_PTR(-EIO);
318 } 317 }
319 318
@@ -1507,7 +1506,7 @@ skip_lmm:
1507 st.st_gid = body->gid; 1506 st.st_gid = body->gid;
1508 st.st_rdev = body->rdev; 1507 st.st_rdev = body->rdev;
1509 st.st_size = body->size; 1508 st.st_size = body->size;
1510 st.st_blksize = PAGE_CACHE_SIZE; 1509 st.st_blksize = PAGE_SIZE;
1511 st.st_blocks = body->blocks; 1510 st.st_blocks = body->blocks;
1512 st.st_atime = body->atime; 1511 st.st_atime = body->atime;
1513 st.st_mtime = body->mtime; 1512 st.st_mtime = body->mtime;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e1572cb457b..e3c0f1dd4d31 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
310/* default to about 40meg of readahead on a given system. That much tied 310/* default to about 40meg of readahead on a given system. That much tied
311 * up in 512k readahead requests serviced at 40ms each is about 1GB/s. 311 * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
312 */ 312 */
313#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) 313#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
314 314
315/* default to read-ahead full files smaller than 2MB on the second read */ 315/* default to read-ahead full files smaller than 2MB on the second read */
316#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) 316#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
317 317
318enum ra_stat { 318enum ra_stat {
319 RA_STAT_HIT = 0, 319 RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
975static inline void ll_invalidate_page(struct page *vmpage) 975static inline void ll_invalidate_page(struct page *vmpage)
976{ 976{
977 struct address_space *mapping = vmpage->mapping; 977 struct address_space *mapping = vmpage->mapping;
978 loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; 978 loff_t offset = vmpage->index << PAGE_SHIFT;
979 979
980 LASSERT(PageLocked(vmpage)); 980 LASSERT(PageLocked(vmpage));
981 if (!mapping) 981 if (!mapping)
982 return; 982 return;
983 983
984 ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); 984 ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
985 truncate_complete_page(mapping, vmpage); 985 truncate_complete_page(mapping, vmpage);
986} 986}
987 987
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6d6bb33e3655..b57a992688a8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
85 85
86 si_meminfo(&si); 86 si_meminfo(&si);
87 pages = si.totalram - si.totalhigh; 87 pages = si.totalram - si.totalhigh;
88 if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) 88 if (pages >> (20 - PAGE_SHIFT) < 512)
89 lru_page_max = pages / 2; 89 lru_page_max = pages / 2;
90 else 90 else
91 lru_page_max = (pages / 4) * 3; 91 lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
272 valid != CLIENT_CONNECT_MDT_REQD) { 272 valid != CLIENT_CONNECT_MDT_REQD) {
273 char *buf; 273 char *buf;
274 274
275 buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 275 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
276 if (!buf) { 276 if (!buf) {
277 err = -ENOMEM; 277 err = -ENOMEM;
278 goto out_md_fid; 278 goto out_md_fid;
279 } 279 }
280 obd_connect_flags2str(buf, PAGE_CACHE_SIZE, 280 obd_connect_flags2str(buf, PAGE_SIZE,
281 valid ^ CLIENT_CONNECT_MDT_REQD, ","); 281 valid ^ CLIENT_CONNECT_MDT_REQD, ",");
282 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n", 282 LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
283 sbi->ll_md_exp->exp_obd->obd_name, buf); 283 sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
335 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 335 if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
336 sbi->ll_md_brw_size = data->ocd_brw_size; 336 sbi->ll_md_brw_size = data->ocd_brw_size;
337 else 337 else
338 sbi->ll_md_brw_size = PAGE_CACHE_SIZE; 338 sbi->ll_md_brw_size = PAGE_SIZE;
339 339
340 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { 340 if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
341 LCONSOLE_INFO("Layout lock feature supported.\n"); 341 LCONSOLE_INFO("Layout lock feature supported.\n");
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 69445a9f2011..5b484e62ffd0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
58 size_t count) 58 size_t count)
59{ 59{
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + 60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); 61 (vma->vm_pgoff << PAGE_SHIFT);
62 policy->l_extent.end = (policy->l_extent.start + count - 1) | 62 policy->l_extent.end = (policy->l_extent.start + count - 1) |
63 ~CFS_PAGE_MASK; 63 ~CFS_PAGE_MASK;
64} 64}
@@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
321 321
322 vmpage = vio->u.fault.ft_vmpage; 322 vmpage = vio->u.fault.ft_vmpage;
323 if (result != 0 && vmpage) { 323 if (result != 0 && vmpage) {
324 page_cache_release(vmpage); 324 put_page(vmpage);
325 vmf->page = NULL; 325 vmf->page = NULL;
326 } 326 }
327 } 327 }
@@ -360,7 +360,7 @@ restart:
360 lock_page(vmpage); 360 lock_page(vmpage);
361 if (unlikely(!vmpage->mapping)) { /* unlucky */ 361 if (unlikely(!vmpage->mapping)) { /* unlucky */
362 unlock_page(vmpage); 362 unlock_page(vmpage);
363 page_cache_release(vmpage); 363 put_page(vmpage);
364 vmf->page = NULL; 364 vmf->page = NULL;
365 365
366 if (!printed && ++count > 16) { 366 if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
457 LASSERTF(last > first, "last %llu first %llu\n", last, first); 457 LASSERTF(last > first, "last %llu first %llu\n", last, first);
458 if (mapping_mapped(mapping)) { 458 if (mapping_mapped(mapping)) {
459 rc = 0; 459 rc = 0;
460 unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, 460 unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
461 last - first + 1, 0); 461 last - first + 1, 0);
462 } 462 }
463 463
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc16cf49..f169c0db63b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
218 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; 218 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
219 bio_for_each_segment(bvec, bio, iter) { 219 bio_for_each_segment(bvec, bio, iter) {
220 BUG_ON(bvec.bv_offset != 0); 220 BUG_ON(bvec.bv_offset != 0);
221 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); 221 BUG_ON(bvec.bv_len != PAGE_SIZE);
222 222
223 pages[page_count] = bvec.bv_page; 223 pages[page_count] = bvec.bv_page;
224 offsets[page_count] = offset; 224 offsets[page_count] = offset;
@@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
232 (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, 232 (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
233 page_count); 233 page_count);
234 234
235 pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; 235 pvec->ldp_size = page_count << PAGE_SHIFT;
236 pvec->ldp_nr = page_count; 236 pvec->ldp_nr = page_count;
237 237
238 /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to 238 /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
507 507
508 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); 508 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
509 509
510 lo->lo_blocksize = PAGE_CACHE_SIZE; 510 lo->lo_blocksize = PAGE_SIZE;
511 lo->lo_device = bdev; 511 lo->lo_device = bdev;
512 lo->lo_flags = lo_flags; 512 lo->lo_flags = lo_flags;
513 lo->lo_backing_file = file; 513 lo->lo_backing_file = file;
@@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
525 lo->lo_queue->queuedata = lo; 525 lo->lo_queue->queuedata = lo;
526 526
527 /* queue parameters */ 527 /* queue parameters */
528 CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); 528 CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
529 blk_queue_logical_block_size(lo->lo_queue, 529 blk_queue_logical_block_size(lo->lo_queue,
530 (unsigned short)PAGE_CACHE_SIZE); 530 (unsigned short)PAGE_SIZE);
531 blk_queue_max_hw_sectors(lo->lo_queue, 531 blk_queue_max_hw_sectors(lo->lo_queue,
532 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); 532 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
533 blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); 533 blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
534 534
535 set_capacity(disks[lo->lo_number], size); 535 set_capacity(disks[lo->lo_number], size);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 45941a6600fe..27ab1261400e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
233 pages_number = sbi->ll_ra_info.ra_max_pages; 233 pages_number = sbi->ll_ra_info.ra_max_pages;
234 spin_unlock(&sbi->ll_lock); 234 spin_unlock(&sbi->ll_lock);
235 235
236 mult = 1 << (20 - PAGE_CACHE_SHIFT); 236 mult = 1 << (20 - PAGE_SHIFT);
237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
238} 238}
239 239
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
251 if (rc) 251 if (rc)
252 return rc; 252 return rc;
253 253
254 pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ 254 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
255 255
256 if (pages_number > totalram_pages / 2) { 256 if (pages_number > totalram_pages / 2) {
257 257
258 CERROR("can't set file readahead more than %lu MB\n", 258 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/ 259 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
260 return -ERANGE; 260 return -ERANGE;
261 } 261 }
262 262
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file; 281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
282 spin_unlock(&sbi->ll_lock); 282 spin_unlock(&sbi->ll_lock);
283 283
284 mult = 1 << (20 - PAGE_CACHE_SHIFT); 284 mult = 1 << (20 - PAGE_SHIFT);
285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
286} 286}
287 287
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; 326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
327 spin_unlock(&sbi->ll_lock); 327 spin_unlock(&sbi->ll_lock);
328 328
329 mult = 1 << (20 - PAGE_CACHE_SHIFT); 329 mult = 1 << (20 - PAGE_SHIFT);
330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); 330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
331} 331}
332 332
@@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
349 */ 349 */
350 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { 350 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", 351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
352 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); 352 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
353 return -ERANGE; 353 return -ERANGE;
354 } 354 }
355 355
@@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
366 struct super_block *sb = m->private; 366 struct super_block *sb = m->private;
367 struct ll_sb_info *sbi = ll_s2sbi(sb); 367 struct ll_sb_info *sbi = ll_s2sbi(sb);
368 struct cl_client_cache *cache = &sbi->ll_cache; 368 struct cl_client_cache *cache = &sbi->ll_cache;
369 int shift = 20 - PAGE_CACHE_SHIFT; 369 int shift = 20 - PAGE_SHIFT;
370 int max_cached_mb; 370 int max_cached_mb;
371 int unused_mb; 371 int unused_mb;
372 372
@@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
405 return -EFAULT; 405 return -EFAULT;
406 kernbuf[count] = 0; 406 kernbuf[count] = 0;
407 407
408 mult = 1 << (20 - PAGE_CACHE_SHIFT); 408 mult = 1 << (20 - PAGE_SHIFT);
409 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - 409 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
410 kernbuf; 410 kernbuf;
411 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); 411 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
415 if (pages_number < 0 || pages_number > totalram_pages) { 415 if (pages_number < 0 || pages_number > totalram_pages) {
416 CERROR("%s: can't set max cache more than %lu MB\n", 416 CERROR("%s: can't set max cache more than %lu MB\n",
417 ll_get_fsname(sb, NULL, 0), 417 ll_get_fsname(sb, NULL, 0),
418 totalram_pages >> (20 - PAGE_CACHE_SHIFT)); 418 totalram_pages >> (20 - PAGE_SHIFT));
419 return -ERANGE; 419 return -ERANGE;
420 } 420 }
421 421
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614acf3f8e..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
146 */ 146 */
147 io->ci_lockreq = CILR_NEVER; 147 io->ci_lockreq = CILR_NEVER;
148 148
149 pos = vmpage->index << PAGE_CACHE_SHIFT; 149 pos = vmpage->index << PAGE_SHIFT;
150 150
151 /* Create a temp IO to serve write. */ 151 /* Create a temp IO to serve write. */
152 result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); 152 result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
153 if (result == 0) { 153 if (result == 0) {
154 cio->cui_fd = LUSTRE_FPRIVATE(file); 154 cio->cui_fd = LUSTRE_FPRIVATE(file);
155 cio->cui_iter = NULL; 155 cio->cui_iter = NULL;
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
498 } 498 }
499 if (rc != 1) 499 if (rc != 1)
500 unlock_page(vmpage); 500 unlock_page(vmpage);
501 page_cache_release(vmpage); 501 put_page(vmpage);
502 } else { 502 } else {
503 which = RA_STAT_FAILED_GRAB_PAGE; 503 which = RA_STAT_FAILED_GRAB_PAGE;
504 msg = "g_c_p_n failed"; 504 msg = "g_c_p_n failed";
@@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
521 * striped over, rather than having a constant value for all files here. 521 * striped over, rather than having a constant value for all files here.
522 */ 522 */
523 523
524/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). 524/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
525 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled 525 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
526 * by default, this should be adjusted corresponding with max_read_ahead_mb 526 * by default, this should be adjusted corresponding with max_read_ahead_mb
527 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used 527 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
528 * up quickly which will affect read performance significantly. See LU-2816 528 * up quickly which will affect read performance significantly. See LU-2816
529 */ 529 */
530#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) 530#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
531 531
532static inline int stride_io_mode(struct ll_readahead_state *ras) 532static inline int stride_io_mode(struct ll_readahead_state *ras)
533{ 533{
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
739 end = rpc_boundary; 739 end = rpc_boundary;
740 740
741 /* Truncate RA window to end of file */ 741 /* Truncate RA window to end of file */
742 end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); 742 end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
743 743
744 ras->ras_next_readahead = max(end, end + 1); 744 ras->ras_next_readahead = max(end, end + 1);
745 RAS_CDEBUG(ras); 745 RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
776 if (reserved != 0) 776 if (reserved != 0)
777 ll_ra_count_put(ll_i2sbi(inode), reserved); 777 ll_ra_count_put(ll_i2sbi(inode), reserved);
778 778
779 if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) 779 if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
780 ll_ra_stats_inc(mapping, RA_STAT_EOF); 780 ll_ra_stats_inc(mapping, RA_STAT_EOF);
781 781
782 /* if we didn't get to the end of the region we reserved from 782 /* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
985 if (ras->ras_requests == 2 && !ras->ras_request_index) { 985 if (ras->ras_requests == 2 && !ras->ras_request_index) {
986 __u64 kms_pages; 986 __u64 kms_pages;
987 987
988 kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 988 kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
989 PAGE_CACHE_SHIFT; 989 PAGE_SHIFT;
990 990
991 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, 991 CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
992 ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); 992 ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
1173 * PageWriteback or clean the page. 1173 * PageWriteback or clean the page.
1174 */ 1174 */
1175 result = cl_sync_file_range(inode, offset, 1175 result = cl_sync_file_range(inode, offset,
1176 offset + PAGE_CACHE_SIZE - 1, 1176 offset + PAGE_SIZE - 1,
1177 CL_FSYNC_LOCAL, 1); 1177 CL_FSYNC_LOCAL, 1);
1178 if (result > 0) { 1178 if (result > 0) {
1179 /* actually we may have written more than one page. 1179 /* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1211 int ignore_layout = 0; 1211 int ignore_layout = 0;
1212 1212
1213 if (wbc->range_cyclic) { 1213 if (wbc->range_cyclic) {
1214 start = mapping->writeback_index << PAGE_CACHE_SHIFT; 1214 start = mapping->writeback_index << PAGE_SHIFT;
1215 end = OBD_OBJECT_EOF; 1215 end = OBD_OBJECT_EOF;
1216 } else { 1216 } else {
1217 start = wbc->range_start; 1217 start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
1241 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { 1241 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
1242 if (end == OBD_OBJECT_EOF) 1242 if (end == OBD_OBJECT_EOF)
1243 end = i_size_read(inode); 1243 end = i_size_read(inode);
1244 mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; 1244 mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
1245 } 1245 }
1246 return result; 1246 return result;
1247} 1247}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 7a5db67bc680..69aa15e8e3ef 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
87 * below because they are run with page locked and all our io is 87 * below because they are run with page locked and all our io is
88 * happening with locked page too 88 * happening with locked page too
89 */ 89 */
90 if (offset == 0 && length == PAGE_CACHE_SIZE) { 90 if (offset == 0 && length == PAGE_SIZE) {
91 env = cl_env_get(&refcheck); 91 env = cl_env_get(&refcheck);
92 if (!IS_ERR(env)) { 92 if (!IS_ERR(env)) {
93 inode = vmpage->mapping->host; 93 inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
193 return -EFBIG; 193 return -EFBIG;
194 } 194 }
195 195
196 *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 196 *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
197 *max_pages -= user_addr >> PAGE_CACHE_SHIFT; 197 *max_pages -= user_addr >> PAGE_SHIFT;
198 198
199 *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); 199 *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
200 if (*pages) { 200 if (*pages) {
@@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
217 for (i = 0; i < npages; i++) { 217 for (i = 0; i < npages; i++) {
218 if (do_dirty) 218 if (do_dirty)
219 set_page_dirty_lock(pages[i]); 219 set_page_dirty_lock(pages[i]);
220 page_cache_release(pages[i]); 220 put_page(pages[i]);
221 } 221 }
222 kvfree(pages); 222 kvfree(pages);
223} 223}
@@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
357 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. 357 * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
358 */ 358 */
359#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ 359#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
360 PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) 360 PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
361static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, 361static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
362 loff_t file_offset) 362 loff_t file_offset)
363{ 363{
@@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
382 CDEBUG(D_VFSTRACE, 382 CDEBUG(D_VFSTRACE,
383 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", 383 "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
384 inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, 384 inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
385 file_offset, file_offset, count >> PAGE_CACHE_SHIFT, 385 file_offset, file_offset, count >> PAGE_SHIFT,
386 MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); 386 MAX_DIO_SIZE >> PAGE_SHIFT);
387 387
388 /* Check that all user buffers are aligned as well */ 388 /* Check that all user buffers are aligned as well */
389 if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) 389 if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
432 * page worth of page pointers = 4MB on i386. 432 * page worth of page pointers = 4MB on i386.
433 */ 433 */
434 if (result == -ENOMEM && 434 if (result == -ENOMEM &&
435 size > (PAGE_CACHE_SIZE / sizeof(*pages)) * 435 size > (PAGE_SIZE / sizeof(*pages)) *
436 PAGE_CACHE_SIZE) { 436 PAGE_SIZE) {
437 size = ((((size / 2) - 1) | 437 size = ((((size / 2) - 1) |
438 ~CFS_PAGE_MASK) + 1) & 438 ~CFS_PAGE_MASK) + 1) &
439 CFS_PAGE_MASK; 439 CFS_PAGE_MASK;
@@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
474 loff_t pos, unsigned len, unsigned flags, 474 loff_t pos, unsigned len, unsigned flags,
475 struct page **pagep, void **fsdata) 475 struct page **pagep, void **fsdata)
476{ 476{
477 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 477 pgoff_t index = pos >> PAGE_SHIFT;
478 struct page *page; 478 struct page *page;
479 int rc; 479 int rc;
480 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 480 unsigned from = pos & (PAGE_SIZE - 1);
481 481
482 page = grab_cache_page_write_begin(mapping, index, flags); 482 page = grab_cache_page_write_begin(mapping, index, flags);
483 if (!page) 483 if (!page)
@@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
488 rc = ll_prepare_write(file, page, from, from + len); 488 rc = ll_prepare_write(file, page, from, from + len);
489 if (rc) { 489 if (rc) {
490 unlock_page(page); 490 unlock_page(page);
491 page_cache_release(page); 491 put_page(page);
492 } 492 }
493 return rc; 493 return rc;
494} 494}
@@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
497 loff_t pos, unsigned len, unsigned copied, 497 loff_t pos, unsigned len, unsigned copied,
498 struct page *page, void *fsdata) 498 struct page *page, void *fsdata)
499{ 499{
500 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 500 unsigned from = pos & (PAGE_SIZE - 1);
501 int rc; 501 int rc;
502 502
503 rc = ll_commit_write(file, page, from, from + copied); 503 rc = ll_commit_write(file, page, from, from + copied);
504 unlock_page(page); 504 unlock_page(page);
505 page_cache_release(page); 505 put_page(page);
506 506
507 return rc ?: copied; 507 return rc ?: copied;
508} 508}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index fb0c26ee7ff3..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
512 vio->cui_ra_window_set = 1; 512 vio->cui_ra_window_set = 1;
513 bead->lrr_start = cl_index(obj, pos); 513 bead->lrr_start = cl_index(obj, pos);
514 /* 514 /*
515 * XXX: explicit PAGE_CACHE_SIZE 515 * XXX: explicit PAGE_SIZE
516 */ 516 */
517 bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); 517 bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
518 ll_ra_read_in(file, bead); 518 ll_ra_read_in(file, bead);
519 } 519 }
520 520
@@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
959 * We're completely overwriting an existing page, so _don't_ 959 * We're completely overwriting an existing page, so _don't_
960 * set it up to date until commit_write 960 * set it up to date until commit_write
961 */ 961 */
962 if (from == 0 && to == PAGE_CACHE_SIZE) { 962 if (from == 0 && to == PAGE_SIZE) {
963 CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); 963 CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
964 POISON_PAGE(page, 0x11); 964 POISON_PAGE(page, 0x11);
965 } else 965 } else
@@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
1022 set_page_dirty(vmpage); 1022 set_page_dirty(vmpage);
1023 vvp_write_pending(cl2ccc(obj), cp); 1023 vvp_write_pending(cl2ccc(obj), cp);
1024 } else if (result == -EDQUOT) { 1024 } else if (result == -EDQUOT) {
1025 pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; 1025 pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
1026 bool need_clip = true; 1026 bool need_clip = true;
1027 1027
1028 /* 1028 /*
@@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
1040 * being. 1040 * being.
1041 */ 1041 */
1042 if (last_index > pg->cp_index) { 1042 if (last_index > pg->cp_index) {
1043 to = PAGE_CACHE_SIZE; 1043 to = PAGE_SIZE;
1044 need_clip = false; 1044 need_clip = false;
1045 } else if (last_index == pg->cp_index) { 1045 } else if (last_index == pg->cp_index) {
1046 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; 1046 int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 850bae734075..33ca3eb34965 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
57 struct page *vmpage = cp->cpg_page; 57 struct page *vmpage = cp->cpg_page;
58 58
59 LASSERT(vmpage); 59 LASSERT(vmpage);
60 page_cache_release(vmpage); 60 put_page(vmpage);
61} 61}
62 62
63static void vvp_page_fini(const struct lu_env *env, 63static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
164 LASSERT(vmpage); 164 LASSERT(vmpage);
165 LASSERT(PageLocked(vmpage)); 165 LASSERT(PageLocked(vmpage));
166 166
167 offset = vmpage->index << PAGE_CACHE_SHIFT; 167 offset = vmpage->index << PAGE_SHIFT;
168 168
169 /* 169 /*
170 * XXX is it safe to call this with the page lock held? 170 * XXX is it safe to call this with the page lock held?
171 */ 171 */
172 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); 172 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
173 return 0; 173 return 0;
174} 174}
175 175
@@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
537 CLOBINVRNT(env, obj, ccc_object_invariant(obj)); 537 CLOBINVRNT(env, obj, ccc_object_invariant(obj));
538 538
539 cpg->cpg_page = vmpage; 539 cpg->cpg_page = vmpage;
540 page_cache_get(vmpage); 540 get_page(vmpage);
541 541
542 INIT_LIST_HEAD(&cpg->cpg_pending_linkage); 542 INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
543 if (page->cp_type == CPT_CACHEABLE) { 543 if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0f776cf8a5aa..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2017 * |s|e|f|p|ent| 0 | ... | 0 | 2017 * |s|e|f|p|ent| 0 | ... | 0 |
2018 * '----------------- -----' 2018 * '----------------- -----'
2019 * 2019 *
2020 * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is 2020 * However, on hosts where the native VM page size (PAGE_SIZE) is
2021 * larger than LU_PAGE_SIZE, a single host page may contain multiple 2021 * larger than LU_PAGE_SIZE, a single host page may contain multiple
2022 * lu_dirpages. After reading the lu_dirpages from the MDS, the 2022 * lu_dirpages. After reading the lu_dirpages from the MDS, the
2023 * ldp_hash_end of the first lu_dirpage refers to the one immediately 2023 * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2048 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span 2048 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
2049 * to the first entry of the next lu_dirpage. 2049 * to the first entry of the next lu_dirpage.
2050 */ 2050 */
2051#if PAGE_CACHE_SIZE > LU_PAGE_SIZE 2051#if PAGE_SIZE > LU_PAGE_SIZE
2052static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) 2052static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
2053{ 2053{
2054 int i; 2054 int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
2101} 2101}
2102#else 2102#else
2103#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) 2103#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
2104#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ 2104#endif /* PAGE_SIZE > LU_PAGE_SIZE */
2105 2105
2106static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, 2106static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2107 struct page **pages, struct ptlrpc_request **request) 2107 struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2110 struct lmv_obd *lmv = &obd->u.lmv; 2110 struct lmv_obd *lmv = &obd->u.lmv;
2111 __u64 offset = op_data->op_offset; 2111 __u64 offset = op_data->op_offset;
2112 int rc; 2112 int rc;
2113 int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ 2113 int ncfspgs; /* pages read in PAGE_SIZE */
2114 int nlupgs; /* pages read in LU_PAGE_SIZE */ 2114 int nlupgs; /* pages read in LU_PAGE_SIZE */
2115 struct lmv_tgt_desc *tgt; 2115 struct lmv_tgt_desc *tgt;
2116 2116
@@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2129 if (rc != 0) 2129 if (rc != 0)
2130 return rc; 2130 return rc;
2131 2131
2132 ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1) 2132 ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
2133 >> PAGE_CACHE_SHIFT; 2133 >> PAGE_SHIFT;
2134 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; 2134 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
2135 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); 2135 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
2136 LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); 2136 LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 55dd8ef9525b..b91d3ff18b02 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@ restart_bulk:
1002 1002
1003 /* NB req now owns desc and will free it when it gets freed */ 1003 /* NB req now owns desc and will free it when it gets freed */
1004 for (i = 0; i < op_data->op_npages; i++) 1004 for (i = 0; i < op_data->op_npages; i++)
1005 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); 1005 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
1006 1006
1007 mdc_readdir_pack(req, op_data->op_offset, 1007 mdc_readdir_pack(req, op_data->op_offset,
1008 PAGE_CACHE_SIZE * op_data->op_npages, 1008 PAGE_SIZE * op_data->op_npages,
1009 &op_data->op_fid1); 1009 &op_data->op_fid1);
1010 1010
1011 ptlrpc_request_set_replen(req); 1011 ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@ restart_bulk:
1037 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { 1037 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
1038 CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", 1038 CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
1039 req->rq_bulk->bd_nob_transferred, 1039 req->rq_bulk->bd_nob_transferred,
1040 PAGE_CACHE_SIZE * op_data->op_npages); 1040 PAGE_SIZE * op_data->op_npages);
1041 ptlrpc_req_finished(req); 1041 ptlrpc_req_finished(req);
1042 return -EPROTO; 1042 return -EPROTO;
1043 } 1043 }
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc87248032..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
1113} 1113}
1114 1114
1115enum { 1115enum {
1116 CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), 1116 CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
1117 CONFIG_READ_NRPAGES = 4 1117 CONFIG_READ_NRPAGES = 4
1118}; 1118};
1119 1119
@@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
1137 LASSERT(cfg->cfg_instance); 1137 LASSERT(cfg->cfg_instance);
1138 LASSERT(cfg->cfg_sb == cfg->cfg_instance); 1138 LASSERT(cfg->cfg_sb == cfg->cfg_instance);
1139 1139
1140 inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); 1140 inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
1141 if (!inst) 1141 if (!inst)
1142 return -ENOMEM; 1142 return -ENOMEM;
1143 1143
1144 pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); 1144 pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
1145 if (pos >= PAGE_CACHE_SIZE) { 1145 if (pos >= PAGE_SIZE) {
1146 kfree(inst); 1146 kfree(inst);
1147 return -E2BIG; 1147 return -E2BIG;
1148 } 1148 }
1149 1149
1150 ++pos; 1150 ++pos;
1151 buf = inst + pos; 1151 buf = inst + pos;
1152 bufsz = PAGE_CACHE_SIZE - pos; 1152 bufsz = PAGE_SIZE - pos;
1153 1153
1154 while (datalen > 0) { 1154 while (datalen > 0) {
1155 int entry_len = sizeof(*entry); 1155 int entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
1181 /* Keep this swab for normal mixed endian handling. LU-1644 */ 1181 /* Keep this swab for normal mixed endian handling. LU-1644 */
1182 if (mne_swab) 1182 if (mne_swab)
1183 lustre_swab_mgs_nidtbl_entry(entry); 1183 lustre_swab_mgs_nidtbl_entry(entry);
1184 if (entry->mne_length > PAGE_CACHE_SIZE) { 1184 if (entry->mne_length > PAGE_SIZE) {
1185 CERROR("MNE too large (%u)\n", entry->mne_length); 1185 CERROR("MNE too large (%u)\n", entry->mne_length);
1186 break; 1186 break;
1187 } 1187 }
@@ -1371,7 +1371,7 @@ again:
1371 } 1371 }
1372 body->mcb_offset = cfg->cfg_last_idx + 1; 1372 body->mcb_offset = cfg->cfg_last_idx + 1;
1373 body->mcb_type = cld->cld_type; 1373 body->mcb_type = cld->cld_type;
1374 body->mcb_bits = PAGE_CACHE_SHIFT; 1374 body->mcb_bits = PAGE_SHIFT;
1375 body->mcb_units = nrpages; 1375 body->mcb_units = nrpages;
1376 1376
1377 /* allocate bulk transfer descriptor */ 1377 /* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@ again:
1383 } 1383 }
1384 1384
1385 for (i = 0; i < nrpages; i++) 1385 for (i = 0; i < nrpages; i++)
1386 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); 1386 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
1387 1387
1388 ptlrpc_request_set_replen(req); 1388 ptlrpc_request_set_replen(req);
1389 rc = ptlrpc_queue_wait(req); 1389 rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@ again:
1411 goto out; 1411 goto out;
1412 } 1412 }
1413 1413
1414 if (ealen > nrpages << PAGE_CACHE_SHIFT) { 1414 if (ealen > nrpages << PAGE_SHIFT) {
1415 rc = -EINVAL; 1415 rc = -EINVAL;
1416 goto out; 1416 goto out;
1417 } 1417 }
@@ -1439,7 +1439,7 @@ again:
1439 1439
1440 ptr = kmap(pages[i]); 1440 ptr = kmap(pages[i]);
1441 rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, 1441 rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
1442 min_t(int, ealen, PAGE_CACHE_SIZE), 1442 min_t(int, ealen, PAGE_SIZE),
1443 mne_swab); 1443 mne_swab);
1444 kunmap(pages[i]); 1444 kunmap(pages[i]);
1445 if (rc2 < 0) { 1445 if (rc2 < 0) {
@@ -1448,7 +1448,7 @@ again:
1448 break; 1448 break;
1449 } 1449 }
1450 1450
1451 ealen -= PAGE_CACHE_SIZE; 1451 ealen -= PAGE_SIZE;
1452 } 1452 }
1453 1453
1454out: 1454out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 231a2f26c693..394580016638 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
1477 /* 1477 /*
1478 * XXX for now. 1478 * XXX for now.
1479 */ 1479 */
1480 return (loff_t)idx << PAGE_CACHE_SHIFT; 1480 return (loff_t)idx << PAGE_SHIFT;
1481} 1481}
1482EXPORT_SYMBOL(cl_offset); 1482EXPORT_SYMBOL(cl_offset);
1483 1483
@@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
1489 /* 1489 /*
1490 * XXX for now. 1490 * XXX for now.
1491 */ 1491 */
1492 return offset >> PAGE_CACHE_SHIFT; 1492 return offset >> PAGE_SHIFT;
1493} 1493}
1494EXPORT_SYMBOL(cl_index); 1494EXPORT_SYMBOL(cl_index);
1495 1495
1496int cl_page_size(const struct cl_object *obj) 1496int cl_page_size(const struct cl_object *obj)
1497{ 1497{
1498 return 1 << PAGE_CACHE_SHIFT; 1498 return 1 << PAGE_SHIFT;
1499} 1499}
1500EXPORT_SYMBOL(cl_page_size); 1500EXPORT_SYMBOL(cl_page_size);
1501 1501
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1a938e1376f9..c2cf015962dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); 461 CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
462 ret = -EINVAL; 462 ret = -EINVAL;
463 } 463 }
464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { 464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
465 CWARN("mask failed: u64val %llu >= %llu\n", u64val, 465 CWARN("mask failed: u64val %llu >= %llu\n", u64val,
466 (__u64)PAGE_CACHE_SIZE); 466 (__u64)PAGE_SIZE);
467 ret = -EINVAL; 467 ret = -EINVAL;
468 } 468 }
469 469
@@ -509,7 +509,7 @@ static int __init obdclass_init(void)
509 * For clients with less memory, a larger fraction is needed 509 * For clients with less memory, a larger fraction is needed
510 * for other purposes (mostly for BGL). 510 * for other purposes (mostly for BGL).
511 */ 511 */
512 if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) 512 if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
513 obd_max_dirty_pages = totalram_pages / 4; 513 obd_max_dirty_pages = totalram_pages / 4;
514 else 514 else
515 obd_max_dirty_pages = totalram_pages / 2; 515 obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
47#include "../../include/lustre/lustre_idl.h" 47#include "../../include/lustre/lustre_idl.h"
48 48
49#include <linux/fs.h> 49#include <linux/fs.h>
50#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
51 50
52void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) 51void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
53{ 52{
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
71 if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) 70 if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
72 dst->i_blkbits = ffs(src->o_blksize) - 1; 71 dst->i_blkbits = ffs(src->o_blksize) - 1;
73 72
74 if (dst->i_blkbits < PAGE_CACHE_SHIFT) 73 if (dst->i_blkbits < PAGE_SHIFT)
75 dst->i_blkbits = PAGE_CACHE_SHIFT; 74 dst->i_blkbits = PAGE_SHIFT;
76 75
77 /* allocation of space */ 76 /* allocation of space */
78 if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) 77 if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9e968c..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
100 char *buf) 100 char *buf)
101{ 101{
102 return sprintf(buf, "%ul\n", 102 return sprintf(buf, "%ul\n",
103 obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT))); 103 obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
104} 104}
105 105
106static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, 106static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
113 if (rc) 113 if (rc)
114 return rc; 114 return rc;
115 115
116 val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */ 116 val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
117 117
118 if (val > ((totalram_pages / 10) * 9)) { 118 if (val > ((totalram_pages / 10) * 9)) {
119 /* Somebody wants to assign too much memory to dirty pages */ 119 /* Somebody wants to assign too much memory to dirty pages */
120 return -EINVAL; 120 return -EINVAL;
121 } 121 }
122 122
123 if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { 123 if (val < 4 << (20 - PAGE_SHIFT)) {
124 /* Less than 4 Mb for dirty cache is also bad */ 124 /* Less than 4 Mb for dirty cache is also bad */
125 return -EINVAL; 125 return -EINVAL;
126 } 126 }
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 65a4746c89ca..978568ada8e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -840,8 +840,8 @@ static int lu_htable_order(void)
840 840
841#if BITS_PER_LONG == 32 841#if BITS_PER_LONG == 32
842 /* limit hashtable size for lowmem systems to low RAM */ 842 /* limit hashtable size for lowmem systems to low RAM */
843 if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) 843 if (cache_size > 1 << (30 - PAGE_SHIFT))
844 cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; 844 cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
845#endif 845#endif
846 846
847 /* clear off unreasonable cache setting. */ 847 /* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@ static int lu_htable_order(void)
853 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; 853 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
854 } 854 }
855 cache_size = cache_size / 100 * lu_cache_percent * 855 cache_size = cache_size / 100 * lu_cache_percent *
856 (PAGE_CACHE_SIZE / 1024); 856 (PAGE_SIZE / 1024);
857 857
858 for (bits = 1; (1 << bits) < cache_size; ++bits) { 858 for (bits = 1; (1 << bits) < cache_size; ++bits) {
859 ; 859 ;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 64ffe243f870..1e83669c204d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
278 struct page *vmpage = ep->ep_vmpage; 278 struct page *vmpage = ep->ep_vmpage;
279 279
280 atomic_dec(&eco->eo_npages); 280 atomic_dec(&eco->eo_npages);
281 page_cache_release(vmpage); 281 put_page(vmpage);
282} 282}
283 283
284static int echo_page_prep(const struct lu_env *env, 284static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
373 struct echo_object *eco = cl2echo_obj(obj); 373 struct echo_object *eco = cl2echo_obj(obj);
374 374
375 ep->ep_vmpage = vmpage; 375 ep->ep_vmpage = vmpage;
376 page_cache_get(vmpage); 376 get_page(vmpage);
377 mutex_init(&ep->ep_lock); 377 mutex_init(&ep->ep_lock);
378 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops); 378 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
379 atomic_inc(&eco->eo_npages); 379 atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1138 LASSERT(rc == 0); 1138 LASSERT(rc == 0);
1139 1139
1140 rc = cl_echo_enqueue0(env, eco, offset, 1140 rc = cl_echo_enqueue0(env, eco, offset,
1141 offset + npages * PAGE_CACHE_SIZE - 1, 1141 offset + npages * PAGE_SIZE - 1,
1142 rw == READ ? LCK_PR : LCK_PW, &lh.cookie, 1142 rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
1143 CEF_NEVER); 1143 CEF_NEVER);
1144 if (rc < 0) 1144 if (rc < 0)
@@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
1311 int delta; 1311 int delta;
1312 1312
1313 /* no partial pages on the client */ 1313 /* no partial pages on the client */
1314 LASSERT(count == PAGE_CACHE_SIZE); 1314 LASSERT(count == PAGE_SIZE);
1315 1315
1316 addr = kmap(page); 1316 addr = kmap(page);
1317 1317
1318 for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { 1318 for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
1319 if (rw == OBD_BRW_WRITE) { 1319 if (rw == OBD_BRW_WRITE) {
1320 stripe_off = offset + delta; 1320 stripe_off = offset + delta;
1321 stripe_id = id; 1321 stripe_id = id;
@@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
1341 int rc2; 1341 int rc2;
1342 1342
1343 /* no partial pages on the client */ 1343 /* no partial pages on the client */
1344 LASSERT(count == PAGE_CACHE_SIZE); 1344 LASSERT(count == PAGE_SIZE);
1345 1345
1346 addr = kmap(page); 1346 addr = kmap(page);
1347 1347
1348 for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { 1348 for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
1349 stripe_off = offset + delta; 1349 stripe_off = offset + delta;
1350 stripe_id = id; 1350 stripe_id = id;
1351 1351
@@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1391 return -EINVAL; 1391 return -EINVAL;
1392 1392
1393 /* XXX think again with misaligned I/O */ 1393 /* XXX think again with misaligned I/O */
1394 npages = count >> PAGE_CACHE_SHIFT; 1394 npages = count >> PAGE_SHIFT;
1395 1395
1396 if (rw == OBD_BRW_WRITE) 1396 if (rw == OBD_BRW_WRITE)
1397 brw_flags = OBD_BRW_ASYNC; 1397 brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1408 1408
1409 for (i = 0, pgp = pga, off = offset; 1409 for (i = 0, pgp = pga, off = offset;
1410 i < npages; 1410 i < npages;
1411 i++, pgp++, off += PAGE_CACHE_SIZE) { 1411 i++, pgp++, off += PAGE_SIZE) {
1412 1412
1413 LASSERT(!pgp->pg); /* for cleanup */ 1413 LASSERT(!pgp->pg); /* for cleanup */
1414 1414
@@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
1418 goto out; 1418 goto out;
1419 1419
1420 pages[i] = pgp->pg; 1420 pages[i] = pgp->pg;
1421 pgp->count = PAGE_CACHE_SIZE; 1421 pgp->count = PAGE_SIZE;
1422 pgp->off = off; 1422 pgp->off = off;
1423 pgp->flag = brw_flags; 1423 pgp->flag = brw_flags;
1424 1424
@@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
1473 if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) 1473 if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
1474 return -EINVAL; 1474 return -EINVAL;
1475 1475
1476 npages = batch >> PAGE_CACHE_SHIFT; 1476 npages = batch >> PAGE_SHIFT;
1477 tot_pages = count >> PAGE_CACHE_SHIFT; 1477 tot_pages = count >> PAGE_SHIFT;
1478 1478
1479 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); 1479 lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
1480 rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); 1480 rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
1497 if (tot_pages < npages) 1497 if (tot_pages < npages)
1498 npages = tot_pages; 1498 npages = tot_pages;
1499 1499
1500 for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { 1500 for (i = 0; i < npages; i++, off += PAGE_SIZE) {
1501 rnb[i].offset = off; 1501 rnb[i].offset = off;
1502 rnb[i].len = PAGE_CACHE_SIZE; 1502 rnb[i].len = PAGE_SIZE;
1503 rnb[i].flags = brw_flags; 1503 rnb[i].flags = brw_flags;
1504 } 1504 }
1505 1505
@@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
1878{ 1878{
1879 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); 1879 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
1880 1880
1881 LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); 1881 LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
1882 1882
1883 return echo_client_init(); 1883 return echo_client_init();
1884} 1884}
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 57c43c506ef2..a3358c39b2f1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
162 if (rc) 162 if (rc)
163 return rc; 163 return rc;
164 164
165 pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ 165 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
166 166
167 if (pages_number <= 0 || 167 if (pages_number <= 0 ||
168 pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || 168 pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
169 pages_number > totalram_pages / 4) /* 1/4 of RAM */ 169 pages_number > totalram_pages / 4) /* 1/4 of RAM */
170 return -ERANGE; 170 return -ERANGE;
171 171
172 client_obd_list_lock(&cli->cl_loi_list_lock); 172 client_obd_list_lock(&cli->cl_loi_list_lock);
173 cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT); 173 cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
174 osc_wake_cache_waiters(cli); 174 osc_wake_cache_waiters(cli);
175 client_obd_list_unlock(&cli->cl_loi_list_lock); 175 client_obd_list_unlock(&cli->cl_loi_list_lock);
176 176
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
182{ 182{
183 struct obd_device *dev = m->private; 183 struct obd_device *dev = m->private;
184 struct client_obd *cli = &dev->u.cli; 184 struct client_obd *cli = &dev->u.cli;
185 int shift = 20 - PAGE_CACHE_SHIFT; 185 int shift = 20 - PAGE_SHIFT;
186 186
187 seq_printf(m, 187 seq_printf(m,
188 "used_mb: %d\n" 188 "used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
211 return -EFAULT; 211 return -EFAULT;
212 kernbuf[count] = 0; 212 kernbuf[count] = 0;
213 213
214 mult = 1 << (20 - PAGE_CACHE_SHIFT); 214 mult = 1 << (20 - PAGE_SHIFT);
215 buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - 215 buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
216 kernbuf; 216 kernbuf;
217 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); 217 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
569 569
570 /* if the max_pages is specified in bytes, convert to pages */ 570 /* if the max_pages is specified in bytes, convert to pages */
571 if (val >= ONE_MB_BRW_SIZE) 571 if (val >= ONE_MB_BRW_SIZE)
572 val >>= PAGE_CACHE_SHIFT; 572 val >>= PAGE_SHIFT;
573 573
574 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); 574 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
575 /* max_pages_per_rpc must be chunk aligned */ 575 /* max_pages_per_rpc must be chunk aligned */
576 val = (val + ~chunk_mask) & chunk_mask; 576 val = (val + ~chunk_mask) & chunk_mask;
577 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { 577 if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
578 return -ERANGE; 578 return -ERANGE;
579 } 579 }
580 client_obd_list_lock(&cli->cl_loi_list_lock); 580 client_obd_list_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 63363111380c..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
544 return -ERANGE; 544 return -ERANGE;
545 545
546 LASSERT(cur->oe_osclock == victim->oe_osclock); 546 LASSERT(cur->oe_osclock == victim->oe_osclock);
547 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; 547 ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
548 chunk_start = cur->oe_start >> ppc_bits; 548 chunk_start = cur->oe_start >> ppc_bits;
549 chunk_end = cur->oe_end >> ppc_bits; 549 chunk_end = cur->oe_end >> ppc_bits;
550 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && 550 if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
647 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); 647 lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
648 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); 648 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
649 649
650 LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); 650 LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
651 ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 651 ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
652 chunk_mask = ~((1 << ppc_bits) - 1); 652 chunk_mask = ~((1 << ppc_bits) - 1);
653 chunksize = 1 << cli->cl_chunkbits; 653 chunksize = 1 << cli->cl_chunkbits;
654 chunk = index >> ppc_bits; 654 chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
871 871
872 if (!sent) { 872 if (!sent) {
873 lost_grant = ext->oe_grants; 873 lost_grant = ext->oe_grants;
874 } else if (blocksize < PAGE_CACHE_SIZE && 874 } else if (blocksize < PAGE_SIZE &&
875 last_count != PAGE_CACHE_SIZE) { 875 last_count != PAGE_SIZE) {
876 /* For short writes we shouldn't count parts of pages that 876 /* For short writes we shouldn't count parts of pages that
877 * span a whole chunk on the OST side, or our accounting goes 877 * span a whole chunk on the OST side, or our accounting goes
878 * wrong. Should match the code in filter_grant_check. 878 * wrong. Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
884 if (end) 884 if (end)
885 count += blocksize - end; 885 count += blocksize - end;
886 886
887 lost_grant = PAGE_CACHE_SIZE - count; 887 lost_grant = PAGE_SIZE - count;
888 } 888 }
889 if (ext->oe_grants > 0) 889 if (ext->oe_grants > 0)
890 osc_free_grant(cli, nr_pages, lost_grant); 890 osc_free_grant(cli, nr_pages, lost_grant);
@@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
967 struct osc_async_page *oap; 967 struct osc_async_page *oap;
968 struct osc_async_page *tmp; 968 struct osc_async_page *tmp;
969 int pages_in_chunk = 0; 969 int pages_in_chunk = 0;
970 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 970 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
971 __u64 trunc_chunk = trunc_index >> ppc_bits; 971 __u64 trunc_chunk = trunc_index >> ppc_bits;
972 int grants = 0; 972 int grants = 0;
973 int nr_pages = 0; 973 int nr_pages = 0;
@@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1125 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { 1125 if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
1126 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); 1126 last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
1127 LASSERT(last->oap_count > 0); 1127 LASSERT(last->oap_count > 0);
1128 LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); 1128 LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
1129 last->oap_async_flags |= ASYNC_COUNT_STABLE; 1129 last->oap_async_flags |= ASYNC_COUNT_STABLE;
1130 } 1130 }
1131 1131
@@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
1134 */ 1134 */
1135 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { 1135 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1136 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { 1136 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
1137 oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; 1137 oap->oap_count = PAGE_SIZE - oap->oap_page_off;
1138 oap->oap_async_flags |= ASYNC_COUNT_STABLE; 1138 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
1139 } 1139 }
1140 } 1140 }
@@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
1158 struct osc_object *obj = ext->oe_obj; 1158 struct osc_object *obj = ext->oe_obj;
1159 struct client_obd *cli = osc_cli(obj); 1159 struct client_obd *cli = osc_cli(obj);
1160 struct osc_extent *next; 1160 struct osc_extent *next;
1161 int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; 1161 int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
1162 pgoff_t chunk = index >> ppc_bits; 1162 pgoff_t chunk = index >> ppc_bits;
1163 pgoff_t end_chunk; 1163 pgoff_t end_chunk;
1164 pgoff_t end_index; 1164 pgoff_t end_index;
@@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
1293 return 0; 1293 return 0;
1294 else if (cl_offset(obj, page->cp_index + 1) > kms) 1294 else if (cl_offset(obj, page->cp_index + 1) > kms)
1295 /* catch sub-page write at end of file */ 1295 /* catch sub-page write at end of file */
1296 return kms % PAGE_CACHE_SIZE; 1296 return kms % PAGE_SIZE;
1297 else 1297 else
1298 return PAGE_CACHE_SIZE; 1298 return PAGE_SIZE;
1299} 1299}
1300 1300
1301static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, 1301static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
1376 assert_spin_locked(&cli->cl_loi_list_lock.lock); 1376 assert_spin_locked(&cli->cl_loi_list_lock.lock);
1377 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); 1377 LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
1378 atomic_inc(&obd_dirty_pages); 1378 atomic_inc(&obd_dirty_pages);
1379 cli->cl_dirty += PAGE_CACHE_SIZE; 1379 cli->cl_dirty += PAGE_SIZE;
1380 pga->flag |= OBD_BRW_FROM_GRANT; 1380 pga->flag |= OBD_BRW_FROM_GRANT;
1381 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", 1381 CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
1382 PAGE_CACHE_SIZE, pga, pga->pg); 1382 PAGE_SIZE, pga, pga->pg);
1383 osc_update_next_shrink(cli); 1383 osc_update_next_shrink(cli);
1384} 1384}
1385 1385
@@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
1396 1396
1397 pga->flag &= ~OBD_BRW_FROM_GRANT; 1397 pga->flag &= ~OBD_BRW_FROM_GRANT;
1398 atomic_dec(&obd_dirty_pages); 1398 atomic_dec(&obd_dirty_pages);
1399 cli->cl_dirty -= PAGE_CACHE_SIZE; 1399 cli->cl_dirty -= PAGE_SIZE;
1400 if (pga->flag & OBD_BRW_NOCACHE) { 1400 if (pga->flag & OBD_BRW_NOCACHE) {
1401 pga->flag &= ~OBD_BRW_NOCACHE; 1401 pga->flag &= ~OBD_BRW_NOCACHE;
1402 atomic_dec(&obd_dirty_transit_pages); 1402 atomic_dec(&obd_dirty_transit_pages);
1403 cli->cl_dirty_transit -= PAGE_CACHE_SIZE; 1403 cli->cl_dirty_transit -= PAGE_SIZE;
1404 } 1404 }
1405} 1405}
1406 1406
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
1456 * used, we should return these grants to OST. There're two cases where grants 1456 * used, we should return these grants to OST. There're two cases where grants
1457 * can be lost: 1457 * can be lost:
1458 * 1. truncate; 1458 * 1. truncate;
1459 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was 1459 * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
1460 * written. In this case OST may use less chunks to serve this partial 1460 * written. In this case OST may use less chunks to serve this partial
1461 * write. OSTs don't actually know the page size on the client side. so 1461 * write. OSTs don't actually know the page size on the client side. so
1462 * clients have to calculate lost grant by the blocksize on the OST. 1462 * clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
1469 1469
1470 client_obd_list_lock(&cli->cl_loi_list_lock); 1470 client_obd_list_lock(&cli->cl_loi_list_lock);
1471 atomic_sub(nr_pages, &obd_dirty_pages); 1471 atomic_sub(nr_pages, &obd_dirty_pages);
1472 cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; 1472 cli->cl_dirty -= nr_pages << PAGE_SHIFT;
1473 cli->cl_lost_grant += lost_grant; 1473 cli->cl_lost_grant += lost_grant;
1474 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { 1474 if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
1475 /* borrow some grant from truncate to avoid the case that 1475 /* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
1512 if (rc < 0) 1512 if (rc < 0)
1513 return 0; 1513 return 0;
1514 1514
1515 if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && 1515 if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
1516 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { 1516 atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
1517 osc_consume_write_grant(cli, &oap->oap_brw_page); 1517 osc_consume_write_grant(cli, &oap->oap_brw_page);
1518 if (transient) { 1518 if (transient) {
1519 cli->cl_dirty_transit += PAGE_CACHE_SIZE; 1519 cli->cl_dirty_transit += PAGE_SIZE;
1520 atomic_inc(&obd_dirty_transit_pages); 1520 atomic_inc(&obd_dirty_transit_pages);
1521 oap->oap_brw_flags |= OBD_BRW_NOCACHE; 1521 oap->oap_brw_flags |= OBD_BRW_NOCACHE;
1522 } 1522 }
@@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
1562 * of queued writes and create a discontiguous rpc stream 1562 * of queued writes and create a discontiguous rpc stream
1563 */ 1563 */
1564 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || 1564 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
1565 cli->cl_dirty_max < PAGE_CACHE_SIZE || 1565 cli->cl_dirty_max < PAGE_SIZE ||
1566 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { 1566 cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
1567 rc = -EDQUOT; 1567 rc = -EDQUOT;
1568 goto out; 1568 goto out;
@@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
1632 1632
1633 ocw->ocw_rc = -EDQUOT; 1633 ocw->ocw_rc = -EDQUOT;
1634 /* we can't dirty more */ 1634 /* we can't dirty more */
1635 if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || 1635 if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
1636 (atomic_read(&obd_dirty_pages) + 1 > 1636 (atomic_read(&obd_dirty_pages) + 1 >
1637 obd_max_dirty_pages)) { 1637 obd_max_dirty_pages)) {
1638 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", 1638 CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d720b1a1c18c..ce9ddd515f64 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
410 int result; 410 int result;
411 411
412 opg->ops_from = 0; 412 opg->ops_from = 0;
413 opg->ops_to = PAGE_CACHE_SIZE; 413 opg->ops_to = PAGE_SIZE;
414 414
415 result = osc_prep_async_page(osc, opg, vmpage, 415 result = osc_prep_async_page(osc, opg, vmpage,
416 cl_offset(obj, page->cp_index)); 416 cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
487/* LRU pages are freed in batch mode. OSC should at least free this 487/* LRU pages are freed in batch mode. OSC should at least free this
488 * number of pages to avoid running out of LRU budget, and.. 488 * number of pages to avoid running out of LRU budget, and..
489 */ 489 */
490static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ 490static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
491/* free this number at most otherwise it will take too long time to finish. */ 491/* free this number at most otherwise it will take too long time to finish. */
492static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ 492static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
493 493
494/* Check if we can free LRU slots from this OSC. If there exists LRU waiters, 494/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
495 * we should free slots aggressively. In this way, slots are freed in a steady 495 * we should free slots aggressively. In this way, slots are freed in a steady
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 74805f1ae888..30526ebcad04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
826 oa->o_undirty = 0; 826 oa->o_undirty = 0;
827 } else { 827 } else {
828 long max_in_flight = (cli->cl_max_pages_per_rpc << 828 long max_in_flight = (cli->cl_max_pages_per_rpc <<
829 PAGE_CACHE_SHIFT)* 829 PAGE_SHIFT)*
830 (cli->cl_max_rpcs_in_flight + 1); 830 (cli->cl_max_rpcs_in_flight + 1);
831 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); 831 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
832 } 832 }
@@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
909static int osc_shrink_grant(struct client_obd *cli) 909static int osc_shrink_grant(struct client_obd *cli)
910{ 910{
911 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * 911 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
912 (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); 912 (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
913 913
914 client_obd_list_lock(&cli->cl_loi_list_lock); 914 client_obd_list_lock(&cli->cl_loi_list_lock);
915 if (cli->cl_avail_grant <= target_bytes) 915 if (cli->cl_avail_grant <= target_bytes)
916 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 916 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
917 client_obd_list_unlock(&cli->cl_loi_list_lock); 917 client_obd_list_unlock(&cli->cl_loi_list_lock);
918 918
919 return osc_shrink_grant_to_target(cli, target_bytes); 919 return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
929 * We don't want to shrink below a single RPC, as that will negatively 929 * We don't want to shrink below a single RPC, as that will negatively
930 * impact block allocation and long-term performance. 930 * impact block allocation and long-term performance.
931 */ 931 */
932 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) 932 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
933 target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 933 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
934 934
935 if (target_bytes >= cli->cl_avail_grant) { 935 if (target_bytes >= cli->cl_avail_grant) {
936 client_obd_list_unlock(&cli->cl_loi_list_lock); 936 client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
978 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) 978 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
979 * Keep comment here so that it can be found by searching. 979 * Keep comment here so that it can be found by searching.
980 */ 980 */
981 int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; 981 int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
982 982
983 if (client->cl_import->imp_state == LUSTRE_IMP_FULL && 983 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
984 client->cl_avail_grant > brw_size) 984 client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1052 } 1052 }
1053 1053
1054 /* determine the appropriate chunk size used by osc_extent. */ 1054 /* determine the appropriate chunk size used by osc_extent. */
1055 cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); 1055 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
1056 client_obd_list_unlock(&cli->cl_loi_list_lock); 1056 client_obd_list_unlock(&cli->cl_loi_list_lock);
1057 1057
1058 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", 1058 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1317 LASSERT(pg->count > 0); 1317 LASSERT(pg->count > 0);
1318 /* make sure there is no gap in the middle of page array */ 1318 /* make sure there is no gap in the middle of page array */
1319 LASSERTF(page_count == 1 || 1319 LASSERTF(page_count == 1 ||
1320 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && 1320 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1321 ergo(i > 0 && i < page_count - 1, 1321 ergo(i > 0 && i < page_count - 1,
1322 poff == 0 && pg->count == PAGE_CACHE_SIZE) && 1322 poff == 0 && pg->count == PAGE_SIZE) &&
1323 ergo(i == page_count - 1, poff == 0)), 1323 ergo(i == page_count - 1, poff == 0)),
1324 "i: %d/%d pg: %p off: %llu, count: %u\n", 1324 "i: %d/%d pg: %p off: %llu, count: %u\n",
1325 i, page_count, pg, pg->off, pg->count); 1325 i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1877 oap->oap_count; 1877 oap->oap_count;
1878 else 1878 else
1879 LASSERT(oap->oap_page_off + oap->oap_count == 1879 LASSERT(oap->oap_page_off + oap->oap_count ==
1880 PAGE_CACHE_SIZE); 1880 PAGE_SIZE);
1881 } 1881 }
1882 } 1882 }
1883 1883
@@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1993 tmp->oap_request = ptlrpc_request_addref(req); 1993 tmp->oap_request = ptlrpc_request_addref(req);
1994 1994
1995 client_obd_list_lock(&cli->cl_loi_list_lock); 1995 client_obd_list_lock(&cli->cl_loi_list_lock);
1996 starting_offset >>= PAGE_CACHE_SHIFT; 1996 starting_offset >>= PAGE_SHIFT;
1997 if (cmd == OBD_BRW_READ) { 1997 if (cmd == OBD_BRW_READ) {
1998 cli->cl_r_in_flight++; 1998 cli->cl_r_in_flight++;
1999 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); 1999 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@ out:
2790 CFS_PAGE_MASK; 2790 CFS_PAGE_MASK;
2791 2791
2792 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= 2792 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2793 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) 2793 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
2794 policy.l_extent.end = OBD_OBJECT_EOF; 2794 policy.l_extent.end = OBD_OBJECT_EOF;
2795 else 2795 else
2796 policy.l_extent.end = (fm_key->fiemap.fm_start + 2796 policy.l_extent.end = (fm_key->fiemap.fm_start +
2797 fm_key->fiemap.fm_length + 2797 fm_key->fiemap.fm_length +
2798 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; 2798 PAGE_SIZE - 1) & CFS_PAGE_MASK;
2799 2799
2800 ostid_build_res_name(&fm_key->oa.o_oi, &res_id); 2800 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2801 mode = ldlm_lock_match(exp->exp_obd->obd_namespace, 2801 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 1b7673eec4d7..cf3ac8eee9ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
174 LASSERT(page); 174 LASSERT(page);
175 LASSERT(pageoffset >= 0); 175 LASSERT(pageoffset >= 0);
176 LASSERT(len > 0); 176 LASSERT(len > 0);
177 LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); 177 LASSERT(pageoffset + len <= PAGE_SIZE);
178 178
179 desc->bd_nob += len; 179 desc->bd_nob += len;
180 180
181 if (pin) 181 if (pin)
182 page_cache_get(page); 182 get_page(page);
183 183
184 ptlrpc_add_bulk_page(desc, page, pageoffset, len); 184 ptlrpc_add_bulk_page(desc, page, pageoffset, len);
185} 185}
@@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
206 206
207 if (unpin) { 207 if (unpin) {
208 for (i = 0; i < desc->bd_iov_count; i++) 208 for (i = 0; i < desc->bd_iov_count; i++)
209 page_cache_release(desc->bd_iov[i].kiov_page); 209 put_page(desc->bd_iov[i].kiov_page);
210 } 210 }
211 211
212 kfree(desc); 212 kfree(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf291269..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@ finish:
1092 1092
1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) 1093 if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
1094 cli->cl_max_pages_per_rpc = 1094 cli->cl_max_pages_per_rpc =
1095 min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, 1095 min(ocd->ocd_brw_size >> PAGE_SHIFT,
1096 cli->cl_max_pages_per_rpc); 1096 cli->cl_max_pages_per_rpc);
1097 else if (imp->imp_connect_op == MDS_CONNECT || 1097 else if (imp->imp_connect_op == MDS_CONNECT ||
1098 imp->imp_connect_op == MGS_CONNECT) 1098 imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04efb6fb5..c95a91ce26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
308 * hose a kernel by allowing the request history to grow too 308 * hose a kernel by allowing the request history to grow too
309 * far. 309 * far.
310 */ 310 */
311 bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 311 bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
312 if (val > totalram_pages / (2 * bufpages)) 312 if (val > totalram_pages / (2 * bufpages))
313 return -ERANGE; 313 return -ERANGE;
314 314
@@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
1226 const char prefix[] = "connection="; 1226 const char prefix[] = "connection=";
1227 const int prefix_len = sizeof(prefix) - 1; 1227 const int prefix_len = sizeof(prefix) - 1;
1228 1228
1229 if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) 1229 if (count > PAGE_SIZE - 1 || count <= prefix_len)
1230 return -EINVAL; 1230 return -EINVAL;
1231 1231
1232 kbuf = kzalloc(count + 1, GFP_NOFS); 1232 kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c2e4ef..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
195 } 195 }
196 196
197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { 197 list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
198 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, 198 LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
199 "req %p bad\n", req); 199 "req %p bad\n", req);
200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); 200 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
201 if (!ptlrpc_no_resend(req)) 201 if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 72d5b9bf5b29..d3872b8c9a6e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
58 * bulk encryption page pools * 58 * bulk encryption page pools *
59 ****************************************/ 59 ****************************************/
60 60
61#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 61#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
62#define PAGES_PER_POOL (POINTERS_PER_PAGE) 62#define PAGES_PER_POOL (POINTERS_PER_PAGE)
63 63
64#define IDLE_IDX_MAX (100) 64#define IDLE_IDX_MAX (100)
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
172static int vpfe_update_pipe_state(struct vpfe_video_device *video) 172static int vpfe_update_pipe_state(struct vpfe_video_device *video)
173{ 173{
174 struct vpfe_pipeline *pipe = &video->pipe; 174 struct vpfe_pipeline *pipe = &video->pipe;
175 int ret;
175 176
176 if (vpfe_prepare_pipeline(video)) 177 ret = vpfe_prepare_pipeline(video);
177 return vpfe_prepare_pipeline(video); 178 if (ret)
179 return ret;
178 180
179 /* 181 /*
180 * Find out if there is any input video 182 * Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
182 */ 184 */
183 if (pipe->input_num == 0) { 185 if (pipe->input_num == 0) {
184 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; 186 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
185 if (vpfe_update_current_ext_subdev(video)) { 187 ret = vpfe_update_current_ext_subdev(video);
188 if (ret) {
186 pr_err("Invalid external subdev\n"); 189 pr_err("Invalid external subdev\n");
187 return vpfe_update_current_ext_subdev(video); 190 return ret;
188 } 191 }
189 } else { 192 } else {
190 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; 193 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
667 struct v4l2_subdev *subdev; 670 struct v4l2_subdev *subdev;
668 struct v4l2_format format; 671 struct v4l2_format format;
669 struct media_pad *remote; 672 struct media_pad *remote;
673 int ret;
670 674
671 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); 675 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
672 676
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
695 sd_fmt.pad = remote->index; 699 sd_fmt.pad = remote->index;
696 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 700 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
697 /* get output format of remote subdev */ 701 /* get output format of remote subdev */
698 if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { 702 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
703 if (ret) {
699 v4l2_err(&vpfe_dev->v4l2_dev, 704 v4l2_err(&vpfe_dev->v4l2_dev,
700 "invalid remote subdev for video node\n"); 705 "invalid remote subdev for video node\n");
701 return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 706 return ret;
702 } 707 }
703 /* convert to pix format */ 708 /* convert to pix format */
704 mbus.code = sd_fmt.format.code; 709 mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
725 struct vpfe_video_device *video = video_drvdata(file); 730 struct vpfe_video_device *video = video_drvdata(file);
726 struct vpfe_device *vpfe_dev = video->vpfe_dev; 731 struct vpfe_device *vpfe_dev = video->vpfe_dev;
727 struct v4l2_format format; 732 struct v4l2_format format;
733 int ret;
728 734
729 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); 735 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
730 /* If streaming is started, return error */ 736 /* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
733 return -EBUSY; 739 return -EBUSY;
734 } 740 }
735 /* get adjacent subdev's output pad format */ 741 /* get adjacent subdev's output pad format */
736 if (__vpfe_video_get_format(video, &format)) 742 ret = __vpfe_video_get_format(video, &format);
737 return __vpfe_video_get_format(video, &format); 743 if (ret)
744 return ret;
738 *fmt = format; 745 *fmt = format;
739 video->fmt = *fmt; 746 video->fmt = *fmt;
740 return 0; 747 return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
757 struct vpfe_video_device *video = video_drvdata(file); 764 struct vpfe_video_device *video = video_drvdata(file);
758 struct vpfe_device *vpfe_dev = video->vpfe_dev; 765 struct vpfe_device *vpfe_dev = video->vpfe_dev;
759 struct v4l2_format format; 766 struct v4l2_format format;
767 int ret;
760 768
761 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); 769 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
762 /* get adjacent subdev's output pad format */ 770 /* get adjacent subdev's output pad format */
763 if (__vpfe_video_get_format(video, &format)) 771 ret = __vpfe_video_get_format(video, &format);
764 return __vpfe_video_get_format(video, &format); 772 if (ret)
773 return ret;
765 774
766 *fmt = format; 775 *fmt = format;
767 return 0; 776 return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
838 847
839 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); 848 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
840 849
841 if (mutex_lock_interruptible(&video->lock)) 850 ret = mutex_lock_interruptible(&video->lock);
842 return mutex_lock_interruptible(&video->lock); 851 if (ret)
852 return ret;
843 /* 853 /*
844 * If streaming is started return device busy 854 * If streaming is started return device busy
845 * error 855 * error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
940 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); 950 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
941 951
942 /* Call decoder driver function to set the standard */ 952 /* Call decoder driver function to set the standard */
943 if (mutex_lock_interruptible(&video->lock)) 953 ret = mutex_lock_interruptible(&video->lock);
944 return mutex_lock_interruptible(&video->lock); 954 if (ret)
955 return ret;
945 sdinfo = video->current_ext_subdev; 956 sdinfo = video->current_ext_subdev;
946 /* If streaming is started, return device busy error */ 957 /* If streaming is started, return device busy error */
947 if (video->started) { 958 if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1327 return -EINVAL; 1338 return -EINVAL;
1328 } 1339 }
1329 1340
1330 if (mutex_lock_interruptible(&video->lock)) 1341 ret = mutex_lock_interruptible(&video->lock);
1331 return mutex_lock_interruptible(&video->lock); 1342 if (ret)
1343 return ret;
1332 1344
1333 if (video->io_usrs != 0) { 1345 if (video->io_usrs != 0) {
1334 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); 1346 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1354 q->buf_struct_size = sizeof(struct vpfe_cap_buffer); 1366 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
1355 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1367 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1356 1368
1357 if (vb2_queue_init(q)) { 1369 ret = vb2_queue_init(q);
1370 if (ret) {
1358 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); 1371 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1359 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); 1372 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
1360 return vb2_queue_init(q); 1373 return ret;
1361 } 1374 }
1362 1375
1363 fh->io_allowed = 1; 1376 fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
1533 return -EINVAL; 1546 return -EINVAL;
1534 } 1547 }
1535 1548
1536 if (mutex_lock_interruptible(&video->lock)) 1549 ret = mutex_lock_interruptible(&video->lock);
1537 return mutex_lock_interruptible(&video->lock); 1550 if (ret)
1551 return ret;
1538 1552
1539 vpfe_stop_capture(video); 1553 vpfe_stop_capture(video);
1540 ret = vb2_streamoff(&video->buffer_queue, buf_type); 1554 ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644
index 000000000000..d277f048789e
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -0,0 +1,35 @@
1config FB_OLPC_DCON
2 tristate "One Laptop Per Child Display CONtroller support"
3 depends on OLPC && FB
4 depends on I2C
5 depends on (GPIO_CS5535 || GPIO_CS5535=n)
6 select BACKLIGHT_CLASS_DEVICE
7 ---help---
8 In order to support very low power operation, the XO laptop uses a
9 secondary Display CONtroller, or DCON. This secondary controller
10 is present in the video pipeline between the primary display
11 controller (integrate into the processor or chipset) and the LCD
12 panel. It allows the main processor/display controller to be
13 completely powered off while still retaining an image on the display.
14 This controller is only available on OLPC platforms. Unless you have
15 one of these platforms, you will want to say 'N'.
16
17config FB_OLPC_DCON_1
18 bool "OLPC XO-1 DCON support"
19 depends on FB_OLPC_DCON && GPIO_CS5535
20 default y
21 ---help---
22 Enable support for the DCON in XO-1 model laptops. The kernel
23 communicates with the DCON using model-specific code. If you
24 have an XO-1 (or if you're unsure what model you have), you should
25 say 'Y'.
26
27config FB_OLPC_DCON_1_5
28 bool "OLPC XO-1.5 DCON support"
29 depends on FB_OLPC_DCON && ACPI
30 default y
31 ---help---
32 Enable support for the DCON in XO-1.5 model laptops. The kernel
33 communicates with the DCON using model-specific code. If you
34 have an XO-1.5 (or if you're unsure what model you have), you
35 should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644
index 000000000000..36c7e67fec20
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -0,0 +1,6 @@
1olpc-dcon-objs += olpc_dcon.o
2olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
3olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
4obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
5
6
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644
index 000000000000..61c2e65ac354
--- /dev/null
+++ b/drivers/staging/olpc_dcon/TODO
@@ -0,0 +1,9 @@
1TODO:
2 - see if vx855 gpio API can be made similar enough to cs5535 so we can
3 share more code
4 - allow simultaneous XO-1 and XO-1.5 support
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7copy:
8 Daniel Drake <dsd@laptop.org>
9 Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644
index 000000000000..f45b2ef05f48
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -0,0 +1,813 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/kernel.h>
17#include <linux/fb.h>
18#include <linux/console.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/module.h>
24#include <linux/backlight.h>
25#include <linux/device.h>
26#include <linux/uaccess.h>
27#include <linux/ctype.h>
28#include <linux/reboot.h>
29#include <linux/olpc-ec.h>
30#include <asm/tsc.h>
31#include <asm/olpc.h>
32
33#include "olpc_dcon.h"
34
35/* Module definitions */
36
37static ushort resumeline = 898;
38module_param(resumeline, ushort, 0444);
39
40static struct dcon_platform_data *pdata;
41
42/* I2C structures */
43
44/* Platform devices */
45static struct platform_device *dcon_device;
46
47static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
48
49static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
50{
51 return i2c_smbus_write_word_data(dcon->client, reg, val);
52}
53
54static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
55{
56 return i2c_smbus_read_word_data(dcon->client, reg);
57}
58
59/* ===== API functions - these are called by a variety of users ==== */
60
61static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
62{
63 u16 ver;
64 int rc = 0;
65
66 ver = dcon_read(dcon, DCON_REG_ID);
67 if ((ver >> 8) != 0xDC) {
68 pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
69 rc = -ENXIO;
70 goto err;
71 }
72
73 if (is_init) {
74 pr_info("Discovered DCON version %x\n", ver & 0xFF);
75 rc = pdata->init(dcon);
76 if (rc != 0) {
77 pr_err("Unable to init.\n");
78 goto err;
79 }
80 }
81
82 if (ver < 0xdc02) {
83 dev_err(&dcon->client->dev,
84 "DCON v1 is unsupported, giving up..\n");
85 rc = -ENODEV;
86 goto err;
87 }
88
89 /* SDRAM setup/hold time */
90 dcon_write(dcon, 0x3a, 0xc040);
91 dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
92 dcon_write(dcon, DCON_REG_MEM_OPT_A,
93 MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
94 dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
95
96 /* Colour swizzle, AA, no passthrough, backlight */
97 if (is_init) {
98 dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
99 MODE_CSWIZZLE | MODE_COL_AA;
100 }
101 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
102
103 /* Set the scanline to interrupt on during resume */
104 dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
105
106err:
107 return rc;
108}
109
110/*
111 * The smbus doesn't always come back due to what is believed to be
112 * hardware (power rail) bugs. For older models where this is known to
113 * occur, our solution is to attempt to wait for the bus to stabilize;
114 * if it doesn't happen, cut power to the dcon, repower it, and wait
115 * for the bus to stabilize. Rinse, repeat until we have a working
116 * smbus. For newer models, we simply BUG(); we want to know if this
117 * still happens despite the power fixes that have been made!
118 */
/*
 * Bring the DCON's smbus into a usable state.  @is_powered_down says
 * whether the DCON is currently off; if so (or after a power-cycle
 * below) the hardware is re-initialized via dcon_hw_init() before
 * returning.  Returns 0 on success or a negative error code.
 */
static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
{
	unsigned long timeout;
	u8 pm;
	int x;

power_up:
	if (is_powered_down) {
		pm = 1;
		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		if (x) {
			pr_warn("unable to force dcon to power up: %d!\n", x);
			return x;
		}
		usleep_range(10000, 11000); /* we'll be conservative */
	}

	/* let the platform code wiggle the bus lines to reset the DCON */
	pdata->bus_stabilize_wiggle();

	/* poll (up to ~50ms) until the DCON answers an ID register read */
	for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
		usleep_range(1000, 1100);
		x = dcon_read(dcon, DCON_REG_ID);
	}
	if (x < 0) {
		pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
		/* newer boards should never hit this; scream if they do */
		BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
		pm = 0;
		olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		msleep(100);
		is_powered_down = 1;
		goto power_up; /* argh, stupid hardware.. */
	}

	if (is_powered_down)
		return dcon_hw_init(dcon, 0);
	return 0;
}
156
157static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
158{
159 dcon->bl_val = level;
160 dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
161
162 /* Purposely turn off the backlight when we go to level 0 */
163 if (dcon->bl_val == 0) {
164 dcon->disp_mode &= ~MODE_BL_ENABLE;
165 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
166 } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
167 dcon->disp_mode |= MODE_BL_ENABLE;
168 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
169 }
170}
171
172/* Set the output type to either color or mono */
173static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
174{
175 if (dcon->mono == enable_mono)
176 return 0;
177
178 dcon->mono = enable_mono;
179
180 if (enable_mono) {
181 dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
182 dcon->disp_mode |= MODE_MONO_LUMA;
183 } else {
184 dcon->disp_mode &= ~(MODE_MONO_LUMA);
185 dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
186 }
187
188 dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
189 return 0;
190}
191
/* For now, this will be really stupid - we need to address how
 * DCONLOAD works in a sleep and account for it accordingly
 */

static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
{
	int x;

	/* Turn off the backlight and put the DCON to sleep */

	if (dcon->asleep == sleep)
		return;

	/* boards older than 0xc2 don't support powering the DCON down */
	if (!olpc_board_at_least(olpc_board(0xc2)))
		return;

	if (sleep) {
		u8 pm = 0;

		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
		if (x)
			pr_warn("unable to force dcon to power down: %d!\n", x);
		else
			dcon->asleep = sleep;
	} else {
		/* Only re-enable the backlight if the backlight value is set */
		if (dcon->bl_val != 0)
			dcon->disp_mode |= MODE_BL_ENABLE;
		/* repower the DCON and re-init its registers */
		x = dcon_bus_stabilize(dcon, 1);
		if (x)
			pr_warn("unable to reinit dcon hardware: %d!\n", x);
		else
			dcon->asleep = sleep;

		/* Restore backlight */
		dcon_set_backlight(dcon, dcon->bl_val);
	}

	/* We should turn off some stuff in the framebuffer - but what? */
}
232
233/* the DCON seems to get confused if we change DCONLOAD too
234 * frequently -- i.e., approximately faster than frame time.
235 * normally we don't change it this fast, so in general we won't
236 * delay here.
237 */
238static void dcon_load_holdoff(struct dcon_priv *dcon)
239{
240 ktime_t delta_t, now;
241
242 while (1) {
243 now = ktime_get();
244 delta_t = ktime_sub(now, dcon->load_time);
245 if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
246 break;
247 mdelay(4);
248 }
249}
250
/*
 * Blank or unblank the attached framebuffer.  Takes the console lock
 * and then the fb_info lock around fb_blank(), and suppresses our own
 * fb-event handling while doing so.  Returns true on success.
 */
static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
{
	int err;

	console_lock();
	if (!lock_fb_info(dcon->fbinfo)) {
		console_unlock();
		dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
		return false;
	}

	/* don't react to the blank events we generate ourselves */
	dcon->ignore_fb_events = true;
	err = fb_blank(dcon->fbinfo,
			blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
	dcon->ignore_fb_events = false;
	unlock_fb_info(dcon->fbinfo);
	console_unlock();

	if (err) {
		dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
				blank ? "" : "un");
		return false;
	}
	return true;
}
276
/* Set the source of the display (CPU or DCON).  Runs from the
 * switch_source workqueue item; waiters are woken by dcon_interrupt()
 * setting dcon->switched once the hardware acks the switch.
 */
static void dcon_source_switch(struct work_struct *work)
{
	struct dcon_priv *dcon = container_of(work, struct dcon_priv,
			switch_source);
	int source = dcon->pending_src;

	if (dcon->curr_src == source)
		return;

	/* don't flip DCONLOAD faster than the DCON can follow */
	dcon_load_holdoff(dcon);

	dcon->switched = false;

	switch (source) {
	case DCON_SOURCE_CPU:
		pr_info("dcon_source_switch to CPU\n");
		/* Enable the scanline interrupt bit */
		if (dcon_write(dcon, DCON_REG_MODE,
				dcon->disp_mode | MODE_SCAN_INT))
			pr_err("couldn't enable scanline interrupt!\n");
		else
			/* Wait up to one second for the scanline interrupt */
			wait_event_timeout(dcon->waitq, dcon->switched, HZ);

		if (!dcon->switched)
			pr_err("Timeout entering CPU mode; expect a screen glitch.\n");

		/* Turn off the scanline interrupt */
		if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
			pr_err("couldn't disable scanline interrupt!\n");

		/*
		 * Ideally we'd like to disable interrupts here so that the
		 * fb unblanking and DCON turn on happen at a known time value;
		 * however, we can't do that right now with fb_blank
		 * messing with semaphores.
		 *
		 * For now, we just hope..
		 */
		if (!dcon_blank_fb(dcon, false)) {
			/* leave pending_src at DCON so a later request
			 * can retry the switch
			 */
			pr_err("Failed to enter CPU mode\n");
			dcon->pending_src = DCON_SOURCE_DCON;
			return;
		}

		/* And turn off the DCON */
		pdata->set_dconload(1);
		dcon->load_time = ktime_get();

		pr_info("The CPU has control\n");
		break;
	case DCON_SOURCE_DCON:
	{
		ktime_t delta_t;

		pr_info("dcon_source_switch to DCON\n");

		/* Clear DCONLOAD - this implies that the DCON is in control */
		pdata->set_dconload(0);
		dcon->load_time = ktime_get();

		wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);

		if (!dcon->switched) {
			pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
		} else {
			/* sometimes the DCON doesn't follow its own rules,
			 * and doesn't wait for two vsync pulses before
			 * ack'ing the frame load with an IRQ. the result
			 * is that the display shows the *previously*
			 * loaded frame. we can detect this by looking at
			 * the time between asserting DCONLOAD and the IRQ --
			 * if it's less than 20msec, then the DCON couldn't
			 * have seen two VSYNC pulses. in that case we
			 * deassert and reassert, and hope for the best.
			 * see http://dev.laptop.org/ticket/9664
			 */
			delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
			if (dcon->switched && ktime_to_ns(delta_t)
					< NSEC_PER_MSEC * 20) {
				pr_err("missed loading, retrying\n");
				pdata->set_dconload(1);
				mdelay(41);
				pdata->set_dconload(0);
				dcon->load_time = ktime_get();
				mdelay(41);
			}
		}

		dcon_blank_fb(dcon, true);
		pr_info("The DCON has control\n");
		break;
	}
	default:
		BUG();
	}

	dcon->curr_src = source;
}
377
378static void dcon_set_source(struct dcon_priv *dcon, int arg)
379{
380 if (dcon->pending_src == arg)
381 return;
382
383 dcon->pending_src = arg;
384
385 if (dcon->curr_src != arg)
386 schedule_work(&dcon->switch_source);
387}
388
389static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
390{
391 dcon_set_source(dcon, arg);
392 flush_scheduled_work();
393}
394
/* sysfs: current DCON mode-register shadow, as four hex digits. */
static ssize_t dcon_mode_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct dcon_priv *dcon = dev_get_drvdata(dev);

	return sprintf(buf, "%4.4X\n", dcon->disp_mode);
}
402
/* sysfs: 1 if the DCON is asleep, 0 otherwise. */
static ssize_t dcon_sleep_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct dcon_priv *dcon = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dcon->asleep);
}
410
/* sysfs: 1 if the DCON currently owns the display (frozen), else 0. */
static ssize_t dcon_freeze_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct dcon_priv *dcon = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
}
418
/* sysfs: 1 for mono output, 0 for colour. */
static ssize_t dcon_mono_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct dcon_priv *dcon = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dcon->mono);
}
426
/* sysfs: scanline on which the resume interrupt fires. */
static ssize_t dcon_resumeline_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", resumeline);
}
432
433static ssize_t dcon_mono_store(struct device *dev,
434 struct device_attribute *attr, const char *buf, size_t count)
435{
436 unsigned long enable_mono;
437 int rc;
438
439 rc = kstrtoul(buf, 10, &enable_mono);
440 if (rc)
441 return rc;
442
443 dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
444
445 return count;
446}
447
448static ssize_t dcon_freeze_store(struct device *dev,
449 struct device_attribute *attr, const char *buf, size_t count)
450{
451 struct dcon_priv *dcon = dev_get_drvdata(dev);
452 unsigned long output;
453 int ret;
454
455 ret = kstrtoul(buf, 10, &output);
456 if (ret)
457 return ret;
458
459 pr_info("dcon_freeze_store: %lu\n", output);
460
461 switch (output) {
462 case 0:
463 dcon_set_source(dcon, DCON_SOURCE_CPU);
464 break;
465 case 1:
466 dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
467 break;
468 case 2: /* normally unused */
469 dcon_set_source(dcon, DCON_SOURCE_DCON);
470 break;
471 default:
472 return -EINVAL;
473 }
474
475 return count;
476}
477
478static ssize_t dcon_resumeline_store(struct device *dev,
479 struct device_attribute *attr, const char *buf, size_t count)
480{
481 unsigned short rl;
482 int rc;
483
484 rc = kstrtou16(buf, 10, &rl);
485 if (rc)
486 return rc;
487
488 resumeline = rl;
489 dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
490
491 return count;
492}
493
494static ssize_t dcon_sleep_store(struct device *dev,
495 struct device_attribute *attr, const char *buf, size_t count)
496{
497 unsigned long output;
498 int ret;
499
500 ret = kstrtoul(buf, 10, &output);
501 if (ret)
502 return ret;
503
504 dcon_sleep(dev_get_drvdata(dev), output ? true : false);
505 return count;
506}
507
/* sysfs attributes exposed under the "dcon" platform device */
static struct device_attribute dcon_device_files[] = {
	__ATTR(mode, 0444, dcon_mode_show, NULL),
	__ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
	__ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
	__ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
};
515
/* backlight core callback: push brightness/power state to the DCON. */
static int dcon_bl_update(struct backlight_device *dev)
{
	struct dcon_priv *dcon = bl_get_data(dev);
	u8 level = dev->props.brightness & 0x0F;	/* hw range is 0-15 */

	if (dev->props.power != FB_BLANK_UNBLANK)
		level = 0;

	if (level != dcon->bl_val)
		dcon_set_backlight(dcon, level);

	/* power down the DCON when the screen is blanked */
	if (!dcon->ignore_fb_events)
		dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));

	return 0;
}
533
534static int dcon_bl_get(struct backlight_device *dev)
535{
536 struct dcon_priv *dcon = bl_get_data(dev);
537
538 return dcon->bl_val;
539}
540
/* backlight core hooks */
static const struct backlight_ops dcon_bl_ops = {
	.update_status = dcon_bl_update,
	.get_brightness = dcon_bl_get,
};

/* the brightness register holds four bits, so levels run 0-15 */
static struct backlight_properties dcon_bl_props = {
	.max_brightness = 15,
	.type = BACKLIGHT_RAW,
	.power = FB_BLANK_UNBLANK,
};
551
/* Reboot notifier: shut the DCON down before the machine resets. */
static int dcon_reboot_notify(struct notifier_block *nb,
			unsigned long foo, void *bar)
{
	struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);

	if (!dcon || !dcon->client)
		return NOTIFY_DONE;

	/* Turn off the DCON. Entirely. */
	dcon_write(dcon, DCON_REG_MODE, 0x39);
	dcon_write(dcon, DCON_REG_MODE, 0x32);
	return NOTIFY_DONE;
}
565
/* Panic notifier: hand the display back to the CPU so the oops shows. */
static int unfreeze_on_panic(struct notifier_block *nb,
			unsigned long e, void *p)
{
	pdata->set_dconload(1);
	return NOTIFY_DONE;
}

static struct notifier_block dcon_panic_nb = {
	.notifier_call = unfreeze_on_panic,
};
576
/* i2c detect hook: any device at our address is assumed to be a DCON. */
static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);

	return 0;
}
583
584static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
585{
586 struct dcon_priv *dcon;
587 int rc, i, j;
588
589 if (!pdata)
590 return -ENXIO;
591
592 dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
593 if (!dcon)
594 return -ENOMEM;
595
596 dcon->client = client;
597 init_waitqueue_head(&dcon->waitq);
598 INIT_WORK(&dcon->switch_source, dcon_source_switch);
599 dcon->reboot_nb.notifier_call = dcon_reboot_notify;
600 dcon->reboot_nb.priority = -1;
601
602 i2c_set_clientdata(client, dcon);
603
604 if (num_registered_fb < 1) {
605 dev_err(&client->dev, "DCON driver requires a registered fb\n");
606 rc = -EIO;
607 goto einit;
608 }
609 dcon->fbinfo = registered_fb[0];
610
611 rc = dcon_hw_init(dcon, 1);
612 if (rc)
613 goto einit;
614
615 /* Add the DCON device */
616
617 dcon_device = platform_device_alloc("dcon", -1);
618
619 if (!dcon_device) {
620 pr_err("Unable to create the DCON device\n");
621 rc = -ENOMEM;
622 goto eirq;
623 }
624 rc = platform_device_add(dcon_device);
625 platform_set_drvdata(dcon_device, dcon);
626
627 if (rc) {
628 pr_err("Unable to add the DCON device\n");
629 goto edev;
630 }
631
632 for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
633 rc = device_create_file(&dcon_device->dev,
634 &dcon_device_files[i]);
635 if (rc) {
636 dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
637 goto ecreate;
638 }
639 }
640
641 dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
642
643 /* Add the backlight device for the DCON */
644 dcon_bl_props.brightness = dcon->bl_val;
645 dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
646 dcon, &dcon_bl_ops, &dcon_bl_props);
647 if (IS_ERR(dcon->bl_dev)) {
648 dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
649 PTR_ERR(dcon->bl_dev));
650 dcon->bl_dev = NULL;
651 }
652
653 register_reboot_notifier(&dcon->reboot_nb);
654 atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
655
656 return 0;
657
658 ecreate:
659 for (j = 0; j < i; j++)
660 device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
661 edev:
662 platform_device_unregister(dcon_device);
663 dcon_device = NULL;
664 eirq:
665 free_irq(DCON_IRQ, dcon);
666 einit:
667 kfree(dcon);
668 return rc;
669}
670
/* i2c remove: undo everything dcon_probe() set up. */
static int dcon_remove(struct i2c_client *client)
{
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	unregister_reboot_notifier(&dcon->reboot_nb);
	atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);

	free_irq(DCON_IRQ, dcon);

	backlight_device_unregister(dcon->bl_dev);

	if (dcon_device)
		platform_device_unregister(dcon_device);
	/* make sure no source switch is still in flight before freeing */
	cancel_work_sync(&dcon->switch_source);

	kfree(dcon);

	return 0;
}
690
691#ifdef CONFIG_PM
/* Suspend: synchronously hand the display to the DCON (unless asleep). */
static int dcon_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	if (!dcon->asleep) {
		/* Set up the DCON to have the source */
		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
	}

	return 0;
}
704
/* Resume: restabilize the smbus and hand the display back to the CPU. */
static int dcon_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct dcon_priv *dcon = i2c_get_clientdata(client);

	if (!dcon->asleep) {
		dcon_bus_stabilize(dcon, 0);
		dcon_set_source(dcon, DCON_SOURCE_CPU);
	}

	return 0;
}
717
718#else
719
720#define dcon_suspend NULL
721#define dcon_resume NULL
722
723#endif /* CONFIG_PM */
724
/*
 * DCON interrupt handler.  Decodes the two status lines and wakes any
 * waiter in dcon_source_switch() once a source switch has been ack'd.
 */
irqreturn_t dcon_interrupt(int irq, void *id)
{
	struct dcon_priv *dcon = id;
	u8 status;

	/* read_status() fails when the irq wasn't ours (shared on XO-1.5) */
	if (pdata->read_status(&status))
		return IRQ_NONE;

	switch (status & 3) {
	case 3:
		pr_debug("DCONLOAD_MISSED interrupt\n");
		break;

	case 2:	/* switch to DCON mode */
	case 1:	/* switch to CPU mode */
		dcon->switched = true;
		dcon->irq_time = ktime_get();
		wake_up(&dcon->waitq);
		break;

	case 0:
		/* workaround resume case: the DCON (on 1.5) doesn't
		 * ever assert status 0x01 when switching to CPU mode
		 * during resume. this is because DCONLOAD is de-asserted
		 * _immediately_ upon exiting S3, so the actual release
		 * of the DCON happened long before this point.
		 * see http://dev.laptop.org/ticket/9869
		 */
		if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
			dcon->switched = true;
			dcon->irq_time = ktime_get();
			wake_up(&dcon->waitq);
			pr_debug("switching w/ status 0/0\n");
		} else {
			pr_debug("scanline interrupt w/CPU\n");
		}
	}

	return IRQ_HANDLED;
}
765
static const struct dev_pm_ops dcon_pm_ops = {
	.suspend = dcon_suspend,
	.resume = dcon_resume,
};

static const struct i2c_device_id dcon_idtable[] = {
	{ "olpc_dcon", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, dcon_idtable);

/* i2c driver; .detect + .address_list allow auto-probing the DCON */
static struct i2c_driver dcon_driver = {
	.driver = {
		.name = "olpc_dcon",
		.pm = &dcon_pm_ops,
	},
	.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
	.id_table = dcon_idtable,
	.probe = dcon_probe,
	.remove = dcon_remove,
	.detect = dcon_detect,
	.address_list = normal_i2c,
};
789
/* Pick the platform backend (XO-1.5 on new boards, else XO-1), then
 * register the i2c driver.
 */
static int __init olpc_dcon_init(void)
{
#ifdef CONFIG_FB_OLPC_DCON_1_5
	/* XO-1.5 */
	if (olpc_board_at_least(olpc_board(0xd0)))
		pdata = &dcon_pdata_xo_1_5;
#endif
#ifdef CONFIG_FB_OLPC_DCON_1
	if (!pdata)
		pdata = &dcon_pdata_xo_1;
#endif

	return i2c_add_driver(&dcon_driver);
}
804
/* Module teardown: unregister the i2c driver. */
static void __exit olpc_dcon_exit(void)
{
	i2c_del_driver(&dcon_driver);
}
809
810module_init(olpc_dcon_init);
811module_exit(olpc_dcon_exit);
812
813MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644
index 000000000000..215e7ec4dea2
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -0,0 +1,111 @@
1#ifndef OLPC_DCON_H_
2#define OLPC_DCON_H_
3
4#include <linux/notifier.h>
5#include <linux/workqueue.h>
6
7/* DCON registers */
8
9#define DCON_REG_ID 0
10#define DCON_REG_MODE 1
11
12#define MODE_PASSTHRU (1<<0)
13#define MODE_SLEEP (1<<1)
14#define MODE_SLEEP_AUTO (1<<2)
15#define MODE_BL_ENABLE (1<<3)
16#define MODE_BLANK (1<<4)
17#define MODE_CSWIZZLE (1<<5)
18#define MODE_COL_AA (1<<6)
19#define MODE_MONO_LUMA (1<<7)
20#define MODE_SCAN_INT (1<<8)
21#define MODE_CLOCKDIV (1<<9)
22#define MODE_DEBUG (1<<14)
23#define MODE_SELFTEST (1<<15)
24
25#define DCON_REG_HRES 0x2
26#define DCON_REG_HTOTAL 0x3
27#define DCON_REG_HSYNC_WIDTH 0x4
28#define DCON_REG_VRES 0x5
29#define DCON_REG_VTOTAL 0x6
30#define DCON_REG_VSYNC_WIDTH 0x7
31#define DCON_REG_TIMEOUT 0x8
32#define DCON_REG_SCAN_INT 0x9
33#define DCON_REG_BRIGHT 0xa
34#define DCON_REG_MEM_OPT_A 0x41
35#define DCON_REG_MEM_OPT_B 0x42
36
37/* Load Delay Locked Loop (DLL) settings for clock delay */
38#define MEM_DLL_CLOCK_DELAY (1<<0)
39/* Memory controller power down function */
40#define MEM_POWER_DOWN (1<<8)
41/* Memory controller software reset */
42#define MEM_SOFT_RESET (1<<0)
43
44/* Status values */
45
46#define DCONSTAT_SCANINT 0
47#define DCONSTAT_SCANINT_DCON 1
48#define DCONSTAT_DISPLAYLOAD 2
49#define DCONSTAT_MISSED 3
50
51/* Source values */
52
53#define DCON_SOURCE_DCON 0
54#define DCON_SOURCE_CPU 1
55
56/* Interrupt */
57#define DCON_IRQ 6
58
59struct dcon_priv {
60 struct i2c_client *client;
61 struct fb_info *fbinfo;
62 struct backlight_device *bl_dev;
63
64 wait_queue_head_t waitq;
65 struct work_struct switch_source;
66 struct notifier_block reboot_nb;
67
68 /* Shadow register for the DCON_REG_MODE register */
69 u8 disp_mode;
70
71 /* The current backlight value - this saves us some smbus traffic */
72 u8 bl_val;
73
74 /* Current source, initialized at probe time */
75 int curr_src;
76
77 /* Desired source */
78 int pending_src;
79
80 /* Variables used during switches */
81 bool switched;
82 ktime_t irq_time;
83 ktime_t load_time;
84
85 /* Current output type; true == mono, false == color */
86 bool mono;
87 bool asleep;
88 /* This get set while controlling fb blank state from the driver */
89 bool ignore_fb_events;
90};
91
/* Per-board hooks; one instance per supported XO model. */
struct dcon_platform_data {
	int (*init)(struct dcon_priv *);	/* one-time gpio/irq setup */
	void (*bus_stabilize_wiggle)(void);	/* pulse SMB_CLK to reset the DCON */
	void (*set_dconload)(int);		/* drive the DCONLOAD pin */
	int (*read_status)(u8 *);		/* read DCONSTAT[1:0]; 0 on success */
};
98
99#include <linux/interrupt.h>
100
101irqreturn_t dcon_interrupt(int irq, void *id);
102
103#ifdef CONFIG_FB_OLPC_DCON_1
104extern struct dcon_platform_data dcon_pdata_xo_1;
105#endif
106
107#ifdef CONFIG_FB_OLPC_DCON_1_5
108extern struct dcon_platform_data dcon_pdata_xo_1_5;
109#endif
110
111#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644
index 000000000000..0c5a10c69401
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -0,0 +1,205 @@
1/*
2 * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
3 *
4 * Copyright © 2006-2007 Red Hat, Inc.
5 * Copyright © 2006-2007 Advanced Micro Devices, Inc.
6 * Copyright © 2009 VIA Technology, Inc.
7 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
8 *
9 * This program is free software. You can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/cs5535.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19#include <asm/olpc.h>
20
21#include "olpc_dcon.h"
22
/*
 * One-time GPIO/interrupt setup for the XO-1.  Claims the five DCON
 * GPIOs, determines the current display owner from the DCONLOAD output
 * value, routes the DCON interrupt through cs5535 GPIO event group 2,
 * and registers the irq handler.  Returns 0 on success or -EIO (all
 * GPIOs claimed so far are released on the error paths).
 */
static int dcon_init_xo_1(struct dcon_priv *dcon)
{
	unsigned char lob;

	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
		pr_err("failed to request STAT0 GPIO\n");
		return -EIO;
	}
	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
		pr_err("failed to request STAT1 GPIO\n");
		goto err_gp_stat1;
	}
	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
		pr_err("failed to request IRQ GPIO\n");
		goto err_gp_irq;
	}
	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
		pr_err("failed to request LOAD GPIO\n");
		goto err_gp_load;
	}
	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
		pr_err("failed to request BLANK GPIO\n");
		goto err_gp_blank;
	}

	/* Turn off the event enable for GPIO7 just to be safe */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);

	/*
	 * Determine the current state by reading the GPIO bit; earlier
	 * stages of the boot process have established the state.
	 *
	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
	 * this is because OFW will disable input for the pin and set a value..
	 * READ_BACK will only contain a valid value if input is enabled and
	 * then a value is set.  So, future readings of the pin can use
	 * READ_BACK, but the first one cannot.  Awesome, huh?
	 */
	dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
		? DCON_SOURCE_CPU
		: DCON_SOURCE_DCON;
	dcon->pending_src = dcon->curr_src;

	/* Set the directions for the GPIO pins */
	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
			dcon->curr_src == DCON_SOURCE_CPU);

	/* Set up the interrupt mappings */

	/* Set the IRQ to pair 2 */
	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);

	/* Enable group 2 to trigger the DCON interrupt */
	cs5535_gpio_set_irq(2, DCON_IRQ);

	/* Select edge level for interrupt (in PIC) */
	lob = inb(0x4d0);
	lob &= ~(1 << DCON_IRQ);
	outb(lob, 0x4d0);

	/* Register the interrupt handler */
	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
		pr_err("failed to request DCON's irq\n");
		goto err_req_irq;
	}

	/* Clear INV_EN for GPIO7 (DCONIRQ) */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);

	/* Enable filter for GPIO12 (DCONBLANK) */
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);

	/* Disable filter for GPIO7 */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);

	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);

	/* Add GPIO12 to the Filter Event Pair #7 */
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);

	/* Turn off negative Edge Enable for GPIO12 */
	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);

	/* Enable negative Edge Enable for GPIO7 */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);

	/* Zero the filter amount for Filter Event Pair #7 */
	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);

	/* Clear the negative edge status for GPIO7 and GPIO12 */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);

	/* FIXME: Clear the positive status as well, just to be sure */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);

	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);

	return 0;

err_req_irq:
	gpio_free(OLPC_GPIO_DCON_BLANK);
err_gp_blank:
	gpio_free(OLPC_GPIO_DCON_LOAD);
err_gp_load:
	gpio_free(OLPC_GPIO_DCON_IRQ);
err_gp_irq:
	gpio_free(OLPC_GPIO_DCON_STAT1);
err_gp_stat1:
	gpio_free(OLPC_GPIO_DCON_STAT0);
	return -EIO;
}
144
static void dcon_wiggle_xo_1(void)
{
	int x;

	/*
	 * According to HiMax, when powering the DCON up we should hold
	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
	 * state machine to reset to a (sane) initial state.  Mitch Bradley
	 * did some testing and discovered that holding for 16 SMB_CLK cycles
	 * worked a lot more reliably, so that's what we do here.
	 *
	 * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
	 * GPIO15.
	 */
	/* take both lines over as plain GPIO outputs, driven high */
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);

	/* bit-bang 16 clock pulses while SMB_DATA stays high */
	for (x = 0; x < 16; x++) {
		udelay(5);
		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
		udelay(5);
		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
	}
	udelay(5);
	/* give the pins back to the SMB controller (AUX1 in/out) */
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
}
183
/* Drive the DCONLOAD pin: 1 = CPU owns the display, 0 = DCON owns it. */
static void dcon_set_dconload_1(int val)
{
	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
188
/* Read DCONSTAT[1:0] and ack the irq edge; always succeeds on XO-1. */
static int dcon_read_status_xo_1(u8 *status)
{
	*status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
	*status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;

	/* Clear the negative edge status for GPIO7 */
	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);

	return 0;
}
199
/* XO-1 platform backend */
struct dcon_platform_data dcon_pdata_xo_1 = {
	.init = dcon_init_xo_1,
	.bus_stabilize_wiggle = dcon_wiggle_xo_1,
	.set_dconload = dcon_set_dconload_1,
	.read_status = dcon_read_status_xo_1,
};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644
index 000000000000..6a4d379c16a3
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright (c) 2009,2010 One Laptop per Child
3 *
4 * This program is free software. You can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/acpi.h>
12#include <linux/delay.h>
13#include <linux/gpio.h>
14#include <asm/olpc.h>
15
16/* TODO: this eventually belongs in linux/vx855.h */
17#define NR_VX855_GPI 14
18#define NR_VX855_GPO 13
19#define NR_VX855_GPIO 15
20
21#define VX855_GPI(n) (n)
22#define VX855_GPO(n) (NR_VX855_GPI + (n))
23#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n))
24
25#include "olpc_dcon.h"
26
27/* Hardware setup on the XO 1.5:
28 * DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
29 * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver
30 * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
31 * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
32 * DCONIRQ connects to VX855_GPIO12
33 * DCONSMBDATA connects to VX855 graphics CRTSPD
34 * DCONSMBCLK connects to VX855 graphics CRTSPCLK
35 */
36
37#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
38#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */
39#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */
40#define BIT_GPIO12 0x40
41
42#define PREFIX "OLPC DCON:"
43
/* Ack the gpio12 interrupt status bit (write-1-to-clear). */
static void dcon_clear_irq(void)
{
	/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
	outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
}
49
50static int dcon_was_irq(void)
51{
52 u_int8_t tmp;
53
54 /* irq status will appear in PMIO_Rx50[6] on gpio12 */
55 tmp = inb(VX855_GPI_STATUS_CHG);
56 return !!(tmp & BIT_GPIO12);
57
58 return 0;
59}
60
/*
 * One-time setup for the XO-1.5: enable SCI/SMI on gpio12, read the
 * initial DCONLOAD state from the general-purpose output register, and
 * hook the (ACPI-shared) SCI interrupt.  Returns 0 on success, 1 if
 * the irq could not be requested.
 */
static int dcon_init_xo_1_5(struct dcon_priv *dcon)
{
	unsigned int irq;

	dcon_clear_irq();

	/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);

	/* Determine the current state of DCONLOAD, likely set by firmware */
	/* GPIO1 */
	dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
			DCON_SOURCE_CPU : DCON_SOURCE_DCON;
	dcon->pending_src = dcon->curr_src;

	/* we're sharing the IRQ with ACPI */
	irq = acpi_gbl_FADT.sci_interrupt;
	if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
		pr_err("DCON (IRQ%d) allocation failed\n", irq);
		return 1;
	}

	return 0;
}
85
/*
 * Bit-bang the DCON's i2c lines.
 * FIXME: This directly accesses the CRT GPIO controller !!!
 */
static void set_i2c_line(int sda, int scl)
{
	const unsigned int port = 0x26;
	unsigned char val;

	outb(port, 0x3c4);
	val = inb(0x3c5);

	/* bit 5 = SCL, bit 4 = SDA, bit 0 always set */
	val &= ~0x30;
	if (scl)
		val |= 0x20;
	if (sda)
		val |= 0x10;
	val |= 0x01;

	outb(port, 0x3c4);
	outb(val, 0x3c5);
}
110
111
static void dcon_wiggle_xo_1_5(void)
{
	int x;

	/*
	 * According to HiMax, when powering the DCON up we should hold
	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
	 * state machine to reset to a (sane) initial state.  Mitch Bradley
	 * did some testing and discovered that holding for 16 SMB_CLK cycles
	 * worked a lot more reliably, so that's what we do here.
	 */
	set_i2c_line(1, 1);

	/* 16 clock pulses with SDA held high */
	for (x = 0; x < 16; x++) {
		udelay(5);
		set_i2c_line(1, 0);
		udelay(5);
		set_i2c_line(1, 1);
	}
	udelay(5);

	/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
}
136
/* Drive DCONLOAD via VX855 GPIO1: 1 = CPU owns the display. */
static void dcon_set_dconload_xo_1_5(int val)
{
	gpio_set_value(VX855_GPIO(1), val);
}
141
/*
 * Read DCONSTAT[1:0].  Returns -1 (without touching *status) when the
 * shared SCI interrupt wasn't raised by the DCON; otherwise reads the
 * two status GPIs, acks the irq, and returns 0.
 */
static int dcon_read_status_xo_1_5(u8 *status)
{
	if (!dcon_was_irq())
		return -1;

	/* i believe this is the same as "inb(0x44b) & 3" */
	*status = gpio_get_value(VX855_GPI(10));
	*status |= gpio_get_value(VX855_GPI(11)) << 1;

	dcon_clear_irq();

	return 0;
}
155
/* XO-1.5 platform backend */
struct dcon_platform_data dcon_pdata_xo_1_5 = {
	.init = dcon_init_xo_1_5,
	.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
	.set_dconload = dcon_set_dconload_xo_1_5,
	.read_status = dcon_read_status_xo_1_5,
};
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index 3e668d852f03..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -2,6 +2,7 @@ config INFINIBAND_HFI1
2 tristate "Intel OPA Gen1 support" 2 tristate "Intel OPA Gen1 support"
3 depends on X86_64 && INFINIBAND_RDMAVT 3 depends on X86_64 && INFINIBAND_RDMAVT
4 select MMU_NOTIFIER 4 select MMU_NOTIFIER
5 select CRC32
5 default m 6 default m
6 ---help--- 7 ---help---
7 This is a low-level driver for Intel OPA Gen1 adapter. 8 This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
3- Remove unneeded file entries in sysfs 3- Remove unneeded file entries in sysfs
4- Remove software processing of IB protocol and place in library for use 4- Remove software processing of IB protocol and place in library for use
5 by qib, ipath (if still present), hfi1, and eventually soft-roce 5 by qib, ipath (if still present), hfi1, and eventually soft-roce
6 6- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
49#include <linux/vmalloc.h> 49#include <linux/vmalloc.h>
50#include <linux/io.h> 50#include <linux/io.h>
51 51
52#include <rdma/ib.h>
53
52#include "hfi.h" 54#include "hfi.h"
53#include "pio.h" 55#include "pio.h"
54#include "device.h" 56#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
190 int uctxt_required = 1; 192 int uctxt_required = 1;
191 int must_be_root = 0; 193 int must_be_root = 0;
192 194
195 /* FIXME: This interface cannot continue out of staging */
196 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
197 return -EACCES;
198
193 if (count < sizeof(cmd)) { 199 if (count < sizeof(cmd)) {
194 ret = -EINVAL; 200 ret = -EINVAL;
195 goto bail; 201 goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
791 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 797 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
792 798
793 dd->rcd[uctxt->ctxt] = NULL; 799 dd->rcd[uctxt->ctxt] = NULL;
800
801 hfi1_user_exp_rcv_free(fdata);
802 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
803
794 uctxt->rcvwait_to = 0; 804 uctxt->rcvwait_to = 0;
795 uctxt->piowait_to = 0; 805 uctxt->piowait_to = 0;
796 uctxt->rcvnowait = 0; 806 uctxt->rcvnowait = 0;
797 uctxt->pionowait = 0; 807 uctxt->pionowait = 0;
798 uctxt->event_flags = 0; 808 uctxt->event_flags = 0;
799 809
800 hfi1_user_exp_rcv_free(fdata);
801 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
802
803 hfi1_stats.sps_ctxts--; 810 hfi1_stats.sps_ctxts--;
804 if (++dd->freectxts == dd->num_user_contexts) 811 if (++dd->freectxts == dd->num_user_contexts)
805 aspm_enable_all(dd); 812 aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
1127 1134
1128static int user_init(struct file *fp) 1135static int user_init(struct file *fp)
1129{ 1136{
1130 int ret;
1131 unsigned int rcvctrl_ops = 0; 1137 unsigned int rcvctrl_ops = 0;
1132 struct hfi1_filedata *fd = fp->private_data; 1138 struct hfi1_filedata *fd = fp->private_data;
1133 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1139 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1134 1140
1135 /* make sure that the context has already been setup */ 1141 /* make sure that the context has already been setup */
1136 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { 1142 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
1137 ret = -EFAULT; 1143 return -EFAULT;
1138 goto done;
1139 }
1140
1141 /*
1142 * Subctxts don't need to initialize anything since master
1143 * has done it.
1144 */
1145 if (fd->subctxt) {
1146 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1147 HFI1_CTXT_MASTER_UNINIT,
1148 &uctxt->event_flags));
1149 goto expected;
1150 }
1151 1144
1152 /* initialize poll variables... */ 1145 /* initialize poll variables... */
1153 uctxt->urgent = 0; 1146 uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
1202 wake_up(&uctxt->wait); 1195 wake_up(&uctxt->wait);
1203 } 1196 }
1204 1197
1205expected: 1198 return 0;
1206 /*
1207 * Expected receive has to be setup for all processes (including
1208 * shared contexts). However, it has to be done after the master
1209 * context has been fully configured as it depends on the
1210 * eager/expected split of the RcvArray entries.
1211 * Setting it up here ensures that the subcontexts will be waiting
1212 * (due to the above wait_event_interruptible() until the master
1213 * is setup.
1214 */
1215 ret = hfi1_user_exp_rcv_init(fp);
1216done:
1217 return ret;
1218} 1199}
1219 1200
1220static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) 1201static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
1261 int ret = 0; 1242 int ret = 0;
1262 1243
1263 /* 1244 /*
1264 * Context should be set up only once (including allocation and 1245 * Context should be set up only once, including allocation and
1265 * programming of eager buffers. This is done if context sharing 1246 * programming of eager buffers. This is done if context sharing
1266 * is not requested or by the master process. 1247 * is not requested or by the master process.
1267 */ 1248 */
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
1282 if (ret) 1263 if (ret)
1283 goto done; 1264 goto done;
1284 } 1265 }
1266 } else {
1267 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1268 HFI1_CTXT_MASTER_UNINIT,
1269 &uctxt->event_flags));
1270 if (ret)
1271 goto done;
1285 } 1272 }
1273
1286 ret = hfi1_user_sdma_alloc_queues(uctxt, fp); 1274 ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1287 if (ret) 1275 if (ret)
1288 goto done; 1276 goto done;
1277 /*
1278 * Expected receive has to be setup for all processes (including
1279 * shared contexts). However, it has to be done after the master
1280 * context has been fully configured as it depends on the
1281 * eager/expected split of the RcvArray entries.
1282 * Setting it up here ensures that the subcontexts will be waiting
1283 * (due to the above wait_event_interruptible() until the master
1284 * is setup.
1285 */
1286 ret = hfi1_user_exp_rcv_init(fp);
1287 if (ret)
1288 goto done;
1289 1289
1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); 1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1291done: 1291done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1565{ 1565{
1566 struct hfi1_devdata *dd = filp->private_data; 1566 struct hfi1_devdata *dd = filp->private_data;
1567 1567
1568 switch (whence) { 1568 return fixed_size_llseek(filp, offset, whence,
1569 case SEEK_SET: 1569 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
1570 break;
1571 case SEEK_CUR:
1572 offset += filp->f_pos;
1573 break;
1574 case SEEK_END:
1575 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1576 offset;
1577 break;
1578 default:
1579 return -EINVAL;
1580 }
1581
1582 if (offset < 0)
1583 return -EINVAL;
1584
1585 if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1586 return -EINVAL;
1587
1588 filp->f_pos = offset;
1589
1590 return filp->f_pos;
1591} 1570}
1592 1571
1593/* NOTE: assumes unsigned long is 8 bytes */ 1572/* NOTE: assumes unsigned long is 8 bytes */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
71 struct mm_struct *, 71 struct mm_struct *,
72 unsigned long, unsigned long); 72 unsigned long, unsigned long);
73static void mmu_notifier_mem_invalidate(struct mmu_notifier *, 73static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
74 struct mm_struct *,
74 unsigned long, unsigned long); 75 unsigned long, unsigned long);
75static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, 76static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
76 unsigned long, unsigned long); 77 unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
137 rbnode = rb_entry(node, struct mmu_rb_node, node); 138 rbnode = rb_entry(node, struct mmu_rb_node, node);
138 rb_erase(node, root); 139 rb_erase(node, root);
139 if (handler->ops->remove) 140 if (handler->ops->remove)
140 handler->ops->remove(root, rbnode, false); 141 handler->ops->remove(root, rbnode, NULL);
141 } 142 }
142 } 143 }
143 144
@@ -176,7 +177,7 @@ unlock:
176 return ret; 177 return ret;
177} 178}
178 179
179/* Caller must host handler lock */ 180/* Caller must hold handler lock */
180static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, 181static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
181 unsigned long addr, 182 unsigned long addr,
182 unsigned long len) 183 unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
200 return node; 201 return node;
201} 202}
202 203
204/* Caller must *not* hold handler lock. */
203static void __mmu_rb_remove(struct mmu_rb_handler *handler, 205static void __mmu_rb_remove(struct mmu_rb_handler *handler,
204 struct mmu_rb_node *node, bool arg) 206 struct mmu_rb_node *node, struct mm_struct *mm)
205{ 207{
208 unsigned long flags;
209
206 /* Validity of handler and node pointers has been checked by caller. */ 210 /* Validity of handler and node pointers has been checked by caller. */
207 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, 211 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
208 node->len); 212 node->len);
213 spin_lock_irqsave(&handler->lock, flags);
209 __mmu_int_rb_remove(node, handler->root); 214 __mmu_int_rb_remove(node, handler->root);
215 spin_unlock_irqrestore(&handler->lock, flags);
216
210 if (handler->ops->remove) 217 if (handler->ops->remove)
211 handler->ops->remove(handler->root, node, arg); 218 handler->ops->remove(handler->root, node, mm);
212} 219}
213 220
214struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, 221struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
231void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) 238void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
232{ 239{
233 struct mmu_rb_handler *handler = find_mmu_handler(root); 240 struct mmu_rb_handler *handler = find_mmu_handler(root);
234 unsigned long flags;
235 241
236 if (!handler || !node) 242 if (!handler || !node)
237 return; 243 return;
238 244
239 spin_lock_irqsave(&handler->lock, flags); 245 __mmu_rb_remove(handler, node, NULL);
240 __mmu_rb_remove(handler, node, false);
241 spin_unlock_irqrestore(&handler->lock, flags);
242} 246}
243 247
244static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) 248static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
260static inline void mmu_notifier_page(struct mmu_notifier *mn, 264static inline void mmu_notifier_page(struct mmu_notifier *mn,
261 struct mm_struct *mm, unsigned long addr) 265 struct mm_struct *mm, unsigned long addr)
262{ 266{
263 mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); 267 mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
264} 268}
265 269
266static inline void mmu_notifier_range_start(struct mmu_notifier *mn, 270static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
268 unsigned long start, 272 unsigned long start,
269 unsigned long end) 273 unsigned long end)
270{ 274{
271 mmu_notifier_mem_invalidate(mn, start, end); 275 mmu_notifier_mem_invalidate(mn, mm, start, end);
272} 276}
273 277
274static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, 278static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
279 struct mm_struct *mm,
275 unsigned long start, unsigned long end) 280 unsigned long start, unsigned long end)
276{ 281{
277 struct mmu_rb_handler *handler = 282 struct mmu_rb_handler *handler =
278 container_of(mn, struct mmu_rb_handler, mn); 283 container_of(mn, struct mmu_rb_handler, mn);
279 struct rb_root *root = handler->root; 284 struct rb_root *root = handler->root;
280 struct mmu_rb_node *node; 285 struct mmu_rb_node *node, *ptr = NULL;
281 unsigned long flags; 286 unsigned long flags;
282 287
283 spin_lock_irqsave(&handler->lock, flags); 288 spin_lock_irqsave(&handler->lock, flags);
284 for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; 289 for (node = __mmu_int_rb_iter_first(root, start, end - 1);
285 node = __mmu_int_rb_iter_next(node, start, end - 1)) { 290 node; node = ptr) {
291 /* Guard against node removal. */
292 ptr = __mmu_int_rb_iter_next(node, start, end - 1);
286 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", 293 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
287 node->addr, node->len); 294 node->addr, node->len);
288 if (handler->ops->invalidate(root, node)) 295 if (handler->ops->invalidate(root, node)) {
289 __mmu_rb_remove(handler, node, true); 296 spin_unlock_irqrestore(&handler->lock, flags);
297 __mmu_rb_remove(handler, node, mm);
298 spin_lock_irqsave(&handler->lock, flags);
299 }
290 } 300 }
291 spin_unlock_irqrestore(&handler->lock, flags); 301 spin_unlock_irqrestore(&handler->lock, flags);
292} 302}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
59struct mmu_rb_ops { 59struct mmu_rb_ops {
60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); 60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
61 int (*insert)(struct rb_root *, struct mmu_rb_node *); 61 int (*insert)(struct rb_root *, struct mmu_rb_node *);
62 void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); 62 void (*remove)(struct rb_root *, struct mmu_rb_node *,
63 struct mm_struct *);
63 int (*invalidate)(struct rb_root *, struct mmu_rb_node *); 64 int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
64}; 65};
65 66
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
519 * do the flush work until that QP's 519 * do the flush work until that QP's
520 * sdma work has finished. 520 * sdma work has finished.
521 */ 521 */
522 spin_lock(&qp->s_lock);
522 if (qp->s_flags & RVT_S_WAIT_DMA) { 523 if (qp->s_flags & RVT_S_WAIT_DMA) {
523 qp->s_flags &= ~RVT_S_WAIT_DMA; 524 qp->s_flags &= ~RVT_S_WAIT_DMA;
524 hfi1_schedule_send(qp); 525 hfi1_schedule_send(qp);
525 } 526 }
527 spin_unlock(&qp->s_lock);
526} 528}
527 529
528/** 530/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
87static int set_rcvarray_entry(struct file *, unsigned long, u32, 87static int set_rcvarray_entry(struct file *, unsigned long, u32,
88 struct tid_group *, struct page **, unsigned); 88 struct tid_group *, struct page **, unsigned);
89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); 89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
91 struct mm_struct *);
91static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 92static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
92static int program_rcvarray(struct file *, unsigned long, struct tid_group *, 93static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
93 struct tid_pageset *, unsigned, u16, struct page **, 94 struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
254 struct hfi1_ctxtdata *uctxt = fd->uctxt; 255 struct hfi1_ctxtdata *uctxt = fd->uctxt;
255 struct tid_group *grp, *gptr; 256 struct tid_group *grp, *gptr;
256 257
258 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
259 return 0;
257 /* 260 /*
258 * The notifier would have been removed when the process'es mm 261 * The notifier would have been removed when the process'es mm
259 * was freed. 262 * was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
899 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) 902 if (!node || node->rcventry != (uctxt->expected_base + rcventry))
900 return -EBADF; 903 return -EBADF;
901 if (HFI1_CAP_IS_USET(TID_UNMAP)) 904 if (HFI1_CAP_IS_USET(TID_UNMAP))
902 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); 905 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
903 else 906 else
904 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); 907 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
905 908
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
965 continue; 968 continue;
966 if (HFI1_CAP_IS_USET(TID_UNMAP)) 969 if (HFI1_CAP_IS_USET(TID_UNMAP))
967 mmu_rb_remove(&fd->tid_rb_root, 970 mmu_rb_remove(&fd->tid_rb_root,
968 &node->mmu, false); 971 &node->mmu, NULL);
969 else 972 else
970 hfi1_mmu_rb_remove(&fd->tid_rb_root, 973 hfi1_mmu_rb_remove(&fd->tid_rb_root,
971 &node->mmu); 974 &node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
1032} 1035}
1033 1036
1034static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, 1037static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
1035 bool notifier) 1038 struct mm_struct *mm)
1036{ 1039{
1037 struct hfi1_filedata *fdata = 1040 struct hfi1_filedata *fdata =
1038 container_of(root, struct hfi1_filedata, tid_rb_root); 1041 container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
278static void user_sdma_free_request(struct user_sdma_request *, bool); 278static void user_sdma_free_request(struct user_sdma_request *, bool);
279static int pin_vector_pages(struct user_sdma_request *, 279static int pin_vector_pages(struct user_sdma_request *,
280 struct user_sdma_iovec *); 280 struct user_sdma_iovec *);
281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); 281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
282 unsigned);
282static int check_header_template(struct user_sdma_request *, 283static int check_header_template(struct user_sdma_request *,
283 struct hfi1_pkt_header *, u32, u32); 284 struct hfi1_pkt_header *, u32, u32);
284static int set_txreq_header(struct user_sdma_request *, 285static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
299static void activate_packet_queue(struct iowait *, int); 300static void activate_packet_queue(struct iowait *, int);
300static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); 301static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
301static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); 302static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
302static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 303static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
304 struct mm_struct *);
303static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 305static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
304 306
305static struct mmu_rb_ops sdma_rb_ops = { 307static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
1063 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, 1065 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
1064 (unsigned long)iovec->iov.iov_base, 1066 (unsigned long)iovec->iov.iov_base,
1065 iovec->iov.iov_len); 1067 iovec->iov.iov_len);
1066 if (rb_node) 1068 if (rb_node && !IS_ERR(rb_node))
1067 node = container_of(rb_node, struct sdma_mmu_node, rb); 1069 node = container_of(rb_node, struct sdma_mmu_node, rb);
1070 else
1071 rb_node = NULL;
1068 1072
1069 if (!node) { 1073 if (!node) {
1070 node = kzalloc(sizeof(*node), GFP_KERNEL); 1074 node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
1107 goto bail; 1111 goto bail;
1108 } 1112 }
1109 if (pinned != npages) { 1113 if (pinned != npages) {
1110 unpin_vector_pages(current->mm, pages, pinned); 1114 unpin_vector_pages(current->mm, pages, node->npages,
1115 pinned);
1111 ret = -EFAULT; 1116 ret = -EFAULT;
1112 goto bail; 1117 goto bail;
1113 } 1118 }
@@ -1147,9 +1152,9 @@ bail:
1147} 1152}
1148 1153
1149static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, 1154static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1150 unsigned npages) 1155 unsigned start, unsigned npages)
1151{ 1156{
1152 hfi1_release_user_pages(mm, pages, npages, 0); 1157 hfi1_release_user_pages(mm, pages + start, npages, 0);
1153 kfree(pages); 1158 kfree(pages);
1154} 1159}
1155 1160
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1502 &req->pq->sdma_rb_root, 1507 &req->pq->sdma_rb_root,
1503 (unsigned long)req->iovs[i].iov.iov_base, 1508 (unsigned long)req->iovs[i].iov.iov_base,
1504 req->iovs[i].iov.iov_len); 1509 req->iovs[i].iov.iov_len);
1505 if (!mnode) 1510 if (!mnode || IS_ERR(mnode))
1506 continue; 1511 continue;
1507 1512
1508 node = container_of(mnode, struct sdma_mmu_node, rb); 1513 node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
1547} 1552}
1548 1553
1549static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, 1554static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1550 bool notifier) 1555 struct mm_struct *mm)
1551{ 1556{
1552 struct sdma_mmu_node *node = 1557 struct sdma_mmu_node *node =
1553 container_of(mnode, struct sdma_mmu_node, rb); 1558 container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1557 node->pq->n_locked -= node->npages; 1562 node->pq->n_locked -= node->npages;
1558 spin_unlock(&node->pq->evict_lock); 1563 spin_unlock(&node->pq->evict_lock);
1559 1564
1560 unpin_vector_pages(notifier ? NULL : current->mm, node->pages, 1565 /*
1566 * If mm is set, we are being called by the MMU notifier and we
1567 * should not pass a mm_struct to unpin_vector_page(). This is to
1568 * prevent a deadlock when hfi1_release_user_pages() attempts to
1569 * take the mmap_sem, which the MMU notifier has already taken.
1570 */
1571 unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
1561 node->npages); 1572 node->npages);
1562 /* 1573 /*
1563 * If called by the MMU notifier, we have to adjust the pinned 1574 * If called by the MMU notifier, we have to adjust the pinned
1564 * page count ourselves. 1575 * page count ourselves.
1565 */ 1576 */
1566 if (notifier) 1577 if (mm)
1567 current->mm->pinned_vm -= node->npages; 1578 mm->pinned_vm -= node->npages;
1568 kfree(node); 1579 kfree(node);
1569} 1580}
1570 1581
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a24443ba59ea..97e5b69e0668 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -779,14 +779,6 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
779 return 0; 779 return 0;
780} 780}
781 781
782static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
783{
784 struct iscsi_node_acl *acl = container_of(se_nacl,
785 struct iscsi_node_acl, se_node_acl);
786
787 configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
788}
789
790/* End items for lio_target_acl_cit */ 782/* End items for lio_target_acl_cit */
791 783
792/* Start items for lio_target_tpg_attrib_cit */ 784/* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
1247 if (IS_ERR(tiqn)) 1239 if (IS_ERR(tiqn))
1248 return ERR_CAST(tiqn); 1240 return ERR_CAST(tiqn);
1249 1241
1242 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1243 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1244 " %s\n", name);
1245 return &tiqn->tiqn_wwn;
1246}
1247
1248static void lio_target_add_wwn_groups(struct se_wwn *wwn)
1249{
1250 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1251
1250 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, 1252 config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
1251 "iscsi_instance", &iscsi_stat_instance_cit); 1253 "iscsi_instance", &iscsi_stat_instance_cit);
1252 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group, 1254 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
1271 "iscsi_logout_stats", &iscsi_stat_logout_cit); 1273 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1272 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, 1274 configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
1273 &tiqn->tiqn_wwn.fabric_stat_group); 1275 &tiqn->tiqn_wwn.fabric_stat_group);
1274
1275
1276 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1277 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1278 " %s\n", name);
1279 return &tiqn->tiqn_wwn;
1280} 1276}
1281 1277
1282static void lio_target_call_coredeltiqn( 1278static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@ static void lio_target_call_coredeltiqn(
1284{ 1280{
1285 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); 1281 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1286 1282
1287 configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
1288
1289 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", 1283 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
1290 tiqn->tiqn); 1284 tiqn->tiqn);
1291 iscsit_del_tiqn(tiqn); 1285 iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
1660 .aborted_task = lio_aborted_task, 1654 .aborted_task = lio_aborted_task,
1661 .fabric_make_wwn = lio_target_call_coreaddtiqn, 1655 .fabric_make_wwn = lio_target_call_coreaddtiqn,
1662 .fabric_drop_wwn = lio_target_call_coredeltiqn, 1656 .fabric_drop_wwn = lio_target_call_coredeltiqn,
1657 .add_wwn_groups = lio_target_add_wwn_groups,
1663 .fabric_make_tpg = lio_target_tiqn_addtpg, 1658 .fabric_make_tpg = lio_target_tiqn_addtpg,
1664 .fabric_drop_tpg = lio_target_tiqn_deltpg, 1659 .fabric_drop_tpg = lio_target_tiqn_deltpg,
1665 .fabric_make_np = lio_target_call_addnptotpg, 1660 .fabric_make_np = lio_target_call_addnptotpg,
1666 .fabric_drop_np = lio_target_call_delnpfromtpg, 1661 .fabric_drop_np = lio_target_call_delnpfromtpg,
1667 .fabric_init_nodeacl = lio_target_init_nodeacl, 1662 .fabric_init_nodeacl = lio_target_init_nodeacl,
1668 .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl,
1669 1663
1670 .tfc_discovery_attrs = lio_target_discovery_auth_attrs, 1664 .tfc_discovery_attrs = lio_target_discovery_auth_attrs,
1671 .tfc_wwn_attrs = lio_target_wwn_attrs, 1665 .tfc_wwn_attrs = lio_target_wwn_attrs,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 1bd5c72b663e..31a096aa16ab 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -338,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
338{ 338{
339 struct se_node_acl *se_nacl = container_of(to_config_group(item), 339 struct se_node_acl *se_nacl = container_of(to_config_group(item),
340 struct se_node_acl, acl_group); 340 struct se_node_acl, acl_group);
341 struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
342 341
343 if (tf->tf_ops->fabric_cleanup_nodeacl) 342 configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
344 tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
345 core_tpg_del_initiator_node_acl(se_nacl); 343 core_tpg_del_initiator_node_acl(se_nacl);
346} 344}
347 345
@@ -383,14 +381,6 @@ static struct config_group *target_fabric_make_nodeacl(
383 if (IS_ERR(se_nacl)) 381 if (IS_ERR(se_nacl))
384 return ERR_CAST(se_nacl); 382 return ERR_CAST(se_nacl);
385 383
386 if (tf->tf_ops->fabric_init_nodeacl) {
387 int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
388 if (ret) {
389 core_tpg_del_initiator_node_acl(se_nacl);
390 return ERR_PTR(ret);
391 }
392 }
393
394 config_group_init_type_name(&se_nacl->acl_group, name, 384 config_group_init_type_name(&se_nacl->acl_group, name,
395 &tf->tf_tpg_nacl_base_cit); 385 &tf->tf_tpg_nacl_base_cit);
396 386
@@ -414,6 +404,15 @@ static struct config_group *target_fabric_make_nodeacl(
414 configfs_add_default_group(&se_nacl->acl_fabric_stat_group, 404 configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
415 &se_nacl->acl_group); 405 &se_nacl->acl_group);
416 406
407 if (tf->tf_ops->fabric_init_nodeacl) {
408 int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
409 if (ret) {
410 configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
411 core_tpg_del_initiator_node_acl(se_nacl);
412 return ERR_PTR(ret);
413 }
414 }
415
417 return &se_nacl->acl_group; 416 return &se_nacl->acl_group;
418} 417}
419 418
@@ -892,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
892 struct se_wwn, wwn_group); 891 struct se_wwn, wwn_group);
893 struct target_fabric_configfs *tf = wwn->wwn_tf; 892 struct target_fabric_configfs *tf = wwn->wwn_tf;
894 893
894 configfs_remove_default_groups(&wwn->fabric_stat_group);
895 tf->tf_ops->fabric_drop_wwn(wwn); 895 tf->tf_ops->fabric_drop_wwn(wwn);
896} 896}
897 897
@@ -945,6 +945,8 @@ static struct config_group *target_fabric_make_wwn(
945 &tf->tf_wwn_fabric_stats_cit); 945 &tf->tf_wwn_fabric_stats_cit);
946 configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); 946 configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
947 947
948 if (tf->tf_ops->add_wwn_groups)
949 tf->tf_ops->add_wwn_groups(wwn);
948 return &wwn->wwn_group; 950 return &wwn->wwn_group;
949} 951}
950 952
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc35a24..3c3dc4a3d52c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -376,6 +376,8 @@ config MTK_THERMAL
376 tristate "Temperature sensor driver for mediatek SoCs" 376 tristate "Temperature sensor driver for mediatek SoCs"
377 depends on ARCH_MEDIATEK || COMPILE_TEST 377 depends on ARCH_MEDIATEK || COMPILE_TEST
378 depends on HAS_IOMEM 378 depends on HAS_IOMEM
379 depends on NVMEM || NVMEM=n
380 depends on RESET_CONTROLLER
379 default y 381 default y
380 help 382 help
381 Enable this option if you want to have support for thermal management 383 Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
68 * Every step equals (1 * 200) / 255 celsius, and finally 68 * Every step equals (1 * 200) / 255 celsius, and finally
69 * need convert to millicelsius. 69 * need convert to millicelsius.
70 */ 70 */
71 return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; 71 return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
72} 72}
73 73
74static inline long _temp_to_step(long temp) 74static inline long _temp_to_step(long temp)
75{ 75{
76 return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); 76 return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
77} 77}
78 78
79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, 79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c07cee..507632b9648e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
27#include <linux/thermal.h> 27#include <linux/thermal.h>
28#include <linux/reset.h> 28#include <linux/reset.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/nvmem-consumer.h>
31 30
32/* AUXADC Registers */ 31/* AUXADC Registers */
33#define AUXADC_CON0_V 0x000 32#define AUXADC_CON0_V 0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
619 618
620module_platform_driver(mtk_thermal_driver); 619module_platform_driver(mtk_thermal_driver);
621 620
622MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); 621MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
623MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>"); 622MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
624MODULE_DESCRIPTION("Mediatek thermal driver"); 623MODULE_DESCRIPTION("Mediatek thermal driver");
625MODULE_LICENSE("GPL v2"); 624MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d3e776..d8ec44b194d6 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
803 * otherwise, it returns a corresponding ERR_PTR(). Caller must 803 * otherwise, it returns a corresponding ERR_PTR(). Caller must
804 * check the return value with help of IS_ERR() helper. 804 * check the return value with help of IS_ERR() helper.
805 */ 805 */
806static struct __thermal_zone * 806static struct __thermal_zone
807thermal_of_build_thermal_zone(struct device_node *np) 807__init *thermal_of_build_thermal_zone(struct device_node *np)
808{ 808{
809 struct device_node *child = NULL, *gchild; 809 struct device_node *child = NULL, *gchild;
810 struct __thermal_zone *tz; 810 struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..2f1a863a8e15 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
301 capped_extra_power = 0; 301 capped_extra_power = 0;
302 extra_power = 0; 302 extra_power = 0;
303 for (i = 0; i < num_actors; i++) { 303 for (i = 0; i < num_actors; i++) {
304 u64 req_range = req_power[i] * power_range; 304 u64 req_range = (u64)req_power[i] * power_range;
305 305
306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, 306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
307 total_req_power); 307 total_req_power);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b54653ecf8..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
688{ 688{
689 struct thermal_zone_device *tz = to_thermal_zone(dev); 689 struct thermal_zone_device *tz = to_thermal_zone(dev);
690 int trip, ret; 690 int trip, ret;
691 unsigned long temperature; 691 int temperature;
692 692
693 if (!tz->ops->set_trip_temp) 693 if (!tz->ops->set_trip_temp)
694 return -EPERM; 694 return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) 696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
697 return -EINVAL; 697 return -EINVAL;
698 698
699 if (kstrtoul(buf, 10, &temperature)) 699 if (kstrtoint(buf, 10, &temperature))
700 return -EINVAL; 700 return -EINVAL;
701 701
702 ret = tz->ops->set_trip_temp(tz, trip, temperature); 702 ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
899{ 899{
900 struct thermal_zone_device *tz = to_thermal_zone(dev); 900 struct thermal_zone_device *tz = to_thermal_zone(dev);
901 int ret = 0; 901 int ret = 0;
902 unsigned long temperature; 902 int temperature;
903 903
904 if (kstrtoul(buf, 10, &temperature)) 904 if (kstrtoint(buf, 10, &temperature))
905 return -EINVAL; 905 return -EINVAL;
906 906
907 if (!tz->ops->set_emul_temp) { 907 if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
959 struct thermal_zone_device *tz = to_thermal_zone(dev); \ 959 struct thermal_zone_device *tz = to_thermal_zone(dev); \
960 \ 960 \
961 if (tz->tzp) \ 961 if (tz->tzp) \
962 return sprintf(buf, "%u\n", tz->tzp->name); \ 962 return sprintf(buf, "%d\n", tz->tzp->name); \
963 else \ 963 else \
964 return -EIO; \ 964 return -EIO; \
965 } \ 965 } \
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..cf0dc51a2690 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
626 */ 626 */
627 627
628static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, 628static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
629 struct inode *ptm_inode, int idx) 629 struct file *file, int idx)
630{ 630{
631 /* Master must be open via /dev/ptmx */ 631 /* Master must be open via /dev/ptmx */
632 return ERR_PTR(-EIO); 632 return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
642 */ 642 */
643 643
644static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, 644static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
645 struct inode *pts_inode, int idx) 645 struct file *file, int idx)
646{ 646{
647 struct tty_struct *tty; 647 struct tty_struct *tty;
648 648
649 mutex_lock(&devpts_mutex); 649 mutex_lock(&devpts_mutex);
650 tty = devpts_get_priv(pts_inode); 650 tty = devpts_get_priv(file->f_path.dentry);
651 mutex_unlock(&devpts_mutex); 651 mutex_unlock(&devpts_mutex);
652 /* Master must be open before slave */ 652 /* Master must be open before slave */
653 if (!tty) 653 if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
663/* this is called once with whichever end is closed last */ 663/* this is called once with whichever end is closed last */
664static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 664static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
665{ 665{
666 struct inode *ptmx_inode; 666 struct pts_fs_info *fsi;
667 667
668 if (tty->driver->subtype == PTY_TYPE_MASTER) 668 if (tty->driver->subtype == PTY_TYPE_MASTER)
669 ptmx_inode = tty->driver_data; 669 fsi = tty->driver_data;
670 else 670 else
671 ptmx_inode = tty->link->driver_data; 671 fsi = tty->link->driver_data;
672 devpts_kill_index(ptmx_inode, tty->index); 672 devpts_kill_index(fsi, tty->index);
673 devpts_del_ref(ptmx_inode); 673 devpts_put_ref(fsi);
674} 674}
675 675
676static const struct tty_operations ptm_unix98_ops = { 676static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
720 720
721static int ptmx_open(struct inode *inode, struct file *filp) 721static int ptmx_open(struct inode *inode, struct file *filp)
722{ 722{
723 struct pts_fs_info *fsi;
723 struct tty_struct *tty; 724 struct tty_struct *tty;
724 struct inode *slave_inode; 725 struct dentry *dentry;
725 int retval; 726 int retval;
726 int index; 727 int index;
727 728
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
734 if (retval) 735 if (retval)
735 return retval; 736 return retval;
736 737
738 fsi = devpts_get_ref(inode, filp);
739 retval = -ENODEV;
740 if (!fsi)
741 goto out_free_file;
742
737 /* find a device that is not in use. */ 743 /* find a device that is not in use. */
738 mutex_lock(&devpts_mutex); 744 mutex_lock(&devpts_mutex);
739 index = devpts_new_index(inode); 745 index = devpts_new_index(fsi);
740 if (index < 0) {
741 retval = index;
742 mutex_unlock(&devpts_mutex);
743 goto err_file;
744 }
745
746 mutex_unlock(&devpts_mutex); 746 mutex_unlock(&devpts_mutex);
747 747
748 mutex_lock(&tty_mutex); 748 retval = index;
749 tty = tty_init_dev(ptm_driver, index); 749 if (index < 0)
750 goto out_put_ref;
750 751
751 if (IS_ERR(tty)) {
752 retval = PTR_ERR(tty);
753 goto out;
754 }
755 752
753 mutex_lock(&tty_mutex);
754 tty = tty_init_dev(ptm_driver, index);
756 /* The tty returned here is locked so we can safely 755 /* The tty returned here is locked so we can safely
757 drop the mutex */ 756 drop the mutex */
758 mutex_unlock(&tty_mutex); 757 mutex_unlock(&tty_mutex);
759 758
760 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 759 retval = PTR_ERR(tty);
761 tty->driver_data = inode; 760 if (IS_ERR(tty))
761 goto out;
762 762
763 /* 763 /*
764 * In the case where all references to ptmx inode are dropped and we 764 * From here on out, the tty is "live", and the index and
765 * still have /dev/tty opened pointing to the master/slave pair (ptmx 765 * fsi will be killed/put by the tty_release()
766 * is closed/released before /dev/tty), we must make sure that the inode
767 * is still valid when we call the final pty_unix98_shutdown, thus we
768 * hold an additional reference to the ptmx inode. For the same /dev/tty
769 * last close case, we also need to make sure the super_block isn't
770 * destroyed (devpts instance unmounted), before /dev/tty is closed and
771 * on its release devpts_kill_index is called.
772 */ 766 */
773 devpts_add_ref(inode); 767 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
768 tty->driver_data = fsi;
774 769
775 tty_add_file(tty, filp); 770 tty_add_file(tty, filp);
776 771
777 slave_inode = devpts_pty_new(inode, 772 dentry = devpts_pty_new(fsi, index, tty->link);
778 MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, 773 if (IS_ERR(dentry)) {
779 tty->link); 774 retval = PTR_ERR(dentry);
780 if (IS_ERR(slave_inode)) {
781 retval = PTR_ERR(slave_inode);
782 goto err_release; 775 goto err_release;
783 } 776 }
784 tty->link->driver_data = slave_inode; 777 tty->link->driver_data = dentry;
785 778
786 retval = ptm_driver->ops->open(tty, filp); 779 retval = ptm_driver->ops->open(tty, filp);
787 if (retval) 780 if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
793 return 0; 786 return 0;
794err_release: 787err_release:
795 tty_unlock(tty); 788 tty_unlock(tty);
789 // This will also put-ref the fsi
796 tty_release(inode, filp); 790 tty_release(inode, filp);
797 return retval; 791 return retval;
798out: 792out:
799 mutex_unlock(&tty_mutex); 793 devpts_kill_index(fsi, index);
800 devpts_kill_index(inode, index); 794out_put_ref:
801err_file: 795 devpts_put_ref(fsi);
796out_free_file:
802 tty_free_file(filp); 797 tty_free_file(filp);
803 return retval; 798 return retval;
804} 799}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da01a3d7..00ad2637b08c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
1403 /* 1403 /*
1404 * Empty the RX FIFO, we are not interested in anything 1404 * Empty the RX FIFO, we are not interested in anything
1405 * received during the half-duplex transmission. 1405 * received during the half-duplex transmission.
1406 * Enable previously disabled RX interrupts.
1406 */ 1407 */
1407 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) 1408 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
1408 serial8250_clear_fifos(p); 1409 serial8250_clear_fifos(p);
1410
1411 serial8250_rpm_get(p);
1412
1413 p->ier |= UART_IER_RLSI | UART_IER_RDI;
1414 serial_port_out(&p->port, UART_IER, p->ier);
1415
1416 serial8250_rpm_put(p);
1417 }
1409} 1418}
1410 1419
1411static void serial8250_em485_handle_stop_tx(unsigned long arg) 1420static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a086ae3..4d7cb9c04fce 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
324config SERIAL_8250_RT288X 324config SERIAL_8250_RT288X
325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" 325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
326 depends on SERIAL_8250 326 depends on SERIAL_8250
327 depends on MIPS || COMPILE_TEST
328 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620 327 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
329 help 328 help
330 Selecting this option will add support for the alternate register 329 Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8bf47f..d08baa668d5d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
72 iowrite32be(val, addr); 72 iowrite32be(val, addr);
73} 73}
74 74
75static const struct uartlite_reg_ops uartlite_be = { 75static struct uartlite_reg_ops uartlite_be = {
76 .in = uartlite_inbe32, 76 .in = uartlite_inbe32,
77 .out = uartlite_outbe32, 77 .out = uartlite_outbe32,
78}; 78};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
87 iowrite32(val, addr); 87 iowrite32(val, addr);
88} 88}
89 89
90static const struct uartlite_reg_ops uartlite_le = { 90static struct uartlite_reg_ops uartlite_le = {
91 .in = uartlite_inle32, 91 .in = uartlite_inle32,
92 .out = uartlite_outle32, 92 .out = uartlite_outle32,
93}; 93};
94 94
95static inline u32 uart_in32(u32 offset, struct uart_port *port) 95static inline u32 uart_in32(u32 offset, struct uart_port *port)
96{ 96{
97 const struct uartlite_reg_ops *reg_ops = port->private_data; 97 struct uartlite_reg_ops *reg_ops = port->private_data;
98 98
99 return reg_ops->in(port->membase + offset); 99 return reg_ops->in(port->membase + offset);
100} 100}
101 101
102static inline void uart_out32(u32 val, u32 offset, struct uart_port *port) 102static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
103{ 103{
104 const struct uartlite_reg_ops *reg_ops = port->private_data; 104 struct uartlite_reg_ops *reg_ops = port->private_data;
105 105
106 reg_ops->out(val, port->membase + offset); 106 reg_ops->out(val, port->membase + offset);
107} 107}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d26ed79bb4c..24d5491ef0da 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
1367 * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. 1367 * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
1368 */ 1368 */
1369static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, 1369static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
1370 struct inode *inode, int idx) 1370 struct file *file, int idx)
1371{ 1371{
1372 struct tty_struct *tty; 1372 struct tty_struct *tty;
1373 1373
1374 if (driver->ops->lookup) 1374 if (driver->ops->lookup)
1375 tty = driver->ops->lookup(driver, inode, idx); 1375 tty = driver->ops->lookup(driver, file, idx);
1376 else 1376 else
1377 tty = driver->ttys[idx]; 1377 tty = driver->ttys[idx];
1378 1378
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
2040 } 2040 }
2041 2041
2042 /* check whether we're reopening an existing tty */ 2042 /* check whether we're reopening an existing tty */
2043 tty = tty_driver_lookup_tty(driver, inode, index); 2043 tty = tty_driver_lookup_tty(driver, filp, index);
2044 if (IS_ERR(tty)) { 2044 if (IS_ERR(tty)) {
2045 mutex_unlock(&tty_mutex); 2045 mutex_unlock(&tty_mutex);
2046 goto out; 2046 goto out;
@@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
2049 if (tty) { 2049 if (tty) {
2050 mutex_unlock(&tty_mutex); 2050 mutex_unlock(&tty_mutex);
2051 retval = tty_lock_interruptible(tty); 2051 retval = tty_lock_interruptible(tty);
2052 tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
2052 if (retval) { 2053 if (retval) {
2053 if (retval == -EINTR) 2054 if (retval == -EINTR)
2054 retval = -ERESTARTSYS; 2055 retval = -ERESTARTSYS;
2055 tty = ERR_PTR(retval); 2056 tty = ERR_PTR(retval);
2056 goto out; 2057 goto out;
2057 } 2058 }
2058 /* safe to drop the kref from tty_driver_lookup_tty() */
2059 tty_kref_put(tty);
2060 retval = tty_reopen(tty); 2059 retval = tty_reopen(tty);
2061 if (retval < 0) { 2060 if (retval < 0) {
2062 tty_unlock(tty); 2061 tty_unlock(tty);
@@ -2158,7 +2157,7 @@ retry_open:
2158 read_lock(&tasklist_lock); 2157 read_lock(&tasklist_lock);
2159 spin_lock_irq(&current->sighand->siglock); 2158 spin_lock_irq(&current->sighand->siglock);
2160 noctty = (filp->f_flags & O_NOCTTY) || 2159 noctty = (filp->f_flags & O_NOCTTY) ||
2161 device == MKDEV(TTY_MAJOR, 0) || 2160 (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
2162 device == MKDEV(TTYAUX_MAJOR, 1) || 2161 device == MKDEV(TTYAUX_MAJOR, 1) ||
2163 (tty->driver->type == TTY_DRIVER_TYPE_PTY && 2162 (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2164 tty->driver->subtype == PTY_TYPE_MASTER); 2163 tty->driver->subtype == PTY_TYPE_MASTER);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b0577c..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@ static void acm_tty_flush_chars(struct tty_struct *tty)
744 int err; 744 int err;
745 unsigned long flags; 745 unsigned long flags;
746 746
747 if (!cur) /* nothing to do */
748 return;
749
747 acm->putbuffer = NULL; 750 acm->putbuffer = NULL;
748 err = usb_autopm_get_interface_async(acm->control); 751 err = usb_autopm_get_interface_async(acm->control);
749 spin_lock_irqsave(&acm->write_lock, flags); 752 spin_lock_irqsave(&acm->write_lock, flags);
750 if (err < 0) { 753 if (err < 0) {
751 cur->use = 0; 754 cur->use = 0;
755 acm->putbuffer = cur;
752 goto out; 756 goto out;
753 } 757 }
754 758
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5eb1a87228b4..31ccdccd7a04 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
75 * be the first thing immediately following the endpoint descriptor. 75 * be the first thing immediately following the endpoint descriptor.
76 */ 76 */
77 desc = (struct usb_ss_ep_comp_descriptor *) buffer; 77 desc = (struct usb_ss_ep_comp_descriptor *) buffer;
78 buffer += desc->bLength;
79 size -= desc->bLength;
80 78
81 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || 79 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
82 size < USB_DT_SS_EP_COMP_SIZE) { 80 size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
100 ep->desc.wMaxPacketSize; 98 ep->desc.wMaxPacketSize;
101 return; 99 return;
102 } 100 }
103 101 buffer += desc->bLength;
102 size -= desc->bLength;
104 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); 103 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
105 104
106 /* Check the various values */ 105 /* Check the various values */
@@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
146 ep->ss_ep_comp.bmAttributes = 2; 145 ep->ss_ep_comp.bmAttributes = 2;
147 } 146 }
148 147
149 /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
150 if (usb_endpoint_xfer_isoc(&ep->desc) &&
151 USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
152 usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
153 ep, buffer, size);
154
155 if (usb_endpoint_xfer_isoc(&ep->desc)) 148 if (usb_endpoint_xfer_isoc(&ep->desc))
156 max_tx = (desc->bMaxBurst + 1) * 149 max_tx = (desc->bMaxBurst + 1) *
157 (USB_SS_MULT(desc->bmAttributes)) * 150 (USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
171 max_tx); 164 max_tx);
172 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); 165 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
173 } 166 }
167 /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
168 if (usb_endpoint_xfer_isoc(&ep->desc) &&
169 USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
170 usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
171 ep, buffer, size);
174} 172}
175 173
176static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, 174static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf23e55..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
73 if (companion->bus != pdev->bus || 73 if (companion->bus != pdev->bus ||
74 PCI_SLOT(companion->devfn) != slot) 74 PCI_SLOT(companion->devfn) != slot)
75 continue; 75 continue;
76
77 /*
78 * Companion device should be either UHCI,OHCI or EHCI host
79 * controller, otherwise skip.
80 */
81 if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
82 companion->class != CL_EHCI)
83 continue;
84
76 companion_hcd = pci_get_drvdata(companion); 85 companion_hcd = pci_get_drvdata(companion);
77 if (!companion_hcd || !companion_hcd->self.root_hub) 86 if (!companion_hcd || !companion_hcd->self.root_hub)
78 continue; 87 continue;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 14718a9ffcfb..460c855be0d0 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
249 249
250 return retval; 250 return retval;
251} 251}
252
253static int usb_port_prepare(struct device *dev)
254{
255 return 1;
256}
257#endif 252#endif
258 253
259static const struct dev_pm_ops usb_port_pm_ops = { 254static const struct dev_pm_ops usb_port_pm_ops = {
260#ifdef CONFIG_PM 255#ifdef CONFIG_PM
261 .runtime_suspend = usb_port_runtime_suspend, 256 .runtime_suspend = usb_port_runtime_suspend,
262 .runtime_resume = usb_port_runtime_resume, 257 .runtime_resume = usb_port_runtime_resume,
263 .prepare = usb_port_prepare,
264#endif 258#endif
265}; 259};
266 260
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dcb85e3cd5a7..479187c32571 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
312 312
313static int usb_dev_prepare(struct device *dev) 313static int usb_dev_prepare(struct device *dev)
314{ 314{
315 struct usb_device *udev = to_usb_device(dev); 315 return 0; /* Implement eventually? */
316
317 /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
318 if (udev->do_remote_wakeup != device_may_wakeup(dev))
319 return 0;
320
321 return 1;
322} 316}
323 317
324static void usb_dev_complete(struct device *dev) 318static void usb_dev_complete(struct device *dev)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e9940dd004e4..818f158232bb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2254{ 2254{
2255 u32 intmsk; 2255 u32 intmsk;
2256 u32 val; 2256 u32 val;
2257 u32 usbcfg;
2257 2258
2258 /* Kill any ep0 requests as controller will be reinitialized */ 2259 /* Kill any ep0 requests as controller will be reinitialized */
2259 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); 2260 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
2267 * set configuration. 2268 * set configuration.
2268 */ 2269 */
2269 2270
2271 /* keep other bits untouched (so e.g. forced modes are not lost) */
2272 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2273 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
2274 GUSBCFG_HNPCAP);
2275
2270 /* set the PLL on, remove the HNP/SRP and set the PHY */ 2276 /* set the PLL on, remove the HNP/SRP and set the PHY */
2271 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 2277 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
2272 dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | 2278 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
2273 (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG); 2279 (val << GUSBCFG_USBTRDTIM_SHIFT);
2280 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
2274 2281
2275 dwc2_hsotg_init_fifo(hsotg); 2282 dwc2_hsotg_init_fifo(hsotg);
2276 2283
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
3031static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) 3038static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
3032{ 3039{
3033 u32 trdtim; 3040 u32 trdtim;
3041 u32 usbcfg;
3034 /* unmask subset of endpoint interrupts */ 3042 /* unmask subset of endpoint interrupts */
3035 3043
3036 dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | 3044 dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
3054 3062
3055 dwc2_hsotg_init_fifo(hsotg); 3063 dwc2_hsotg_init_fifo(hsotg);
3056 3064
3065 /* keep other bits untouched (so e.g. forced modes are not lost) */
3066 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3067 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3068 GUSBCFG_HNPCAP);
3069
3057 /* set the PLL on, remove the HNP/SRP and set the PHY */ 3070 /* set the PLL on, remove the HNP/SRP and set the PHY */
3058 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 3071 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3059 dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | 3072 usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
3060 (trdtim << GUSBCFG_USBTRDTIM_SHIFT), 3073 (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
3061 hsotg->regs + GUSBCFG); 3074 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
3062 3075
3063 if (using_dma(hsotg)) 3076 if (using_dma(hsotg))
3064 __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN); 3077 __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 17fd81447c9f..34277ced26bd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
67static int dwc3_core_soft_reset(struct dwc3 *dwc) 67static int dwc3_core_soft_reset(struct dwc3 *dwc)
68{ 68{
69 u32 reg; 69 u32 reg;
70 int retries = 1000;
70 int ret; 71 int ret;
71 72
72 /* Before Resetting PHY, put Core in Reset */
73 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
74 reg |= DWC3_GCTL_CORESOFTRESET;
75 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
76
77 /* Assert USB3 PHY reset */
78 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
79 reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
80 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
81
82 /* Assert USB2 PHY reset */
83 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
84 reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
85 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
86
87 usb_phy_init(dwc->usb2_phy); 73 usb_phy_init(dwc->usb2_phy);
88 usb_phy_init(dwc->usb3_phy); 74 usb_phy_init(dwc->usb3_phy);
89 ret = phy_init(dwc->usb2_generic_phy); 75 ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
95 phy_exit(dwc->usb2_generic_phy); 81 phy_exit(dwc->usb2_generic_phy);
96 return ret; 82 return ret;
97 } 83 }
98 mdelay(100);
99 84
100 /* Clear USB3 PHY reset */ 85 /*
101 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); 86 * We're resetting only the device side because, if we're in host mode,
102 reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; 87 * XHCI driver will reset the host block. If dwc3 was configured for
103 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); 88 * host-only mode, then we can return early.
89 */
90 if (dwc->dr_mode == USB_DR_MODE_HOST)
91 return 0;
104 92
105 /* Clear USB2 PHY reset */ 93 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
106 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); 94 reg |= DWC3_DCTL_CSFTRST;
107 reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; 95 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
108 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
109 96
110 mdelay(100); 97 do {
98 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
99 if (!(reg & DWC3_DCTL_CSFTRST))
100 return 0;
111 101
112 /* After PHYs are stable we can take Core out of reset state */ 102 udelay(1);
113 reg = dwc3_readl(dwc->regs, DWC3_GCTL); 103 } while (--retries);
114 reg &= ~DWC3_GCTL_CORESOFTRESET;
115 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
116 104
117 return 0; 105 return -ETIMEDOUT;
118} 106}
119 107
120/** 108/**
@@ -1162,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
1162 phy_exit(dwc->usb2_generic_phy); 1150 phy_exit(dwc->usb2_generic_phy);
1163 phy_exit(dwc->usb3_generic_phy); 1151 phy_exit(dwc->usb3_generic_phy);
1164 1152
1153 usb_phy_set_suspend(dwc->usb2_phy, 1);
1154 usb_phy_set_suspend(dwc->usb3_phy, 1);
1155 WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
1156 WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
1157
1165 pinctrl_pm_select_sleep_state(dev); 1158 pinctrl_pm_select_sleep_state(dev);
1166 1159
1167 return 0; 1160 return 0;
@@ -1175,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
1175 1168
1176 pinctrl_pm_select_default_state(dev); 1169 pinctrl_pm_select_default_state(dev);
1177 1170
1171 usb_phy_set_suspend(dwc->usb2_phy, 0);
1172 usb_phy_set_suspend(dwc->usb3_phy, 0);
1173 ret = phy_power_on(dwc->usb2_generic_phy);
1174 if (ret < 0)
1175 return ret;
1176
1177 ret = phy_power_on(dwc->usb3_generic_phy);
1178 if (ret < 0)
1179 goto err_usb2phy_power;
1180
1178 usb_phy_init(dwc->usb3_phy); 1181 usb_phy_init(dwc->usb3_phy);
1179 usb_phy_init(dwc->usb2_phy); 1182 usb_phy_init(dwc->usb2_phy);
1180 ret = phy_init(dwc->usb2_generic_phy); 1183 ret = phy_init(dwc->usb2_generic_phy);
1181 if (ret < 0) 1184 if (ret < 0)
1182 return ret; 1185 goto err_usb3phy_power;
1183 1186
1184 ret = phy_init(dwc->usb3_generic_phy); 1187 ret = phy_init(dwc->usb3_generic_phy);
1185 if (ret < 0) 1188 if (ret < 0)
@@ -1212,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
1212err_usb2phy_init: 1215err_usb2phy_init:
1213 phy_exit(dwc->usb2_generic_phy); 1216 phy_exit(dwc->usb2_generic_phy);
1214 1217
1218err_usb3phy_power:
1219 phy_power_off(dwc->usb3_generic_phy);
1220
1221err_usb2phy_power:
1222 phy_power_off(dwc->usb2_generic_phy);
1223
1215 return ret; 1224 return ret;
1216} 1225}
1217 1226
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..cebf9e38b60a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); 645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
646 if (!file) { 646 if (!file) {
647 ret = -ENOMEM; 647 ret = -ENOMEM;
648 goto err1; 648 goto err2;
649 } 649 }
650 650
651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { 651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
653 dwc, &dwc3_mode_fops); 653 dwc, &dwc3_mode_fops);
654 if (!file) { 654 if (!file) {
655 ret = -ENOMEM; 655 ret = -ENOMEM;
656 goto err1; 656 goto err2;
657 } 657 }
658 } 658 }
659 659
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
663 dwc, &dwc3_testmode_fops); 663 dwc, &dwc3_testmode_fops);
664 if (!file) { 664 if (!file) {
665 ret = -ENOMEM; 665 ret = -ENOMEM;
666 goto err1; 666 goto err2;
667 } 667 }
668 668
669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, 669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
670 dwc, &dwc3_link_state_fops); 670 dwc, &dwc3_link_state_fops);
671 if (!file) { 671 if (!file) {
672 ret = -ENOMEM; 672 ret = -ENOMEM;
673 goto err1; 673 goto err2;
674 } 674 }
675 } 675 }
676 676
677 return 0; 677 return 0;
678 678
679err2:
680 kfree(dwc->regset);
681
679err1: 682err1:
680 debugfs_remove_recursive(root); 683 debugfs_remove_recursive(root);
681 684
@@ -686,5 +689,5 @@ err0:
686void dwc3_debugfs_exit(struct dwc3 *dwc) 689void dwc3_debugfs_exit(struct dwc3 *dwc)
687{ 690{
688 debugfs_remove_recursive(dwc->root); 691 debugfs_remove_recursive(dwc->root);
689 dwc->root = NULL; 692 kfree(dwc->regset);
690} 693}
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..72664700b8a2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
39#define USBSS_IRQ_COREIRQ_EN BIT(0) 39#define USBSS_IRQ_COREIRQ_EN BIT(0)
40#define USBSS_IRQ_COREIRQ_CLR BIT(0) 40#define USBSS_IRQ_COREIRQ_CLR BIT(0)
41 41
42static u64 kdwc3_dma_mask;
43
44struct dwc3_keystone { 42struct dwc3_keystone {
45 struct device *dev; 43 struct device *dev;
46 struct clk *clk; 44 struct clk *clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
108 if (IS_ERR(kdwc->usbss)) 106 if (IS_ERR(kdwc->usbss))
109 return PTR_ERR(kdwc->usbss); 107 return PTR_ERR(kdwc->usbss);
110 108
111 kdwc3_dma_mask = dma_get_mask(dev);
112 dev->dma_mask = &kdwc3_dma_mask;
113
114 kdwc->clk = devm_clk_get(kdwc->dev, "usb"); 109 kdwc->clk = devm_clk_get(kdwc->dev, "usb");
115 110
116 error = clk_prepare_enable(kdwc->clk); 111 error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606d8e08..55da2c7f727f 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
496 ret = pm_runtime_get_sync(dev); 496 ret = pm_runtime_get_sync(dev);
497 if (ret < 0) { 497 if (ret < 0) {
498 dev_err(dev, "get_sync failed with err %d\n", ret); 498 dev_err(dev, "get_sync failed with err %d\n", ret);
499 goto err0; 499 goto err1;
500 } 500 }
501 501
502 dwc3_omap_map_offset(omap); 502 dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
516 516
517 ret = dwc3_omap_extcon_register(omap); 517 ret = dwc3_omap_extcon_register(omap);
518 if (ret < 0) 518 if (ret < 0)
519 goto err2; 519 goto err1;
520 520
521 ret = of_platform_populate(node, NULL, NULL, dev); 521 ret = of_platform_populate(node, NULL, NULL, dev);
522 if (ret) { 522 if (ret) {
523 dev_err(&pdev->dev, "failed to create dwc3 core\n"); 523 dev_err(&pdev->dev, "failed to create dwc3 core\n");
524 goto err3; 524 goto err2;
525 } 525 }
526 526
527 dwc3_omap_enable_irqs(omap); 527 dwc3_omap_enable_irqs(omap);
528 528
529 return 0; 529 return 0;
530 530
531err3: 531err2:
532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
534err2:
535 dwc3_omap_disable_irqs(omap);
536 534
537err1: 535err1:
538 pm_runtime_put_sync(dev); 536 pm_runtime_put_sync(dev);
539
540err0:
541 pm_runtime_disable(dev); 537 pm_runtime_disable(dev);
542 538
543 return ret; 539 return ret;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..adc1e8a624cb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa 37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
38#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
39 40
40static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 41static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
213 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, 216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
217 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 218 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
217 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 219 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
218 { } /* Terminating Entry */ 220 { } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ac170f9d94d..8e4a1b195e9b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 568 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
569 569
570 if (!usb_endpoint_xfer_isoc(desc)) 570 if (!usb_endpoint_xfer_isoc(desc))
571 return 0; 571 goto out;
572 572
573 /* Link TRB for ISOC. The HWO bit is never reset */ 573 /* Link TRB for ISOC. The HWO bit is never reset */
574 trb_st_hw = &dep->trb_pool[0]; 574 trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
582 trb_link->ctrl |= DWC3_TRB_CTRL_HWO; 582 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
583 } 583 }
584 584
585out:
585 switch (usb_endpoint_type(desc)) { 586 switch (usb_endpoint_type(desc)) {
586 case USB_ENDPOINT_XFER_CONTROL: 587 case USB_ENDPOINT_XFER_CONTROL:
587 strlcat(dep->name, "-control", sizeof(dep->name)); 588 /* don't change name */
588 break; 589 break;
589 case USB_ENDPOINT_XFER_ISOC: 590 case USB_ENDPOINT_XFER_ISOC:
590 strlcat(dep->name, "-isoc", sizeof(dep->name)); 591 strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2487 * implemented. 2488 * implemented.
2488 */ 2489 */
2489 2490
2490 dwc->gadget_driver->resume(&dwc->gadget); 2491 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2492 spin_unlock(&dwc->lock);
2493 dwc->gadget_driver->resume(&dwc->gadget);
2494 spin_lock(&dwc->lock);
2495 }
2491} 2496}
2492 2497
2493static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, 2498static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2931,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
2931 2936
2932int dwc3_gadget_suspend(struct dwc3 *dwc) 2937int dwc3_gadget_suspend(struct dwc3 *dwc)
2933{ 2938{
2939 if (!dwc->gadget_driver)
2940 return 0;
2941
2934 if (dwc->pullups_connected) { 2942 if (dwc->pullups_connected) {
2935 dwc3_gadget_disable_irq(dwc); 2943 dwc3_gadget_disable_irq(dwc);
2936 dwc3_gadget_run_stop(dwc, true, true); 2944 dwc3_gadget_run_stop(dwc, true, true);
@@ -2949,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
2949 struct dwc3_ep *dep; 2957 struct dwc3_ep *dep;
2950 int ret; 2958 int ret;
2951 2959
2960 if (!dwc->gadget_driver)
2961 return 0;
2962
2952 /* Start with SuperSpeed Default */ 2963 /* Start with SuperSpeed Default */
2953 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2964 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2954 2965
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a5c62093c26c..524e233d48de 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,12 +651,15 @@ static int bos_desc(struct usb_composite_dev *cdev)
651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); 651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; 653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
654 ssp_cap->bReserved = 0;
655 ssp_cap->wReserved = 0;
654 656
655 /* SSAC = 1 (2 attributes) */ 657 /* SSAC = 1 (2 attributes) */
656 ssp_cap->bmAttributes = cpu_to_le32(1); 658 ssp_cap->bmAttributes = cpu_to_le32(1);
657 659
658 /* Min RX/TX Lane Count = 1 */ 660 /* Min RX/TX Lane Count = 1 */
659 ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12); 661 ssp_cap->wFunctionalitySupport =
662 cpu_to_le16((1 << 8) | (1 << 12));
660 663
661 /* 664 /*
662 * bmSublinkSpeedAttr[0]: 665 * bmSublinkSpeedAttr[0]:
@@ -666,7 +669,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
666 * LSM = 10 (10 Gbps) 669 * LSM = 10 (10 Gbps)
667 */ 670 */
668 ssp_cap->bmSublinkSpeedAttr[0] = 671 ssp_cap->bmSublinkSpeedAttr[0] =
669 (3 << 4) | (1 << 14) | (0xa << 16); 672 cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
670 /* 673 /*
671 * bmSublinkSpeedAttr[1] = 674 * bmSublinkSpeedAttr[1] =
672 * ST = Symmetric, TX 675 * ST = Symmetric, TX
@@ -675,7 +678,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
675 * LSM = 10 (10 Gbps) 678 * LSM = 10 (10 Gbps)
676 */ 679 */
677 ssp_cap->bmSublinkSpeedAttr[1] = 680 ssp_cap->bmSublinkSpeedAttr[1] =
678 (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7); 681 cpu_to_le32((3 << 4) | (1 << 14) |
682 (0xa << 16) | (1 << 7));
679 } 683 }
680 684
681 return le16_to_cpu(bos->wTotalLength); 685 return le16_to_cpu(bos->wTotalLength);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce105c7ee..15b648cbc75c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
646 work); 646 work);
647 int ret = io_data->req->status ? io_data->req->status : 647 int ret = io_data->req->status ? io_data->req->status :
648 io_data->req->actual; 648 io_data->req->actual;
649 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
649 650
650 if (io_data->read && ret > 0) { 651 if (io_data->read && ret > 0) {
651 use_mm(io_data->mm); 652 use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
657 658
658 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); 659 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
659 660
660 if (io_data->ffs->ffs_eventfd && 661 if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
661 !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
662 eventfd_signal(io_data->ffs->ffs_eventfd, 1); 662 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
663 663
664 usb_ep_free_request(io_data->ep, io_data->req); 664 usb_ep_free_request(io_data->ep, io_data->req);
665 665
666 io_data->kiocb->private = NULL;
667 if (io_data->read) 666 if (io_data->read)
668 kfree(io_data->to_free); 667 kfree(io_data->to_free);
669 kfree(io_data->buf); 668 kfree(io_data->buf);
@@ -1147,8 +1146,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
1147 ffs->sb = sb; 1146 ffs->sb = sb;
1148 data->ffs_data = NULL; 1147 data->ffs_data = NULL;
1149 sb->s_fs_info = ffs; 1148 sb->s_fs_info = ffs;
1150 sb->s_blocksize = PAGE_CACHE_SIZE; 1149 sb->s_blocksize = PAGE_SIZE;
1151 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1150 sb->s_blocksize_bits = PAGE_SHIFT;
1152 sb->s_magic = FUNCTIONFS_MAGIC; 1151 sb->s_magic = FUNCTIONFS_MAGIC;
1153 sb->s_op = &ffs_sb_operations; 1152 sb->s_op = &ffs_sb_operations;
1154 sb->s_time_gran = 1; 1153 sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 84c0ee5ebd1e..58fc199a18ec 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/kfifo.h> 26#include <linux/kfifo.h>
27#include <linux/spinlock.h>
27 28
28#include <sound/core.h> 29#include <sound/core.h>
29#include <sound/initval.h> 30#include <sound/initval.h>
@@ -89,6 +90,7 @@ struct f_midi {
89 unsigned int buflen, qlen; 90 unsigned int buflen, qlen;
90 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ 91 /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
91 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); 92 DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
93 spinlock_t transmit_lock;
92 unsigned int in_last_port; 94 unsigned int in_last_port;
93 95
94 struct gmidi_in_port in_ports_array[/* in_ports */]; 96 struct gmidi_in_port in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
358 /* allocate a bunch of read buffers and queue them all at once. */ 360 /* allocate a bunch of read buffers and queue them all at once. */
359 for (i = 0; i < midi->qlen && err == 0; i++) { 361 for (i = 0; i < midi->qlen && err == 0; i++) {
360 struct usb_request *req = 362 struct usb_request *req =
361 midi_alloc_ep_req(midi->out_ep, midi->buflen); 363 midi_alloc_ep_req(midi->out_ep,
364 max_t(unsigned, midi->buflen,
365 bulk_out_desc.wMaxPacketSize));
362 if (req == NULL) 366 if (req == NULL)
363 return -ENOMEM; 367 return -ENOMEM;
364 368
@@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi)
597{ 601{
598 struct usb_ep *ep = midi->in_ep; 602 struct usb_ep *ep = midi->in_ep;
599 int ret; 603 int ret;
604 unsigned long flags;
600 605
601 /* We only care about USB requests if IN endpoint is enabled */ 606 /* We only care about USB requests if IN endpoint is enabled */
602 if (!ep || !ep->enabled) 607 if (!ep || !ep->enabled)
603 goto drop_out; 608 goto drop_out;
604 609
610 spin_lock_irqsave(&midi->transmit_lock, flags);
611
605 do { 612 do {
606 ret = f_midi_do_transmit(midi, ep); 613 ret = f_midi_do_transmit(midi, ep);
607 if (ret < 0) 614 if (ret < 0) {
615 spin_unlock_irqrestore(&midi->transmit_lock, flags);
608 goto drop_out; 616 goto drop_out;
617 }
609 } while (ret); 618 } while (ret);
610 619
620 spin_unlock_irqrestore(&midi->transmit_lock, flags);
621
611 return; 622 return;
612 623
613drop_out: 624drop_out:
@@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1201 if (status) 1212 if (status)
1202 goto setup_fail; 1213 goto setup_fail;
1203 1214
1215 spin_lock_init(&midi->transmit_lock);
1216
1204 ++opts->refcnt; 1217 ++opts->refcnt;
1205 mutex_unlock(&opts->lock); 1218 mutex_unlock(&opts->lock);
1206 1219
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf0150a4e..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
1954 return -ENODEV; 1954 return -ENODEV;
1955 1955
1956 /* superblock */ 1956 /* superblock */
1957 sb->s_blocksize = PAGE_CACHE_SIZE; 1957 sb->s_blocksize = PAGE_SIZE;
1958 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1958 sb->s_blocksize_bits = PAGE_SHIFT;
1959 sb->s_magic = GADGETFS_MAGIC; 1959 sb->s_magic = GADGETFS_MAGIC;
1960 sb->s_op = &gadget_fs_operations; 1960 sb->s_op = &gadget_fs_operations;
1961 sb->s_time_gran = 1; 1961 sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 81d42cce885a..18569de06b04 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
1045 list_del_init(&req->queue); 1045 list_del_init(&req->queue);
1046 request_complete(ep, req, -ECONNRESET); 1046 request_complete(ep, req, -ECONNRESET);
1047 } 1047 }
1048
1049 /* NOTE: normally, the next call to the gadget driver is in
1050 * charge of disabling endpoints... usually disconnect().
1051 * The exception would be entering a high speed test mode.
1052 *
1053 * FIXME remove this code ... and retest thoroughly.
1054 */
1055 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1056 if (ep->ep.desc) {
1057 spin_unlock(&udc->lock);
1058 usba_ep_disable(&ep->ep);
1059 spin_lock(&udc->lock);
1060 }
1061 }
1062} 1048}
1063 1049
1064static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) 1050static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 4151597e9d28..e4e70e11d0f6 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
371 INIT_WORK(&gadget->work, usb_gadget_state_work); 371 INIT_WORK(&gadget->work, usb_gadget_state_work);
372 gadget->dev.parent = parent; 372 gadget->dev.parent = parent;
373 373
374#ifdef CONFIG_HAS_DMA
375 dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
376 gadget->dev.dma_parms = parent->dma_parms;
377 gadget->dev.dma_mask = parent->dma_mask;
378#endif
379
380 if (release) 374 if (release)
381 gadget->dev.release = release; 375 gadget->dev.release = release;
382 else 376 else
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de239e9a..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@ no_bw:
1861 kfree(xhci->rh_bw); 1861 kfree(xhci->rh_bw);
1862 kfree(xhci->ext_caps); 1862 kfree(xhci->ext_caps);
1863 1863
1864 xhci->usb2_ports = NULL;
1865 xhci->usb3_ports = NULL;
1866 xhci->port_array = NULL;
1867 xhci->rh_bw = NULL;
1868 xhci->ext_caps = NULL;
1869
1864 xhci->page_size = 0; 1870 xhci->page_size = 0;
1865 xhci->page_shift = 0; 1871 xhci->page_shift = 0;
1866 xhci->bus_state[0].bus_suspended = 0; 1872 xhci->bus_state[0].bus_suspended = 0;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
48#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f 48#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
49#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f 49#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
50#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 50#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
51#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
51 52
52static const char hcd_name[] = "xhci_hcd"; 53static const char hcd_name[] = "xhci_hcd";
53 54
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
155 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || 156 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
156 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || 157 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
157 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 158 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
158 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) { 159 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
160 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
159 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 161 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
160 } 162 }
161 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 163 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
302 struct xhci_hcd *xhci; 304 struct xhci_hcd *xhci;
303 305
304 xhci = hcd_to_xhci(pci_get_drvdata(dev)); 306 xhci = hcd_to_xhci(pci_get_drvdata(dev));
307 xhci->xhc_state |= XHCI_STATE_REMOVING;
305 if (xhci->shared_hcd) { 308 if (xhci->shared_hcd) {
306 usb_remove_hcd(xhci->shared_hcd); 309 usb_remove_hcd(xhci->shared_hcd);
307 usb_put_hcd(xhci->shared_hcd); 310 usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9bc5f7a..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
39 39
40static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) 40static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
41{ 41{
42 struct usb_hcd *hcd = xhci_to_hcd(xhci);
43
42 /* 44 /*
43 * As of now platform drivers don't provide MSI support so we ensure 45 * As of now platform drivers don't provide MSI support so we ensure
44 * here that the generic code does not try to make a pci_dev from our 46 * here that the generic code does not try to make a pci_dev from our
45 * dev struct in order to setup MSI 47 * dev struct in order to setup MSI
46 */ 48 */
47 xhci->quirks |= XHCI_PLAT; 49 xhci->quirks |= XHCI_PLAT;
50
51 /*
52 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
53 * to 1. However, these SoCs don't support 64-bit address memory
54 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
55 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
56 * xhci_gen_setup().
57 */
58 if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
59 xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
60 xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
48} 61}
49 62
50/* called during probe() after chip reset completes */ 63/* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
14#include "xhci.h" /* for hcd_to_xhci() */ 14#include "xhci.h" /* for hcd_to_xhci() */
15 15
16enum xhci_plat_type { 16enum xhci_plat_type {
17 XHCI_PLAT_TYPE_MARVELL_ARMADA, 17 XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
18 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2, 18 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
19 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3, 19 XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
20}; 20};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf66212ceae..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4004 int reserved_trbs = xhci->cmd_ring_reserved_trbs; 4004 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4005 int ret; 4005 int ret;
4006 4006
4007 if (xhci->xhc_state) { 4007 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4008 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4008 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); 4009 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4009 return -ESHUTDOWN; 4010 return -ESHUTDOWN;
4010 } 4011 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c3cf9f..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
147 "waited %u microseconds.\n", 147 "waited %u microseconds.\n",
148 XHCI_MAX_HALT_USEC); 148 XHCI_MAX_HALT_USEC);
149 if (!ret) 149 if (!ret)
150 xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); 150 /* clear state flags. Including dying, halted or removing */
151 xhci->xhc_state = 0;
151 152
152 return ret; 153 return ret;
153} 154}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1108 /* Resume root hubs only when have pending events. */ 1109 /* Resume root hubs only when have pending events. */
1109 status = readl(&xhci->op_regs->status); 1110 status = readl(&xhci->op_regs->status);
1110 if (status & STS_EINT) { 1111 if (status & STS_EINT) {
1111 usb_hcd_resume_root_hub(hcd);
1112 usb_hcd_resume_root_hub(xhci->shared_hcd); 1112 usb_hcd_resume_root_hub(xhci->shared_hcd);
1113 usb_hcd_resume_root_hub(hcd);
1113 } 1114 }
1114 } 1115 }
1115 1116
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1124 1125
1125 /* Re-enable port polling. */ 1126 /* Re-enable port polling. */
1126 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1127 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1127 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1128 usb_hcd_poll_rh_status(hcd);
1129 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 1128 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1130 usb_hcd_poll_rh_status(xhci->shared_hcd); 1129 usb_hcd_poll_rh_status(xhci->shared_hcd);
1130 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1131 usb_hcd_poll_rh_status(hcd);
1131 1132
1132 return retval; 1133 return retval;
1133} 1134}
@@ -2773,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2773 if (ret <= 0) 2774 if (ret <= 0)
2774 return ret; 2775 return ret;
2775 xhci = hcd_to_xhci(hcd); 2776 xhci = hcd_to_xhci(hcd);
2776 if (xhci->xhc_state & XHCI_STATE_DYING) 2777 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2778 (xhci->xhc_state & XHCI_STATE_REMOVING))
2777 return -ENODEV; 2779 return -ENODEV;
2778 2780
2779 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2781 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3820 3822
3821 mutex_lock(&xhci->mutex); 3823 mutex_lock(&xhci->mutex);
3822 3824
3823 if (xhci->xhc_state) /* dying or halted */ 3825 if (xhci->xhc_state) /* dying, removing or halted */
3824 goto out; 3826 goto out;
3825 3827
3826 if (!udev->slot_id) { 3828 if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4948 return retval; 4950 return retval;
4949 xhci_dbg(xhci, "Reset complete\n"); 4951 xhci_dbg(xhci, "Reset complete\n");
4950 4952
4953 /*
4954 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
4955 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
4956 * address memory pointers actually. So, this driver clears the AC64
4957 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
4958 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
4959 */
4960 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4961 xhci->hcc_params &= ~BIT(0);
4962
4951 /* Set dma_mask and coherent_dma_mask to 64-bits, 4963 /* Set dma_mask and coherent_dma_mask to 64-bits,
4952 * if xHC supports 64-bit addressing */ 4964 * if xHC supports 64-bit addressing */
4953 if (HCC_64BIT_ADDR(xhci->hcc_params) && 4965 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e0974f48..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@ struct xhci_hcd {
1605 */ 1605 */
1606#define XHCI_STATE_DYING (1 << 0) 1606#define XHCI_STATE_DYING (1 << 0)
1607#define XHCI_STATE_HALTED (1 << 1) 1607#define XHCI_STATE_HALTED (1 << 1)
1608#define XHCI_STATE_REMOVING (1 << 2)
1608 /* Statistics */ 1609 /* Statistics */
1609 int error_bitmask; 1610 int error_bitmask;
1610 unsigned int quirks; 1611 unsigned int quirks;
@@ -1641,6 +1642,7 @@ struct xhci_hcd {
1641#define XHCI_PME_STUCK_QUIRK (1 << 20) 1642#define XHCI_PME_STUCK_QUIRK (1 << 20)
1642#define XHCI_MTK_HOST (1 << 21) 1643#define XHCI_MTK_HOST (1 << 21)
1643#define XHCI_SSIC_PORT_UNUSED (1 << 22) 1644#define XHCI_SSIC_PORT_UNUSED (1 << 22)
1645#define XHCI_NO_64BIT_SUPPORT (1 << 23)
1644 unsigned int num_active_eps; 1646 unsigned int num_active_eps;
1645 unsigned int limit_active_eps; 1647 unsigned int limit_active_eps;
1646 /* There are two roothubs to keep track of bus suspend info for */ 1648 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5e5a8fa005f8..bc8889956d17 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
83{ 83{
84 usb_phy_generic_register(); 84 usb_phy_generic_register();
85 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 85 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
86 if (!musb->xceiv) { 86 if (IS_ERR(musb->xceiv)) {
87 pr_err("HS UDC: no transceiver configured\n"); 87 pr_err("HS UDC: no transceiver configured\n");
88 return -ENODEV; 88 return PTR_ERR(musb->xceiv);
89 } 89 }
90 90
91 /* Silicon does not implement ConfigData register. 91 /* Silicon does not implement ConfigData register.
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 87bd578799a8..152865b36522 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
1164 musb_writew(epio, MUSB_RXMAXP, 0); 1164 musb_writew(epio, MUSB_RXMAXP, 0);
1165 } 1165 }
1166 1166
1167 musb_ep->desc = NULL;
1168 musb_ep->end_point.desc = NULL;
1169
1170 /* abort all pending DMA and requests */ 1167 /* abort all pending DMA and requests */
1171 nuke(musb_ep, -ESHUTDOWN); 1168 nuke(musb_ep, -ESHUTDOWN);
1172 1169
1170 musb_ep->desc = NULL;
1171 musb_ep->end_point.desc = NULL;
1172
1173 schedule_work(&musb->irq_work); 1173 schedule_work(&musb->irq_work);
1174 1174
1175 spin_unlock_irqrestore(&(musb->lock), flags); 1175 spin_unlock_irqrestore(&(musb->lock), flags);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 58487a473521..2f8ad7f1f482 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
2735 .description = "musb-hcd", 2735 .description = "musb-hcd",
2736 .product_desc = "MUSB HDRC host driver", 2736 .product_desc = "MUSB HDRC host driver",
2737 .hcd_priv_size = sizeof(struct musb *), 2737 .hcd_priv_size = sizeof(struct musb *),
2738 .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 2738 .flags = HCD_USB2 | HCD_MEMORY,
2739 2739
2740 /* not using irq handler or reset hooks from usbcore, since 2740 /* not using irq handler or reset hooks from usbcore, since
2741 * those must be shared with peripheral code for OTG configs 2741 * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d97217..3d7af85aecb9 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@ struct phy_8x16 {
65 void __iomem *regs; 65 void __iomem *regs;
66 struct clk *core_clk; 66 struct clk *core_clk;
67 struct clk *iface_clk; 67 struct clk *iface_clk;
68 struct regulator *v3p3; 68 struct regulator_bulk_data regulator[3];
69 struct regulator *v1p8;
70 struct regulator *vdd;
71 69
72 struct reset_control *phy_reset; 70 struct reset_control *phy_reset;
73 71
@@ -78,51 +76,6 @@ struct phy_8x16 {
78 struct notifier_block reboot_notify; 76 struct notifier_block reboot_notify;
79}; 77};
80 78
81static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
82{
83 int ret;
84
85 ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
86 if (ret)
87 return ret;
88
89 ret = regulator_enable(qphy->vdd);
90 if (ret)
91 return ret;
92
93 ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
94 if (ret)
95 goto off_vdd;
96
97 ret = regulator_enable(qphy->v3p3);
98 if (ret)
99 goto off_vdd;
100
101 ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
102 if (ret)
103 goto off_3p3;
104
105 ret = regulator_enable(qphy->v1p8);
106 if (ret)
107 goto off_3p3;
108
109 return 0;
110
111off_3p3:
112 regulator_disable(qphy->v3p3);
113off_vdd:
114 regulator_disable(qphy->vdd);
115
116 return ret;
117}
118
119static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
120{
121 regulator_disable(qphy->v1p8);
122 regulator_disable(qphy->v3p3);
123 regulator_disable(qphy->vdd);
124}
125
126static int phy_8x16_notify_connect(struct usb_phy *phy, 79static int phy_8x16_notify_connect(struct usb_phy *phy,
127 enum usb_device_speed speed) 80 enum usb_device_speed speed)
128{ 81{
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
261 214
262static int phy_8x16_read_devicetree(struct phy_8x16 *qphy) 215static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
263{ 216{
264 struct regulator_bulk_data regs[3];
265 struct device *dev = qphy->phy.dev; 217 struct device *dev = qphy->phy.dev;
266 int ret; 218 int ret;
267 219
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
273 if (IS_ERR(qphy->iface_clk)) 225 if (IS_ERR(qphy->iface_clk))
274 return PTR_ERR(qphy->iface_clk); 226 return PTR_ERR(qphy->iface_clk);
275 227
276 regs[0].supply = "v3p3"; 228 qphy->regulator[0].supply = "v3p3";
277 regs[1].supply = "v1p8"; 229 qphy->regulator[1].supply = "v1p8";
278 regs[2].supply = "vddcx"; 230 qphy->regulator[2].supply = "vddcx";
279 231
280 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs); 232 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
233 qphy->regulator);
281 if (ret) 234 if (ret)
282 return ret; 235 return ret;
283 236
284 qphy->v3p3 = regs[0].consumer;
285 qphy->v1p8 = regs[1].consumer;
286 qphy->vdd = regs[2].consumer;
287
288 qphy->phy_reset = devm_reset_control_get(dev, "phy"); 237 qphy->phy_reset = devm_reset_control_get(dev, "phy");
289 if (IS_ERR(qphy->phy_reset)) 238 if (IS_ERR(qphy->phy_reset))
290 return PTR_ERR(qphy->phy_reset); 239 return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
364 if (ret < 0) 313 if (ret < 0)
365 goto off_core; 314 goto off_core;
366 315
367 ret = phy_8x16_regulators_enable(qphy); 316 ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
368 if (0 && ret) 317 qphy->regulator);
318 if (WARN_ON(ret))
369 goto off_clks; 319 goto off_clks;
370 320
371 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify; 321 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
387 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, 337 extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
388 &qphy->vbus_notify); 338 &qphy->vbus_notify);
389off_power: 339off_power:
390 phy_8x16_regulators_disable(qphy); 340 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
391off_clks: 341off_clks:
392 clk_disable_unprepare(qphy->iface_clk); 342 clk_disable_unprepare(qphy->iface_clk);
393off_core: 343off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
413 363
414 clk_disable_unprepare(qphy->iface_clk); 364 clk_disable_unprepare(qphy->iface_clk);
415 clk_disable_unprepare(qphy->core_clk); 365 clk_disable_unprepare(qphy->core_clk);
416 phy_8x16_regulators_disable(qphy); 366 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
417 return 0; 367 return 0;
418} 368}
419 369
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b4de70ee16d3..000f9750149f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
190 goto __usbhs_pkt_handler_end; 190 goto __usbhs_pkt_handler_end;
191 } 191 }
192 192
193 ret = func(pkt, &is_done); 193 if (likely(func))
194 ret = func(pkt, &is_done);
194 195
195 if (is_done) 196 if (is_done)
196 __usbhsf_pkt_del(pkt); 197 __usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
889 890
890 pkt->trans = len; 891 pkt->trans = len;
891 892
893 usbhsf_tx_irq_ctrl(pipe, 0);
892 INIT_WORK(&pkt->work, xfer_work); 894 INIT_WORK(&pkt->work, xfer_work);
893 schedule_work(&pkt->work); 895 schedule_work(&pkt->work);
894 896
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 664b263e4b20..53d104b56ef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
158 struct usbhs_pipe *pipe = pkt->pipe; 158 struct usbhs_pipe *pipe = pkt->pipe;
159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); 159 struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); 160 struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
161 unsigned long flags;
161 162
162 ureq->req.actual = pkt->actual; 163 ureq->req.actual = pkt->actual;
163 164
164 usbhsg_queue_pop(uep, ureq, 0); 165 usbhs_lock(priv, flags);
166 if (uep)
167 __usbhsg_queue_pop(uep, ureq, 0);
168 usbhs_unlock(priv, flags);
165} 169}
166 170
167static void usbhsg_queue_push(struct usbhsg_uep *uep, 171static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbfe761c7fba..7c9f25e9c422 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
109 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ 109 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
110 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ 110 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
111 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ 111 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
112 { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
112 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 113 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
113 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 114 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
114 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ 115 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
118 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ 119 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
119 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 120 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
120 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 121 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
122 { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
121 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 123 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
122 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 124 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
123 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 125 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
141 { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ 143 { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
142 { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ 144 { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
143 { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ 145 { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
146 { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
147 { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
144 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ 148 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
145 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ 149 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
146 { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ 150 { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
@@ -165,6 +169,7 @@ static const struct usb_device_id id_table[] = {
165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 169 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 170 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 171 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
172 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
168 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ 173 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
169 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 174 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
170 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 175 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index b283eb8b86d6..bbeeb2bd55a8 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
447 struct usb_serial *serial = port->serial; 447 struct usb_serial *serial = port->serial;
448 struct cypress_private *priv; 448 struct cypress_private *priv;
449 449
450 if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
451 dev_err(&port->dev, "required endpoint is missing\n");
452 return -ENODEV;
453 }
454
450 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); 455 priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
451 if (!priv) 456 if (!priv)
452 return -ENOMEM; 457 return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
606 cypress_set_termios(tty, port, &priv->tmp_termios); 611 cypress_set_termios(tty, port, &priv->tmp_termios);
607 612
608 /* setup the port and start reading from the device */ 613 /* setup the port and start reading from the device */
609 if (!port->interrupt_in_urb) {
610 dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
611 __func__);
612 return -1;
613 }
614
615 usb_fill_int_urb(port->interrupt_in_urb, serial->dev, 614 usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
616 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), 615 usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
617 port->interrupt_in_urb->transfer_buffer, 616 port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 010a42a92688..16e8e37b3b36 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
1251 1251
1252static int digi_startup(struct usb_serial *serial) 1252static int digi_startup(struct usb_serial *serial)
1253{ 1253{
1254 struct device *dev = &serial->interface->dev;
1254 struct digi_serial *serial_priv; 1255 struct digi_serial *serial_priv;
1255 int ret; 1256 int ret;
1257 int i;
1258
1259 /* check whether the device has the expected number of endpoints */
1260 if (serial->num_port_pointers < serial->type->num_ports + 1) {
1261 dev_err(dev, "OOB endpoints missing\n");
1262 return -ENODEV;
1263 }
1264
1265 for (i = 0; i < serial->type->num_ports + 1 ; i++) {
1266 if (!serial->port[i]->read_urb) {
1267 dev_err(dev, "bulk-in endpoint missing\n");
1268 return -ENODEV;
1269 }
1270 if (!serial->port[i]->write_urb) {
1271 dev_err(dev, "bulk-out endpoint missing\n");
1272 return -ENODEV;
1273 }
1274 }
1256 1275
1257 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); 1276 serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
1258 if (!serial_priv) 1277 if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 427ae43ee898..3a814e802dee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
1004 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, 1004 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
1005 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, 1005 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
1006 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, 1006 { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
1007 /* ICP DAS I-756xU devices */
1008 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
1009 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1010 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1007 { } /* Terminating entry */ 1011 { } /* Terminating entry */
1008}; 1012};
1009 1013
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
872#define NOVITUS_BONO_E_PID 0x6010 872#define NOVITUS_BONO_E_PID 0x6010
873 873
874/* 874/*
875 * ICPDAS I-756*U devices
876 */
877#define ICPDAS_VID 0x1b5c
878#define ICPDAS_I7560U_PID 0x0103
879#define ICPDAS_I7561U_PID 0x0104
880#define ICPDAS_I7563U_PID 0x0105
881
882/*
875 * RT Systems programming cables for various ham radios 883 * RT Systems programming cables for various ham radios
876 */ 884 */
877#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 885#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4446b8d70ac2..885655315de1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
376 376
377static int mct_u232_port_probe(struct usb_serial_port *port) 377static int mct_u232_port_probe(struct usb_serial_port *port)
378{ 378{
379 struct usb_serial *serial = port->serial;
379 struct mct_u232_private *priv; 380 struct mct_u232_private *priv;
380 381
382 /* check first to simplify error handling */
383 if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
384 dev_err(&port->dev, "expected endpoint missing\n");
385 return -ENODEV;
386 }
387
381 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 388 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
382 if (!priv) 389 if (!priv)
383 return -ENOMEM; 390 return -ENOMEM;
384 391
385 /* Use second interrupt-in endpoint for reading. */ 392 /* Use second interrupt-in endpoint for reading. */
386 priv->read_urb = port->serial->port[1]->interrupt_in_urb; 393 priv->read_urb = serial->port[1]->interrupt_in_urb;
387 priv->read_urb->context = port; 394 priv->read_urb->context = port;
388 395
389 spin_lock_init(&priv->lock); 396 spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
1818 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, 1818 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
1819 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, 1819 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
1820 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1820 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1821 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
1822 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1821 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1823 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1822 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1824 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1823 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1825 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
123 unsigned int max_sectors = 64; 123 unsigned int max_sectors = 64;
124 124
125 if (us->fflags & US_FL_MAX_SECTORS_MIN) 125 if (us->fflags & US_FL_MAX_SECTORS_MIN)
126 max_sectors = PAGE_CACHE_SIZE >> 9; 126 max_sectors = PAGE_SIZE >> 9;
127 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) 127 if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
128 blk_queue_max_hw_sectors(sdev->request_queue, 128 blk_queue_max_hw_sectors(sdev->request_queue,
129 max_sectors); 129 max_sectors);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc31bc79..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
2 * USB Attached SCSI 2 * USB Attached SCSI
3 * Note that this is not the same as the USB Mass Storage driver 3 * Note that this is not the same as the USB Mass Storage driver
4 * 4 *
5 * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014 5 * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
6 * Copyright Matthew Wilcox for Intel Corp, 2010 6 * Copyright Matthew Wilcox for Intel Corp, 2010
7 * Copyright Sarah Sharp for Intel Corp, 2010 7 * Copyright Sarah Sharp for Intel Corp, 2010
8 * 8 *
@@ -781,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
781 return SUCCESS; 781 return SUCCESS;
782} 782}
783 783
784static int uas_target_alloc(struct scsi_target *starget)
785{
786 struct uas_dev_info *devinfo = (struct uas_dev_info *)
787 dev_to_shost(starget->dev.parent)->hostdata;
788
789 if (devinfo->flags & US_FL_NO_REPORT_LUNS)
790 starget->no_report_luns = 1;
791
792 return 0;
793}
794
784static int uas_slave_alloc(struct scsi_device *sdev) 795static int uas_slave_alloc(struct scsi_device *sdev)
785{ 796{
786 struct uas_dev_info *devinfo = 797 struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
824 if (devinfo->flags & US_FL_BROKEN_FUA) 835 if (devinfo->flags & US_FL_BROKEN_FUA)
825 sdev->broken_fua = 1; 836 sdev->broken_fua = 1;
826 837
827 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
828 return 0; 838 return 0;
829} 839}
830 840
@@ -832,6 +842,7 @@ static struct scsi_host_template uas_host_template = {
832 .module = THIS_MODULE, 842 .module = THIS_MODULE,
833 .name = "uas", 843 .name = "uas",
834 .queuecommand = uas_queuecommand, 844 .queuecommand = uas_queuecommand,
845 .target_alloc = uas_target_alloc,
835 .slave_alloc = uas_slave_alloc, 846 .slave_alloc = uas_slave_alloc,
836 .slave_configure = uas_slave_configure, 847 .slave_configure = uas_slave_configure,
837 .eh_abort_handler = uas_eh_abort_handler, 848 .eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
956 if (result) 967 if (result)
957 goto set_alt0; 968 goto set_alt0;
958 969
970 /*
971 * 1 tag is reserved for untagged commands +
972 * 1 tag to avoid off by one errors in some bridge firmwares
973 */
974 shost->can_queue = devinfo->qdepth - 2;
975
959 usb_set_intfdata(intf, shost); 976 usb_set_intfdata(intf, shost);
960 result = scsi_add_host(shost, &intf->dev); 977 result = scsi_add_host(shost, &intf->dev);
961 if (result) 978 if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
64 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 64 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
65 US_FL_NO_ATA_1X), 65 US_FL_NO_ATA_1X),
66 66
67/* Reported-by: David Webb <djw@noc.ac.uk> */
68UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
69 "Seagate",
70 "Expansion Desk",
71 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
72 US_FL_NO_REPORT_LUNS),
73
67/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 74/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
68UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999, 75UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
69 "Seagate", 76 "Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
482 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | 482 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
483 US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | 483 US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
484 US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | 484 US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
485 US_FL_MAX_SECTORS_240); 485 US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
486 486
487 p = quirks; 487 p = quirks;
488 while (*p) { 488 while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
532 case 'i': 532 case 'i':
533 f |= US_FL_IGNORE_DEVICE; 533 f |= US_FL_IGNORE_DEVICE;
534 break; 534 break;
535 case 'j':
536 f |= US_FL_NO_REPORT_LUNS;
537 break;
535 case 'l': 538 case 'l':
536 f |= US_FL_NOT_LOCKABLE; 539 f |= US_FL_NOT_LOCKABLE;
537 break; 540 break;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
741 if (!(size > 0)) 741 if (!(size > 0))
742 return 0; 742 return 0;
743 743
744 if (size > urb->transfer_buffer_length) {
745 /* should not happen, probably malicious packet */
746 if (ud->side == USBIP_STUB) {
747 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
748 return 0;
749 } else {
750 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
751 return -EPIPE;
752 }
753 }
754
744 ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); 755 ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
745 if (ret != size) { 756 if (ret != size) {
746 dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); 757 dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
440 fb->off_ienb = CLCD_PL111_IENB; 440 fb->off_ienb = CLCD_PL111_IENB;
441 fb->off_cntl = CLCD_PL111_CNTL; 441 fb->off_cntl = CLCD_PL111_CNTL;
442 } else { 442 } else {
443#ifdef CONFIG_ARCH_VERSATILE 443 if (of_machine_is_compatible("arm,versatile-ab") ||
444 fb->off_ienb = CLCD_PL111_IENB; 444 of_machine_is_compatible("arm,versatile-pb")) {
445 fb->off_cntl = CLCD_PL111_CNTL; 445 fb->off_ienb = CLCD_PL111_IENB;
446#else 446 fb->off_cntl = CLCD_PL111_CNTL;
447 fb->off_ienb = CLCD_PL110_IENB; 447 } else {
448 fb->off_cntl = CLCD_PL110_CNTL; 448 fb->off_ienb = CLCD_PL110_IENB;
449#endif 449 fb->off_cntl = CLCD_PL110_CNTL;
450 }
450 } 451 }
451 452
452 fb->clk = clk_get(&fb->dev->dev, NULL); 453 fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, 200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod) 201 char *desc, struct gpio_desc **gpiod)
202{ 202{
203 struct gpio_desc *gd;
204 int r; 203 int r;
205 204
206 *gpiod = NULL;
207
208 r = devm_gpio_request_one(dev, gpio, flags, desc); 205 r = devm_gpio_request_one(dev, gpio, flags, desc);
209 if (r) 206 if (r) {
207 *gpiod = NULL;
210 return r == -ENOENT ? 0 : r; 208 return r == -ENOENT ? 0 : r;
209 }
211 210
212 gd = gpio_to_desc(gpio); 211 *gpiod = gpio_to_desc(gpio);
213 if (IS_ERR(gd))
214 return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
215 212
216 *gpiod = gd;
217 return 0; 213 return 0;
218} 214}
219 215
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e53f93..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@ out:
735 735
736out_unmap: 736out_unmap:
737 for (i = 0; i < nr_pages; i++) 737 for (i = 0; i < nr_pages; i++)
738 page_cache_release(pages[i]); 738 put_page(pages[i]);
739 739
740 kfree(pages); 740 kfree(pages);
741 741
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f6f28cc7eb45..e76bd91a29da 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
17 * 17 *
18 */ 18 */
19 19
20#include <linux/delay.h>
20#define VIRTIO_PCI_NO_LEGACY 21#define VIRTIO_PCI_NO_LEGACY
21#include "virtio_pci_common.h" 22#include "virtio_pci_common.h"
22 23
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
271 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 272 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
272 /* 0 status means a reset. */ 273 /* 0 status means a reset. */
273 vp_iowrite8(0, &vp_dev->common->device_status); 274 vp_iowrite8(0, &vp_dev->common->device_status);
274 /* Flush out the status write, and flush in device writes, 275 /* After writing 0 to device_status, the driver MUST wait for a read of
275 * including MSI-X interrupts, if any. */ 276 * device_status to return 0 before reinitializing the device.
276 vp_ioread8(&vp_dev->common->device_status); 277 * This will flush out the status write, and flush in device writes,
278 * including MSI-X interrupts, if any.
279 */
280 while (vp_ioread8(&vp_dev->common->device_status))
281 msleep(1);
277 /* Flush pending VQ/configuration callbacks. */ 282 /* Flush pending VQ/configuration callbacks. */
278 vp_synchronize_vectors(vdev); 283 vp_synchronize_vectors(vdev);
279} 284}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c802d47892c..ca6bfddaacad 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
1006 const char *name) 1006 const char *name)
1007{ 1007{
1008 struct virtqueue *vq; 1008 struct virtqueue *vq;
1009 void *queue; 1009 void *queue = NULL;
1010 dma_addr_t dma_addr; 1010 dma_addr_t dma_addr;
1011 size_t queue_size_in_bytes; 1011 size_t queue_size_in_bytes;
1012 struct vring vring; 1012 struct vring vring;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 9781e0dd59d6..d46839f51e73 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
151static void balloon_process(struct work_struct *work); 151static void balloon_process(struct work_struct *work);
152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
153 153
154static void release_memory_resource(struct resource *resource);
155
154/* When ballooning out (allocating memory to return to Xen) we don't really 156/* When ballooning out (allocating memory to return to Xen) we don't really
155 want the kernel to try too hard since that can trigger the oom killer. */ 157 want the kernel to try too hard since that can trigger the oom killer. */
156#define GFP_BALLOON \ 158#define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
267 return NULL; 269 return NULL;
268 } 270 }
269 271
272#ifdef CONFIG_SPARSEMEM
273 {
274 unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
275 unsigned long pfn = res->start >> PAGE_SHIFT;
276
277 if (pfn > limit) {
278 pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
279 pfn, limit);
280 release_memory_resource(res);
281 return NULL;
282 }
283 }
284#endif
285
270 return res; 286 return res;
271} 287}
272 288
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; 484 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
485 int rc = 0; 485 int rc = 0;
486 486
487 irq_move_irq(data); 487 if (!VALID_EVTCHN(evtchn))
488 return;
488 489
489 if (VALID_EVTCHN(evtchn)) 490 if (unlikely(irqd_is_setaffinity_pending(data))) {
491 int masked = test_and_set_mask(evtchn);
492
493 clear_evtchn(evtchn);
494
495 irq_move_masked_irq(data);
496
497 if (!masked)
498 unmask_evtchn(evtchn);
499 } else
490 clear_evtchn(evtchn); 500 clear_evtchn(evtchn);
491 501
492 if (pirq_needs_eoi(data->irq)) { 502 if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
1357{ 1367{
1358 int evtchn = evtchn_from_irq(data->irq); 1368 int evtchn = evtchn_from_irq(data->irq);
1359 1369
1360 irq_move_irq(data); 1370 if (!VALID_EVTCHN(evtchn))
1371 return;
1361 1372
1362 if (VALID_EVTCHN(evtchn)) 1373 if (unlikely(irqd_is_setaffinity_pending(data))) {
1374 int masked = test_and_set_mask(evtchn);
1375
1376 clear_evtchn(evtchn);
1377
1378 irq_move_masked_irq(data);
1379
1380 if (!masked)
1381 unmask_evtchn(evtchn);
1382 } else
1363 clear_evtchn(evtchn); 1383 clear_evtchn(evtchn);
1364} 1384}
1365 1385
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 38272ad24551..f4edd6df3df2 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
316{ 316{
317 unsigned int new_size; 317 unsigned int new_size;
318 evtchn_port_t *new_ring, *old_ring; 318 evtchn_port_t *new_ring, *old_ring;
319 unsigned int p, c;
320 319
321 /* 320 /*
322 * Ensure the ring is large enough to capture all possible 321 * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
346 /* 345 /*
347 * Copy the old ring contents to the new ring. 346 * Copy the old ring contents to the new ring.
348 * 347 *
349 * If the ring contents crosses the end of the current ring, 348 * To take care of wrapping, a full ring, and the new index
350 * it needs to be copied in two chunks. 349 * pointing into the second half, simply copy the old contents
350 * twice.
351 * 351 *
352 * +---------+ +------------------+ 352 * +---------+ +------------------+
353 * |34567 12| -> | 1234567 | 353 * |34567 12| -> |34567 1234567 12|
354 * +-----p-c-+ +------------------+ 354 * +-----p-c-+ +-------c------p---+
355 */ 355 */
356 p = evtchn_ring_offset(u, u->ring_prod); 356 memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
357 c = evtchn_ring_offset(u, u->ring_cons); 357 memcpy(new_ring + u->ring_size, old_ring,
358 if (p < c) { 358 u->ring_size * sizeof(*u->ring));
359 memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
360 memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
361 } else
362 memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
363 359
364 u->ring = new_ring; 360 u->ring = new_ring;
365 u->ring_size = new_size; 361 u->ring_size = new_size;