path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 17
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/ac.c | 117
-rw-r--r--  drivers/acpi/acpi_platform.c | 1
-rw-r--r--  drivers/acpi/acpi_processor.c | 8
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 4
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 7
-rw-r--r--  drivers/acpi/battery.c | 329
-rw-r--r--  drivers/acpi/blacklist.c | 21
-rw-r--r--  drivers/acpi/cm_sbs.c | 105
-rw-r--r--  drivers/acpi/ec.c | 21
-rw-r--r--  drivers/acpi/thermal.c | 2
-rw-r--r--  drivers/acpi/video.c | 16
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_imx.c | 179
-rw-r--r--  drivers/ata/libahci.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 17
-rw-r--r--  drivers/base/dd.c | 17
-rw-r--r--  drivers/block/floppy.c | 11
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/bluetooth/ath3k.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 5
-rw-r--r--  drivers/bus/mvebu-mbus.c | 22
-rw-r--r--  drivers/char/agp/frontend.c | 1
-rw-r--r--  drivers/char/random.c | 7
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 8
-rw-r--r--  drivers/clk/bcm/clk-kona-setup.c | 33
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 64
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 28
-rw-r--r--  drivers/clk/clk-divider.c | 37
-rw-r--r--  drivers/clk/clk.c | 74
-rw-r--r--  drivers/clk/shmobile/clk-mstp.c | 9
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 7
-rw-r--r--  drivers/clk/socfpga/clk.c | 23
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 4
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 66
-rw-r--r--  drivers/clk/versatile/clk-vexpress-osc.c | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 8
-rw-r--r--  drivers/clocksource/timer-marco.c | 2
-rw-r--r--  drivers/clocksource/zevio-timer.c | 7
-rw-r--r--  drivers/connector/cn_proc.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 16
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 34
-rw-r--r--  drivers/cpufreq/longhaul.c | 36
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 23
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 4
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c | 5
-rw-r--r--  drivers/crypto/caam/error.c | 10
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/dw/core.c | 11
-rw-r--r--  drivers/dma/mv_xor.c | 8
-rw-r--r--  drivers/dma/sa11x0-dma.c | 4
-rw-r--r--  drivers/firewire/core.h | 4
-rw-r--r--  drivers/firewire/ohci.c | 2
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 1
-rw-r--r--  drivers/gpio/gpio-ich.c | 4
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 148
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 135
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 131
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 10
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-multitouch.c | 5
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 3
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 4
-rw-r--r--  drivers/hwmon/emc1403.c | 10
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 2
-rw-r--r--  drivers/iio/adc/Kconfig | 4
-rw-r--r--  drivers/iio/adc/exynos_adc.c | 6
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 39
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 13
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 14
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 38
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/atkbd.c | 29
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 7
-rw-r--r--  drivers/input/keyboard/tca8418_keypad.c | 7
-rw-r--r--  drivers/input/misc/bma150.c | 4
-rw-r--r--  drivers/input/mouse/Kconfig | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 26
-rw-r--r--  drivers/input/mouse/elantech.h | 1
-rw-r--r--  drivers/input/mouse/synaptics.c | 158
-rw-r--r--  drivers/input/serio/ambakmi.c | 3
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 2
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 54
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 2
-rw-r--r--  drivers/isdn/hisax/icc.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 61
-rw-r--r--  drivers/md/dm-mpath.c | 12
-rw-r--r--  drivers/md/dm-thin.c | 106
-rw-r--r--  drivers/md/dm-verity.c | 15
-rw-r--r--  drivers/md/md.c | 8
-rw-r--r--  drivers/md/raid10.c | 13
-rw-r--r--  drivers/media/i2c/ov7670.c | 2
-rw-r--r--  drivers/media/i2c/s5c73m3/s5c73m3-core.c | 2
-rw-r--r--  drivers/media/media-device.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpbe_display.c | 16
-rw-r--r--  drivers/media/platform/davinci/vpfe_capture.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 34
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 35
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r--  drivers/media/tuners/fc2580.c | 6
-rw-r--r--  drivers/media/tuners/fc2580_priv.h | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/Makefile | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 48
-rw-r--r--  drivers/media/usb/gspca/sonixb.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 12
-rw-r--r--  drivers/memory/mvebu-devbus.c | 15
-rw-r--r--  drivers/mfd/rtsx_pcr.c | 132
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 418
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 6
-rw-r--r--  drivers/mtd/ubi/block.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 6
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 2
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/c_can.c | 648
-rw-r--r--  drivers/net/can/c_can/c_can.h | 23
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 9
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r--  drivers/net/can/dev.c | 2
-rw-r--r--  drivers/net/can/led.c | 3
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 16
-rw-r--r--  drivers/net/can/slcan.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 118
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 338
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 53
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 116
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 206
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 82
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 60
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 223
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.h | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 40
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 152
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 80
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 66
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 9
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c | 22
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c | 11
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h | 42
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c | 13
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c | 14
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/macvlan.c | 21
-rw-r--r--  drivers/net/macvtap.c | 9
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 6
-rw-r--r--  drivers/net/phy/phy.c | 27
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/slip/slip.c | 6
-rw-r--r--  drivers/net/team/team.c | 9
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/ipheth.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 32
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vxlan.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug_sta.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 14
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/chip.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 6
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
-rw-r--r--  drivers/of/base.c | 4
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 92
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 4
-rw-r--r--  drivers/pci/pci.c | 5
-rw-r--r--  drivers/pinctrl/pinctrl-as3722.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 13
-rw-r--r--  drivers/pinctrl/pinctrl-tb10x.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 2
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.c | 23
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 44
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c | 2
-rw-r--r--  drivers/pnp/quirks.c | 4
-rw-r--r--  drivers/ptp/Kconfig | 3
-rw-r--r--  drivers/rtc/rtc-hym8563.c | 3
-rw-r--r--  drivers/rtc/rtc-pcf8523.c | 4
-rw-r--r--  drivers/s390/cio/chsc.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r--  drivers/scsi/scsi_netlink.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 6
-rw-r--r--  drivers/sh/Makefile | 14
-rw-r--r--  drivers/sh/pm_runtime.c | 20
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 16
-rw-r--r--  drivers/spi/spi-qup.c | 2
-rw-r--r--  drivers/spi/spi.c | 124
-rw-r--r--  drivers/staging/comedi/drivers/ni_daq_700.c | 2
-rw-r--r--  drivers/staging/iio/resolver/ad2s1200.c | 3
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 7
-rw-r--r--  drivers/staging/imx-drm/imx-tve.c | 2
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 13
-rw-r--r--  drivers/staging/media/sn9c102/sn9c102_devtable.h | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_tx.c | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/usb_ops_linux.c | 2
-rw-r--r--  drivers/staging/speakup/main.c | 1
-rw-r--r--  drivers/staging/speakup/selection.c | 52
-rw-r--r--  drivers/staging/speakup/speakup.h | 1
-rw-r--r--  drivers/staging/speakup/speakup_acntsa.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 28
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r--  drivers/target/target_core_device.c | 12
-rw-r--r--  drivers/target/target_core_transport.c | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  drivers/tty/hvc/hvc_console.c | 2
-rw-r--r--  drivers/tty/n_tty.c | 4
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 2
-rw-r--r--  drivers/tty/tty_buffer.c | 31
-rw-r--r--  drivers/usb/core/driver.c | 9
-rw-r--r--  drivers/usb/core/hub.c | 15
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 10
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 3
-rw-r--r--  drivers/usb/host/ohci-hub.c | 18
-rw-r--r--  drivers/usb/host/ohci-pci.c | 1
-rw-r--r--  drivers/usb/host/ohci.h | 2
-rw-r--r--  drivers/usb/host/pci-quirks.c | 7
-rw-r--r--  drivers/usb/host/xhci-mem.c | 20
-rw-r--r--  drivers/usb/phy/phy-fsm-usb.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r--  drivers/usb/serial/io_ti.c | 2
-rw-r--r--  drivers/usb/serial/io_usbvend.h | 2
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/qcserial.c | 15
-rw-r--r--  drivers/usb/storage/shuttle_usbat.c | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 14
-rw-r--r--  drivers/xen/events/events_fifo.c | 41
382 files changed, 6598 insertions, 3425 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index d05d81b19b50..7183b6af5dac 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/
 obj-y				+= firmware/
 obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY)	+= sh/
+obj-$(CONFIG_ARCH_SHMOBILE)	+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y				+= clocksource/
 endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ab686b310100..a34a22841002 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -47,6 +47,23 @@ config ACPI_SLEEP
 	depends on SUSPEND || HIBERNATION
 	default y
 
+config ACPI_PROCFS_POWER
+	bool "Deprecated power /proc/acpi directories"
+	depends on PROC_FS
+	help
+	  For backwards compatibility, this option allows
+	  deprecated power /proc/acpi/ directories to exist, even when
+	  they have been replaced by functions in /sys.
+	  The deprecated directories (and their replacements) include:
+	  /proc/acpi/battery/* (/sys/class/power_supply/*)
+	  /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
+	  This option has no effect on /proc/acpi/ directories
+	  and functions, which do not yet exist in /sys
+	  This option, together with the proc directories, will be
+	  deleted in the future.
+
+	  Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
 config ACPI_EC_DEBUGFS
 	tristate "EC read/write access through /sys/kernel/debug/ec"
 	default n
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 0331f91d56e6..bce34afadcd0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,7 @@ acpi-y += sysfs.o
 acpi-$(CONFIG_X86)		+= acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)		+= debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)	+= numa.o
+acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y				+= video_detect.o
 endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2c01c1da29ce..c67f6f5ad611 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id ac_device_ids[] = {
+	{"ACPI0003", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
 static int ac_sleep_before_get_state_ms;
 
+static struct acpi_driver acpi_ac_driver = {
+	.name = "ac",
+	.class = ACPI_AC_CLASS,
+	.ids = ac_device_ids,
+	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+	.ops = {
+		.add = acpi_ac_add,
+		.remove = acpi_ac_remove,
+		.notify = acpi_ac_notify,
+		},
+	.drv.pm = &acpi_ac_pm,
+};
+
 struct acpi_ac {
 	struct power_supply charger;
-	struct platform_device *pdev;
+	struct acpi_device * device;
 	unsigned long long state;
 	struct notifier_block battery_nb;
 };
@@ -69,10 +97,12 @@ struct acpi_ac {
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-	acpi_status status;
-	acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
+	acpi_status status = AE_OK;
+
+	if (!ac)
+		return -EINVAL;
 
-	status = acpi_evaluate_integer(handle, "_PSR", NULL,
+	status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
 				       &ac->state);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
                                Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
 {
-	struct acpi_ac *ac = data;
-	struct acpi_device *adev;
+	struct acpi_ac *ac = acpi_driver_data(device);
 
 	if (!ac)
 		return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
 			msleep(ac_sleep_before_get_state_ms);
 
 		acpi_ac_get_state(ac);
-		adev = ACPI_COMPANION(&ac->pdev->dev);
-		acpi_bus_generate_netlink_event(adev->pnp.device_class,
-						  dev_name(&ac->pdev->dev),
-						  event, (u32) ac->state);
-		acpi_notifier_call_chain(adev, event, (u32) ac->state);
+		acpi_bus_generate_netlink_event(device->pnp.device_class,
+						  dev_name(&device->dev), event,
+						  (u32) ac->state);
+		acpi_notifier_call_chain(device, event, (u32) ac->state);
 		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
 	}
 
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
 	{},
 };
 
-static int acpi_ac_probe(struct platform_device *pdev)
+static int acpi_ac_add(struct acpi_device *device)
 {
 	int result = 0;
 	struct acpi_ac *ac = NULL;
-	struct acpi_device *adev;
 
-	if (!pdev)
-		return -EINVAL;
 
-	adev = ACPI_COMPANION(&pdev->dev);
-	if (!adev)
-		return -ENODEV;
+	if (!device)
+		return -EINVAL;
 
 	ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
 	if (!ac)
 		return -ENOMEM;
 
-	strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
-	strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
-	ac->pdev = pdev;
-	platform_set_drvdata(pdev, ac);
+	ac->device = device;
+	strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+	strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+	device->driver_data = ac;
 
 	result = acpi_ac_get_state(ac);
 	if (result)
 		goto end;
 
-	ac->charger.name = acpi_device_bid(adev);
+	ac->charger.name = acpi_device_bid(device);
 	ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
 	ac->charger.properties = ac_props;
 	ac->charger.num_properties = ARRAY_SIZE(ac_props);
 	ac->charger.get_property = get_ac_property;
-	result = power_supply_register(&pdev->dev, &ac->charger);
+	result = power_supply_register(&ac->device->dev, &ac->charger);
 	if (result)
 		goto end;
 
-	result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-			ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
-	if (result) {
-		power_supply_unregister(&ac->charger);
-		goto end;
-	}
 	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-	       acpi_device_name(adev), acpi_device_bid(adev),
+	       acpi_device_name(device), acpi_device_bid(device),
 	       ac->state ? "on-line" : "off-line");
 
 	ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
 	if (!dev)
 		return -EINVAL;
 
-	ac = platform_get_drvdata(to_platform_device(dev));
+	ac = acpi_driver_data(to_acpi_device(dev));
 	if (!ac)
 		return -EINVAL;
 
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
 #else
 #define acpi_ac_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct platform_device *pdev)
+static int acpi_ac_remove(struct acpi_device *device)
 {
-	struct acpi_ac *ac;
+	struct acpi_ac *ac = NULL;
+
 
-	if (!pdev)
+	if (!device || !acpi_driver_data(device))
 		return -EINVAL;
 
-	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-			ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
+	ac = acpi_driver_data(device);
 
-	ac = platform_get_drvdata(pdev);
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
 	unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct acpi_device_id acpi_ac_match[] = {
-	{ "ACPI0003", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
-
-static struct platform_driver acpi_ac_driver = {
-	.probe = acpi_ac_probe,
-	.remove = acpi_ac_remove,
-	.driver = {
-		.name = "acpi-ac",
-		.owner = THIS_MODULE,
-		.pm = &acpi_ac_pm_ops,
-		.acpi_match_table = ACPI_PTR(acpi_ac_match),
-	},
-};
-
 static int __init acpi_ac_init(void)
 {
 	int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
-	result = platform_driver_register(&acpi_ac_driver);
+	result = acpi_bus_register_driver(&acpi_ac_driver);
 	if (result < 0)
 		return -ENODEV;
 
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
 
 static void __exit acpi_ac_exit(void)
 {
-	platform_driver_unregister(&acpi_ac_driver);
+	acpi_bus_unregister_driver(&acpi_ac_driver);
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index dbfe49e5fd63..1d4950388fa1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
 	{ "PNP0D40" },
-	{ "ACPI0003" },
 	{ "VPC2004" },
 	{ "BCM4752" },
 
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c29c2c3ec0ad..52c81c49cc7d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 	acpi_status status;
 	int ret;
 
+	if (pr->apic_id == -1)
+		return -ENODEV;
+
 	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
 		return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
 	}
 
 	apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-	if (apic_id < 0) {
+	if (apic_id < 0)
 		acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-		return -ENODEV;
-	}
 	pr->apic_id = apic_id;
 
 	cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
@@ -404,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
 		goto err;
 
 	pr->dev = dev;
-	dev->offline = pr->flags.need_hotplug_init;
 
 	/* Trigger the processor driver's .probe() if present. */
 	if (device_attach(dev) >= 0)
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 49bbc71fad54..a08a448068dd 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is FALSE, do not favor the 32-bit addresses.
+ * address. Default is TRUE, favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index a4702eee91a8..9fb85f38de90 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 	u32 table_count;
 	struct acpi_table_header *table;
 	acpi_physical_address address;
+	acpi_physical_address rsdt_address;
 	u32 length;
 	u8 *table_entry;
 	acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 		 * as per the ACPI specification.
 		 */
 		address = (acpi_physical_address) rsdp->xsdt_physical_address;
+		rsdt_address =
+		    (acpi_physical_address) rsdp->rsdt_physical_address;
 		table_entry_size = ACPI_XSDT_ENTRY_SIZE;
 	} else {
 		/* Root table is an RSDT (32-bit physical addresses) */
 
 		address = (acpi_physical_address) rsdp->rsdt_physical_address;
+		rsdt_address = address;
 		table_entry_size = ACPI_RSDT_ENTRY_SIZE;
 	}
498 502
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 
 			/* Fall back to the RSDT */
 
-			address =
-			    (acpi_physical_address) rsdp->rsdt_physical_address;
+			address = rsdt_address;
 			table_entry_size = ACPI_RSDT_ENTRY_SIZE;
 		}
 	}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9a2c63b20050..6e7b2a12860d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,6 +36,12 @@
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+	info_tag = 0,
+	state_tag,
+	alarm_tag,
+	ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
 static const struct acpi_device_id battery_device_ids[] = {
 	{"PNP0C0A", 0},
 	{"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+	return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+		"mA" : "mW";
+}
+#endif
+
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
 }
 
 /* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+	struct acpi_battery *battery = seq->private;
+
+	if (result)
+		goto end;
+
+	seq_printf(seq, "present: %s\n",
+		   acpi_battery_present(battery) ? "yes" : "no");
+	if (!acpi_battery_present(battery))
+		goto end;
+	if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "design capacity: unknown\n");
+	else
+		seq_printf(seq, "design capacity: %d %sh\n",
+			   battery->design_capacity,
+			   acpi_battery_units(battery));
+
+	if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "last full capacity: unknown\n");
+	else
+		seq_printf(seq, "last full capacity: %d %sh\n",
+			   battery->full_charge_capacity,
+			   acpi_battery_units(battery));
+
+	seq_printf(seq, "battery technology: %srechargeable\n",
+		   (!battery->technology)?"non-":"");
+
+	if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "design voltage: unknown\n");
+	else
+		seq_printf(seq, "design voltage: %d mV\n",
+			   battery->design_voltage);
+	seq_printf(seq, "design capacity warning: %d %sh\n",
+		   battery->design_capacity_warning,
+		   acpi_battery_units(battery));
+	seq_printf(seq, "design capacity low: %d %sh\n",
+		   battery->design_capacity_low,
+		   acpi_battery_units(battery));
+	seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
+	seq_printf(seq, "capacity granularity 1: %d %sh\n",
+		   battery->capacity_granularity_1,
+		   acpi_battery_units(battery));
+	seq_printf(seq, "capacity granularity 2: %d %sh\n",
+		   battery->capacity_granularity_2,
+		   acpi_battery_units(battery));
+	seq_printf(seq, "model number: %s\n", battery->model_number);
+	seq_printf(seq, "serial number: %s\n", battery->serial_number);
+	seq_printf(seq, "battery type: %s\n", battery->type);
+	seq_printf(seq, "OEM info: %s\n", battery->oem_info);
+      end:
+	if (result)
+		seq_printf(seq, "ERROR: Unable to read battery info\n");
+	return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+	struct acpi_battery *battery = seq->private;
+
+	if (result)
+		goto end;
+
+	seq_printf(seq, "present: %s\n",
+		   acpi_battery_present(battery) ? "yes" : "no");
+	if (!acpi_battery_present(battery))
+		goto end;
+
+	seq_printf(seq, "capacity state: %s\n",
+		   (battery->state & 0x04) ? "critical" : "ok");
+	if ((battery->state & 0x01) && (battery->state & 0x02))
+		seq_printf(seq,
+			   "charging state: charging/discharging\n");
+	else if (battery->state & 0x01)
+		seq_printf(seq, "charging state: discharging\n");
+	else if (battery->state & 0x02)
+		seq_printf(seq, "charging state: charging\n");
+	else
+		seq_printf(seq, "charging state: charged\n");
+
+	if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "present rate: unknown\n");
+	else
+		seq_printf(seq, "present rate: %d %s\n",
+			   battery->rate_now, acpi_battery_units(battery));
+
+	if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "remaining capacity: unknown\n");
+	else
+		seq_printf(seq, "remaining capacity: %d %sh\n",
+			   battery->capacity_now, acpi_battery_units(battery));
+	if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+		seq_printf(seq, "present voltage: unknown\n");
+	else
+		seq_printf(seq, "present voltage: %d mV\n",
+			   battery->voltage_now);
+      end:
+	if (result)
+		seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+	return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+	struct acpi_battery *battery = seq->private;
+
+	if (result)
+		goto end;
+
+	if (!acpi_battery_present(battery)) {
+		seq_printf(seq, "present: no\n");
+		goto end;
+	}
+	seq_printf(seq, "alarm: ");
+	if (!battery->alarm)
+		seq_printf(seq, "unsupported\n");
+	else
+		seq_printf(seq, "%u %sh\n", battery->alarm,
+			   acpi_battery_units(battery));
+      end:
+	if (result)
+		seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+	return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+					const char __user * buffer,
+					size_t count, loff_t * ppos)
+{
+	int result = 0;
+	char alarm_string[12] = { '\0' };
+	struct seq_file *m = file->private_data;
+	struct acpi_battery *battery = m->private;
+
+	if (!battery || (count > sizeof(alarm_string) - 1))
+		return -EINVAL;
+	if (!acpi_battery_present(battery)) {
+		result = -ENODEV;
+		goto end;
+	}
+	if (copy_from_user(alarm_string, buffer, count)) {
+		result = -EFAULT;
+		goto end;
+	}
+	alarm_string[count] = '\0';
+	battery->alarm = simple_strtol(alarm_string, NULL, 0);
+	result = acpi_battery_set_alarm(battery);
+      end:
+	if (!result)
+		return count;
+	return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+	acpi_battery_print_info,
+	acpi_battery_print_state,
+	acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+	struct acpi_battery *battery = seq->private;
+	int result = acpi_battery_update(battery);
+	return acpi_print_funcs[fid](seq, result);
+}
+
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+	return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+	{ \
+	.name = __stringify(_name), \
+	.mode = S_IRUGO, \
+	.ops = { \
+		.open = acpi_battery_##_name##_open_fs, \
+		.read = seq_read, \
+		.llseek = seq_lseek, \
+		.release = single_release, \
+		.owner = THIS_MODULE, \
+		}, \
+	}
+
+#define FILE_DESCRIPTION_RW(_name) \
+	{ \
+	.name = __stringify(_name), \
+	.mode = S_IFREG | S_IRUGO | S_IWUSR, \
+	.ops = { \
+		.open = acpi_battery_##_name##_open_fs, \
+		.read = seq_read, \
+		.llseek = seq_lseek, \
+		.write = acpi_battery_write_##_name, \
+		.release = single_release, \
+		.owner = THIS_MODULE, \
+		}, \
+	}
+
+static const struct battery_file {
+	struct file_operations ops;
+	umode_t mode;
+	const char *name;
+} acpi_battery_file[] = {
+	FILE_DESCRIPTION_RO(info),
+	FILE_DESCRIPTION_RO(state),
+	FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+	struct proc_dir_entry *entry = NULL;
+	int i;
+
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+	if (!acpi_device_dir(device)) {
+		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+						     acpi_battery_dir);
+		if (!acpi_device_dir(device))
+			return -ENODEV;
+	}
+
+	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+		entry = proc_create_data(acpi_battery_file[i].name,
+					 acpi_battery_file[i].mode,
+					 acpi_device_dir(device),
+					 &acpi_battery_file[i].ops,
+					 acpi_driver_data(device));
+		if (!entry)
+			return -ENODEV;
+	}
+	return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+	int i;
+	if (!acpi_device_dir(device))
+		return;
+	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+		remove_proc_entry(acpi_battery_file[i].name,
+				  acpi_device_dir(device));
+
+	remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+	acpi_device_dir(device) = NULL;
+}
+
+#endif
+
+/* --------------------------------------------------------------------------
                                Driver Interface
    -------------------------------------------------------------------------- */
 
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
 	result = acpi_battery_update(battery);
 	if (result)
 		goto fail;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+	result = acpi_battery_add_fs(device);
+#endif
+	if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+		acpi_battery_remove_fs(device);
+#endif
+		goto fail;
+	}
 
 	printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
 	       ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
 		return -EINVAL;
 	battery = acpi_driver_data(device);
 	unregister_pm_notifier(&battery->pm_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+	acpi_battery_remove_fs(device);
+#endif
 	sysfs_remove_battery(battery);
 	mutex_destroy(&battery->lock);
 	mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 
 	if (dmi_check_system(bat_dmi_table))
 		battery_bix_broken_package = 1;
-	acpi_bus_register_driver(&acpi_battery_driver);
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+	acpi_battery_dir = acpi_lock_battery_dir();
+	if (!acpi_battery_dir)
+		return;
+#endif
+	if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+		acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+		return;
+	}
+	return;
 }
 
 static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
 	acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+	acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
 }
 
 module_init(acpi_battery_init);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index afec4526c48a..3d8413d02a97 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Inspiron 7737",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
 		},
 	},
+	/*
+	 * Without this this EEEpc exports a non working WMI interface, with
+	 * this it exports a working "good old" eeepc_laptop interface, fixing
+	 * both brightness control, and rfkill not working.
+	 */
+	{
+	.callback = dmi_enable_osi_linux,
+	.ident = "Asus EEE PC 1015PX",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 000000000000..6c9ee68e46fb
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,105 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS		"ac_adapter"
+#define ACPI_BATTERY_CLASS	"battery"
+#define _COMPONENT		ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+	mutex_lock(&cm_sbs_mutex);
+	if (!acpi_ac_dir)
+		acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+	if (acpi_ac_dir) {
+		lock_ac_dir_cnt++;
+	} else {
+		printk(KERN_ERR PREFIX
+				  "Cannot create %s\n", ACPI_AC_CLASS);
+	}
+	mutex_unlock(&cm_sbs_mutex);
+	return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+	mutex_lock(&cm_sbs_mutex);
+	if (acpi_ac_dir_param)
+		lock_ac_dir_cnt--;
+	if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+		remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+		acpi_ac_dir = NULL;
+	}
+	mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+	mutex_lock(&cm_sbs_mutex);
+	if (!acpi_battery_dir) {
+		acpi_battery_dir =
+		    proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+	}
+	if (acpi_battery_dir) {
+		lock_battery_dir_cnt++;
+	} else {
+		printk(KERN_ERR PREFIX
+				  "Cannot create %s\n", ACPI_BATTERY_CLASS);
+	}
+	mutex_unlock(&cm_sbs_mutex);
+	return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+	mutex_lock(&cm_sbs_mutex);
+	if (acpi_battery_dir_param)
+		lock_battery_dir_cnt--;
+	if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+	    && acpi_battery_dir) {
+		remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+		acpi_battery_dir = NULL;
+	}
+	mutex_unlock(&cm_sbs_mutex);
+	return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d7d32c28829b..ad11ba4a412d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -206,13 +206,13 @@ unlock:
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
 	if (state & ACPI_EC_FLAG_SCI) {
 		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-			return acpi_ec_sync_query(ec);
+			return acpi_ec_sync_query(ec, NULL);
 	}
 	return 0;
 }
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
 /*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
  * Run with locked ec mutex.
  */
 static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
 	u8 value = 0;
 
 	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-		status = acpi_ec_query_unlocked(ec, &value);
+		status = acpi_ec_sync_query(ec, &value);
 		if (status || !value)
 			break;
 	}
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
 	kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
 	u8 value = 0;
 	int status;
 	struct acpi_ec_query_handler *handler, *copy;
-	if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+	status = acpi_ec_query_unlocked(ec, &value);
+	if (data)
+		*data = value;
+	if (status)
 		return status;
+
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
 			/* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 	if (!ec)
 		return;
 	mutex_lock(&ec->mutex);
-	acpi_ec_sync_query(ec);
+	acpi_ec_sync_query(ec, NULL);
 	mutex_unlock(&ec->mutex);
 }
 
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
 
 static void __exit acpi_thermal_exit(void)
 {
-	destroy_workqueue(acpi_thermal_pm_queue);
 	acpi_bus_unregister_driver(&acpi_thermal_driver);
+	destroy_workqueue(acpi_thermal_pm_queue);
 
 	return;
 }
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8b6990e417ec..f8bc5a755dda 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 	},
 	{
 	 .callback = video_set_use_native_backlight,
-	 .ident = "ThinkPad T430s",
+	 .ident = "ThinkPad T430 and T430s",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
 		},
 	},
 	{
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 	},
 	},
 	{
 	 .callback = video_set_use_native_backlight,
 	 .ident = "ThinkPad X1 Carbon",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 	 .ident = "Dell Inspiron 7520",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
 		},
 	},
 	{
@@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
513 }, 513 },
514 { 514 {
515 .callback = video_set_use_native_backlight, 515 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire 5742G",
517 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
519 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
520 },
521 },
522 {
523 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire V5-431", 524 .ident = "Acer Aspire V5-431",
517 .matches = { 525 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 526 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2706047337f..0033fafc470b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -815,7 +815,7 @@ config PATA_AT32
 
 config PATA_AT91
 	tristate "PATA support for AT91SAM9260"
-	depends on ARM && ARCH_AT91
+	depends on ARM && SOC_AT91SAM9
 	help
 	  This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 71e15b73513d..60707814a84b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
 	return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
 }
 
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+	/* device with broken DEVSLP but still showing SDS capability */
+	static const struct pci_device_id ids[] = {
+		{ PCI_VDEVICE(INTEL, 0x0f23)},	/* Valleyview SoC */
+		{}
+	};
+
+	return pci_match_id(ids, pdev);
+}
+
 #ifdef CONFIG_ATA_ACPI
 static void ahci_gtf_filter_workaround(struct ata_host *host)
 {
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+	/* must set flag prior to save config in order to take effect */
+	if (ahci_broken_devslp(pdev))
+		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
 	/* save initial config */
 	ahci_pci_save_initial_config(pdev, hpriv);
 
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index b5eb886da226..af63c75c2001 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -236,6 +236,7 @@ enum {
 					     port start (wait until
 					     error-handling stage) */
 	AHCI_HFLAG_MULTI_MSI	= (1 << 16), /* multiple PCI MSIs */
+	AHCI_HFLAG_NO_DEVSLP	= (1 << 17), /* no device sleep */
 
 	/* ap->flags bits */
 
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 497c7abe1c7d..8befeb69eeb1 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -29,9 +29,25 @@
 #include "ahci.h"
 
 enum {
-	PORT_PHY_CTL = 0x178,			/* Port0 PHY Control */
-	PORT_PHY_CTL_PDDQ_LOC = 0x100000,	/* PORT_PHY_CTL bits */
-	HOST_TIMER1MS = 0xe0,			/* Timer 1-ms */
+	/* Timer 1-ms Register */
+	IMX_TIMER1MS				= 0x00e0,
+	/* Port0 PHY Control Register */
+	IMX_P0PHYCR				= 0x0178,
+	IMX_P0PHYCR_TEST_PDDQ			= 1 << 20,
+	IMX_P0PHYCR_CR_READ			= 1 << 19,
+	IMX_P0PHYCR_CR_WRITE			= 1 << 18,
+	IMX_P0PHYCR_CR_CAP_DATA			= 1 << 17,
+	IMX_P0PHYCR_CR_CAP_ADDR			= 1 << 16,
+	/* Port0 PHY Status Register */
+	IMX_P0PHYSR				= 0x017c,
+	IMX_P0PHYSR_CR_ACK			= 1 << 18,
+	IMX_P0PHYSR_CR_DATA_OUT			= 0xffff << 0,
+	/* Lane0 Output Status Register */
+	IMX_LANE0_OUT_STAT			= 0x2003,
+	IMX_LANE0_OUT_STAT_RX_PLL_STATE		= 1 << 1,
+	/* Clock Reset Register */
+	IMX_CLOCK_RESET				= 0x7f3f,
+	IMX_CLOCK_RESET_RESET			= 1 << 0,
 };
 
 enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
 
 static void ahci_imx_host_stop(struct ata_host *host);
 
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+	int timeout = 10;
+	u32 crval;
+	u32 srval;
+
+	/* Assert or deassert the bit */
+	crval = readl(mmio + IMX_P0PHYCR);
+	if (assert)
+		crval |= bit;
+	else
+		crval &= ~bit;
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Wait for the cr_ack signal */
+	do {
+		srval = readl(mmio + IMX_P0PHYSR);
+		if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+			break;
+		usleep_range(100, 200);
+	} while (--timeout);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+	u32 crval = addr;
+	int ret;
+
+	/* Supply the address on cr_data_in */
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Assert the cr_cap_addr signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_cap_addr */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+	u32 crval = val;
+	int ret;
+
+	/* Supply the data on cr_data_in */
+	writel(crval, mmio + IMX_P0PHYCR);
+
+	/* Assert the cr_cap_data signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_cap_data */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+	if (ret)
+		return ret;
+
+	if (val & IMX_CLOCK_RESET_RESET) {
+		/*
+		 * In case we're resetting the phy, it's unable to acknowledge,
+		 * so we return immediately here.
+		 */
+		crval |= IMX_P0PHYCR_CR_WRITE;
+		writel(crval, mmio + IMX_P0PHYCR);
+		goto out;
+	}
+
+	/* Assert the cr_write signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+	if (ret)
+		return ret;
+
+	/* Deassert cr_write */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+	if (ret)
+		return ret;
+
+out:
+	return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+	int ret;
+
+	/* Assert the cr_read signal */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+	if (ret)
+		return ret;
+
+	/* Capture the data from cr_data_out[] */
+	*val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+	/* Deassert cr_read */
+	ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+	void __iomem *mmio = hpriv->mmio;
+	int timeout = 10;
+	u16 val;
+	int ret;
+
+	/* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+	ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+	if (ret)
+		return ret;
+	ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+	if (ret)
+		return ret;
+
+	/* Wait for PHY RX_PLL to be stable */
+	do {
+		usleep_range(100, 200);
+		ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+		if (ret)
+			return ret;
+		ret = imx_phy_reg_read(&val, mmio);
+		if (ret)
+			return ret;
+		if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+			break;
+	} while (--timeout);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
 static int imx_sata_enable(struct ahci_host_priv *hpriv)
 {
 	struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+	struct device *dev = &imxpriv->ahci_pdev->dev;
 	int ret;
 
 	if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
 		regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
 				   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
 				   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+		usleep_range(100, 200);
+
+		ret = imx_sata_phy_reset(hpriv);
+		if (ret) {
+			dev_err(dev, "failed to reset phy: %d\n", ret);
+			goto disable_regulator;
+		}
 	}
 
 	usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
 	 * without full reset once the pddq mode is enabled making it
 	 * impossible to use as part of libata LPM.
 	 */
-	reg_val = readl(mmio + PORT_PHY_CTL);
-	writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+	reg_val = readl(mmio + IMX_P0PHYCR);
+	writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
 	imx_sata_disable(hpriv);
 	imxpriv->no_device = true;
 }
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 	if (!imxpriv)
 		return -ENOMEM;
 
+	imxpriv->ahci_pdev = pdev;
 	imxpriv->no_device = false;
 	imxpriv->first_time = true;
 	imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
 	/*
 	 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
-	 * and IP vendor specific register HOST_TIMER1MS.
+	 * and IP vendor specific register IMX_TIMER1MS.
 	 * Configure CAP_SSS (support stagered spin up).
 	 * Implement the port0.
 	 * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
 	}
 
 	reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
-	writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
+	writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
 	ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
 	if (ret)
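The PHY access code added above is a classic "capture register" handshake: each step toggles a control bit, then polls a status bit for the acknowledge, bounded by a retry count. Below is a compilable user-space sketch of that poll-for-ack idiom; the fake_* registers and simulated hw_tick() stand in for real MMIO and are not part of the driver.

/*
 * Sketch of the bounded poll-for-acknowledge idiom; names illustrative.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CR_WRITE	(1u << 18)	/* control-side request bit */
#define CR_ACK		(1u << 18)	/* status-side acknowledge bit */

static uint32_t fake_ctrl;	/* stands in for the control register */
static uint32_t fake_status;	/* stands in for the status register */

static void hw_tick(void)
{
	/* Simulated hardware: mirror the requested state into the ack bit. */
	fake_status = (fake_ctrl & CR_WRITE) ? CR_ACK : 0;
}

static int crbit_assert(uint32_t bit, bool assert)
{
	int timeout = 10;

	if (assert)
		fake_ctrl |= bit;
	else
		fake_ctrl &= ~bit;

	/* Poll for the acknowledge, giving up after a bounded wait. */
	do {
		hw_tick();
		if ((assert ? fake_status : ~fake_status) & CR_ACK)
			break;
		usleep(100);
	} while (--timeout);

	return timeout ? 0 : -ETIMEDOUT;
}

int main(void)
{
	printf("assert: %d\n", crbit_assert(CR_WRITE, true));
	printf("deassert: %d\n", crbit_assert(CR_WRITE, false));
	return 0;
}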
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 6bd4f660b4e1..b9861453fc81 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
 		cap &= ~HOST_CAP_SNTF;
 	}
 
+	if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+		dev_info(dev,
+			 "controller can't do DEVSLP, turning off\n");
+		cap2 &= ~HOST_CAP2_SDS;
+		cap2 &= ~HOST_CAP2_SADM;
+	}
+
 	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
 		dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
 		cap |= HOST_CAP_FBS;
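Taken together, the ahci.c, ahci.h and libahci.c hunks form a common quirk pattern: match the known-bad device by ID, set a host flag before the capability snapshot is taken, and mask the advertised capability bits when the configuration is saved. A standalone sketch of that flow; the IDs, flag values and helper names are illustrative, not the driver's.

/*
 * Sketch of the match-ID / set-flag / mask-capability quirk pattern.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HFLAG_NO_DEVSLP	(1u << 17)	/* host-side quirk flag */
#define CAP2_SDS	(1u << 3)	/* device sleep supported */
#define CAP2_SADM	(1u << 4)	/* aggressive device sleep mgmt */

static const uint16_t broken_devslp_ids[] = { 0x0f23, 0 };

static bool broken_devslp(uint16_t device_id)
{
	const uint16_t *id;

	for (id = broken_devslp_ids; *id; id++)
		if (*id == device_id)
			return true;
	return false;
}

static uint32_t save_initial_config(uint32_t cap2, uint32_t hflags)
{
	/* The quirk flag must be set before this snapshot is taken. */
	if ((cap2 & CAP2_SDS) && (hflags & HFLAG_NO_DEVSLP)) {
		printf("controller can't do DEVSLP, turning off\n");
		cap2 &= ~(CAP2_SDS | CAP2_SADM);
	}
	return cap2;
}

int main(void)
{
	uint32_t hflags = 0, cap2 = CAP2_SDS | CAP2_SADM;

	if (broken_devslp(0x0f23))
		hflags |= HFLAG_NO_DEVSLP;
	cap2 = save_initial_config(cap2, hflags);
	printf("effective cap2: 0x%x\n", (unsigned)cap2);
	return 0;
}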
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 943cc8b83e59..18d97d5c7d90 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4225 4225
4226 /* devices that don't properly handle queued TRIM commands */ 4226 /* devices that don't properly handle queued TRIM commands */
4227 { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4227 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4228 { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4231 4231
4232 /* 4232 /*
4233 * Some WD SATA-I drives spin up and down erratically when the link 4233 * Some WD SATA-I drives spin up and down erratically when the link
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
6314static void ata_port_detach(struct ata_port *ap) 6314static void ata_port_detach(struct ata_port *ap)
6315{ 6315{
6316 unsigned long flags; 6316 unsigned long flags;
6317 struct ata_link *link;
6318 struct ata_device *dev;
6317 6319
6318 if (!ap->ops->error_handler) 6320 if (!ap->ops->error_handler)
6319 goto skip_eh; 6321 goto skip_eh;
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
6333 cancel_delayed_work_sync(&ap->hotplug_task); 6335 cancel_delayed_work_sync(&ap->hotplug_task);
6334 6336
6335 skip_eh: 6337 skip_eh:
6338 /* clean up zpodd on port removal */
6339 ata_for_each_link(link, ap, HOST_FIRST) {
6340 ata_for_each_dev(dev, link, ALL) {
6341 if (zpodd_dev_enabled(dev))
6342 zpodd_exit(dev);
6343 }
6344 }
6336 if (ap->pmp_link) { 6345 if (ap->pmp_link) {
6337 int i; 6346 int i;
6338 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6347 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8986b9f22781..62ec61e8f84a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This functions moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them. It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occured and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
  */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
 	 * into the active list so they can be retried by the workqueue
 	 */
 	mutex_lock(&deferred_probe_mutex);
+	atomic_inc(&deferred_trigger_count);
 	list_splice_tail_init(&deferred_probe_pending_list,
 			      &deferred_probe_active_list);
 	mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
 	int ret = 0;
+	int local_trigger_count = atomic_read(&deferred_trigger_count);
 
 	atomic_inc(&probe_count);
 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
 		/* Driver requested deferred probing */
 		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
 		driver_deferred_probe_add(dev);
+		/* Did a trigger occur while probing? Need to re-trigger if yes */
+		if (local_trigger_count != atomic_read(&deferred_trigger_count))
+			driver_deferred_probe_trigger();
 	} else if (ret != -ENODEV && ret != -ENXIO) {
 		/* driver matched but the probe failed */
 		printk(KERN_WARNING
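The dd.c change closes a lost-wakeup race with a generation counter: snapshot the counter before the racy operation and, if it moved while the operation ran, redo the trigger. A compilable user-space sketch using C11 atomics; the fake probe_one() and its parameters are illustrative only.

/*
 * Sketch of the generation-counter re-trigger guard; names illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int trigger_count;

static void probe_trigger(void)
{
	atomic_fetch_add(&trigger_count, 1);
	printf("pending list re-scanned\n");
}

static void probe_one(int will_defer, int concurrent_success)
{
	int local = atomic_load(&trigger_count);

	if (concurrent_success)
		probe_trigger();	/* another probe succeeded meanwhile */

	if (will_defer) {
		/* Did a trigger occur while probing? Re-trigger if yes. */
		if (local != atomic_load(&trigger_count))
			probe_trigger();
	}
}

int main(void)
{
	probe_one(1, 1);	/* deferred, but a trigger raced: re-run */
	probe_one(1, 0);	/* deferred, nothing raced: stay pending */
	return 0;
}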
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8f5565bf34cd..fa9bb742df6e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
 	int ret;
 
 	while (ptr) {
-		ret = copy_to_user(param, ptr, sizeof(*ptr));
+		struct floppy_raw_cmd cmd = *ptr;
+		cmd.next = NULL;
+		cmd.kernel_data = NULL;
+		ret = copy_to_user(param, &cmd, sizeof(cmd));
 		if (ret)
 			return -EFAULT;
 		param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
 		return -ENOMEM;
 	*rcmd = ptr;
 	ret = copy_from_user(ptr, param, sizeof(*ptr));
-	if (ret)
-		return -EFAULT;
 	ptr->next = NULL;
 	ptr->buffer_length = 0;
+	ptr->kernel_data = NULL;
+	if (ret)
+		return -EFAULT;
 	param += sizeof(struct floppy_raw_cmd);
 	if (ptr->cmd_count > 33)
 		/* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
 	for (i = 0; i < 16; i++)
 		ptr->reply[i] = 0;
 	ptr->resultcode = 0;
-	ptr->kernel_data = NULL;
 
 	if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
 		if (ptr->length <= 0)
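The raw_cmd_copyout() change scrubs kernel-private pointers from a stack copy before it is written back to user space, so no kernel address leaves the kernel. A self-contained sketch of the idiom, with memcpy() standing in for copy_to_user() and an illustrative struct:

/*
 * Sketch: sanitize a stack copy before copying a struct out.
 */
#include <stdio.h>
#include <string.h>

struct raw_cmd {
	long flags;
	void *kernel_data;	/* kernel-private, must not leak */
	struct raw_cmd *next;	/* kernel-private, must not leak */
};

static void copy_out(char *user_buf, const struct raw_cmd *ptr)
{
	struct raw_cmd cmd = *ptr;	/* sanitized stack copy */

	cmd.next = NULL;
	cmd.kernel_data = NULL;
	memcpy(user_buf, &cmd, sizeof(cmd));	/* i.e. copy_to_user() */
}

int main(void)
{
	struct raw_cmd cmd = { .flags = 3, .kernel_data = &cmd, .next = &cmd };
	char buf[sizeof(cmd)];

	copy_out(buf, &cmd);
	printf("copied %zu sanitized bytes\n", sizeof(buf));
	return 0;
}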
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vq);
-		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index be571fef185d..a83b57e57b63 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x04CA, 0x3004) },
 	{ USB_DEVICE(0x04CA, 0x3005) },
 	{ USB_DEVICE(0x04CA, 0x3006) },
+	{ USB_DEVICE(0x04CA, 0x3007) },
 	{ USB_DEVICE(0x04CA, 0x3008) },
 	{ USB_DEVICE(0x04CA, 0x300b) },
 	{ USB_DEVICE(0x0930, 0x0219) },
@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f338b0c5a8de..a7dfbf9a3afb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_BCM92035)
 		hdev->setup = btusb_setup_bcm92035;
 
-	if (id->driver_info & BTUSB_INTEL) {
-		usb_enable_autosuspend(data->udev);
+	if (id->driver_info & BTUSB_INTEL)
 		hdev->setup = btusb_setup_intel;
-	}
 
 	/* Interface numbers are hardcoded in the specification */
 	data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 293e2e0a0a87..00b73448b22e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -56,6 +56,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
+#include <linux/log2.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
 		 */
 		if ((u64)base < wend && end > wbase)
 			return 0;
-
-		/*
-		 * Check if target/attribute conflicts
-		 */
-		if (target == wtarget && attr == wattr)
-			return 0;
 	}
 
 	return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
 			mbus->soc->win_cfg_offset(win);
 	u32 ctrl, remap_addr;
 
+	if (!is_power_of_2(size)) {
+		WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+		return -EINVAL;
+	}
+
+	if ((base & (phys_addr_t)(size - 1)) != 0) {
+		WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+		     size);
+		return -EINVAL;
+	}
+
 	ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
 		(attr << WIN_CTRL_ATTR_SHIFT) |
 		(target << WIN_CTRL_TGT_SHIFT) |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
 			   win, (unsigned long long)wbase,
 			   (unsigned long long)(wbase + wsize), wtarget, wattr);
 
+		if (!is_power_of_2(wsize) ||
+		    ((wbase & (u64)(wsize - 1)) != 0))
+			seq_puts(seq, " (Invalid base/size!!)");
+
 		if (win < mbus->soc->num_remappable_wins) {
 			seq_printf(seq, " (remap %016llx)\n",
 				   (unsigned long long)wremap);
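The window checks added above rest on two bit tricks: a size is a power of two iff (n & (n - 1)) == 0, and a base is size-aligned iff (base & (size - 1)) == 0. A standalone sketch with illustrative values:

/*
 * Sketch of power-of-two and alignment validation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int check_window(uint64_t base, uint64_t size)
{
	if (!is_power_of_2(size)) {
		fprintf(stderr, "invalid window size: 0x%llx\n",
			(unsigned long long)size);
		return -1;
	}
	if (base & (size - 1)) {
		fprintf(stderr, "base 0x%llx not aligned to 0x%llx\n",
			(unsigned long long)base, (unsigned long long)size);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_window(0x100000, 0x100000));	/* ok */
	printf("%d\n", check_window(0x100000, 0x180000));	/* bad size */
	printf("%d\n", check_window(0x80000, 0x100000));	/* misaligned */
	return 0;
}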
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 8121b4c70ede..b29703324e94 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
 
 	agp_copy_info(agp_bridge, &kerninfo);
 
+	memset(&userinfo, 0, sizeof(userinfo));
 	userinfo.version.major = kerninfo.version.major;
 	userinfo.version.minor = kerninfo.version.minor;
 	userinfo.bridge_id = kerninfo.device->vendor |
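Field-by-field assignment leaves struct padding uninitialized, and those stack bytes would otherwise be copied out to user space; the memset() added above closes that hole. A compilable sketch of the same idiom, with memcpy() standing in for copy_to_user() and an illustrative struct:

/*
 * Sketch: zero-initialize a struct so padding cannot leak on copy-out.
 */
#include <stdio.h>
#include <string.h>

struct info {
	char version;		/* padding bytes may follow */
	int bridge_id;
};

static void fill_info(char *user_buf)
{
	struct info userinfo;

	memset(&userinfo, 0, sizeof(userinfo));	/* clears padding too */
	userinfo.version = 2;
	userinfo.bridge_id = 0x8086;
	memcpy(user_buf, &userinfo, sizeof(userinfo)); /* copy_to_user() */
}

int main(void)
{
	char buf[sizeof(struct info)];

	fill_info(buf);
	printf("copied %zu bytes with padding zeroed\n", sizeof(buf));
	return 0;
}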
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b75713d953a..102c50d38902 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -995,8 +995,11 @@ retry:
 	ibytes = min_t(size_t, ibytes, have_bytes - reserved);
 	if (ibytes < min)
 		ibytes = 0;
-	entropy_count = max_t(int, 0,
-			      entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+	if (have_bytes >= ibytes + reserved)
+		entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+	else
+		entropy_count = reserved << (ENTROPY_SHIFT + 3);
+
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
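The accounting change above replaces a clamp-at-zero with an explicit check: only subtract when enough entropy is on hand, otherwise pin the count at the reserved floor. A sketch with plain integers; the shift constants mirror the driver's fixed-point units but the values are illustrative.

/*
 * Sketch of the underflow-safe debit with a reserved floor.
 */
#include <stdio.h>

#define ENTROPY_SHIFT 3
#define BYTES_TO_UNITS(b) ((b) << (ENTROPY_SHIFT + 3))

static int debit(int entropy_count, int take_bytes, int reserved_bytes)
{
	int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if (have_bytes >= take_bytes + reserved_bytes)
		entropy_count -= BYTES_TO_UNITS(take_bytes);
	else
		entropy_count = BYTES_TO_UNITS(reserved_bytes);
	return entropy_count;
}

int main(void)
{
	int count = BYTES_TO_UNITS(16);

	count = debit(count, 8, 4);	/* plenty on hand: plain subtract */
	printf("after debit: %d bytes\n", count >> (ENTROPY_SHIFT + 3));
	count = debit(count, 8, 4);	/* would dip below: pin at floor */
	printf("after debit: %d bytes\n", count >> (ENTROPY_SHIFT + 3));
	return 0;
}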
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b3ea223585bd..61dcc8011ec7 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
 	/* Cache TPM ACPI handle and version string */
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
 			    ppi_callback, NULL, NULL, &tpm_ppi_handle);
-	if (tpm_ppi_handle == NULL)
-		return -ENODEV;
-
-	return sysfs_create_group(parent, &ppi_attr_grp);
+	return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
 }
 
 void tpm_remove_ppi(struct kobject *parent)
 {
-	sysfs_remove_group(parent, &ppi_attr_grp);
+	if (tpm_ppi_handle)
+		sysfs_remove_group(parent, &ppi_attr_grp);
 }
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c7607feb18dd..54a06526f64f 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */
 
 static bool clk_requires_trigger(struct kona_clk *bcm_clk)
 {
-	struct peri_clk_data *peri = bcm_clk->peri;
+	struct peri_clk_data *peri = bcm_clk->u.peri;
 	struct bcm_clk_sel *sel;
 	struct bcm_clk_div *div;
 
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
 	u32 limit;
 
 	BUG_ON(bcm_clk->type != bcm_clk_peri);
-	peri = bcm_clk->peri;
+	peri = bcm_clk->u.peri;
 	name = bcm_clk->name;
 	range = bcm_clk->ccu->range;
 
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
 
 	div = &peri->div;
 	if (divider_exists(div)) {
-		if (div->offset > limit) {
+		if (div->u.s.offset > limit) {
 			pr_err("%s: bad divider offset for %s (%u > %u)\n",
-				__func__, name, div->offset, limit);
+				__func__, name, div->u.s.offset, limit);
 			return false;
 		}
 	}
 
 	div = &peri->pre_div;
 	if (divider_exists(div)) {
-		if (div->offset > limit) {
+		if (div->u.s.offset > limit) {
 			pr_err("%s: bad pre-divider offset for %s "
 					"(%u > %u)\n",
-				__func__, name, div->offset, limit);
+				__func__, name, div->u.s.offset, limit);
 			return false;
 		}
 	}
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
 {
 	if (divider_is_fixed(div)) {
 		/* Any fixed divider value but 0 is OK */
-		if (div->fixed == 0) {
+		if (div->u.fixed == 0) {
 			pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
 				field_name, clock_name);
 			return false;
 		}
 		return true;
 	}
-	if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+	if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+				field_name, clock_name))
 		return false;
 
 	if (divider_has_fraction(div))
-		if (div->frac_width > div->width) {
+		if (div->u.s.frac_width > div->u.s.width) {
 			pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
 				__func__, field_name, clock_name,
-				div->frac_width, div->width);
+				div->u.s.frac_width, div->u.s.width);
 			return false;
 		}
 
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
  */
 static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 {
-	struct peri_clk_data *peri = bcm_clk->peri;
+	struct peri_clk_data *peri = bcm_clk->u.peri;
 	struct bcm_clk_div *div;
 	struct bcm_clk_div *pre_div;
 	u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
 
 	limit = BITS_PER_BYTE * sizeof(u32);
 
-	return div->frac_width + pre_div->frac_width <= limit;
+	return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
 }
 
 
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
 	if (!peri_clk_data_offsets_valid(bcm_clk))
 		return false;
 
-	peri = bcm_clk->peri;
+	peri = bcm_clk->u.peri;
 	name = bcm_clk->name;
 	gate = &peri->gate;
 	if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
 {
 	switch (bcm_clk->type) {
 	case bcm_clk_peri:
-		peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+		peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
 		break;
 	default:
 		break;
 	}
-	bcm_clk->data = NULL;
+	bcm_clk->u.data = NULL;
 	bcm_clk->type = bcm_clk_none;
 }
 
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
 		break;
 	}
 	bcm_clk->type = type;
-	bcm_clk->data = data;
+	bcm_clk->u.data = data;
 
 	/* Make sure everything makes sense before we set it up */
 	if (!kona_clk_valid(bcm_clk)) {
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index e3d339e08309..db11a87449f2 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
 /* Convert a divider into the scaled divisor value it represents. */
 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
 {
-	return (u64)reg_div + ((u64)1 << div->frac_width);
+	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
 }
 
 /*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
 	BUG_ON(billionths >= BILLION);
 
 	combined = (u64)div_value * BILLION + billionths;
-	combined <<= div->frac_width;
+	combined <<= div->u.s.frac_width;
 
 	return do_div_round_closest(combined, BILLION);
 }
@@ -87,7 +87,7 @@ static inline u64
 scaled_div_min(struct bcm_clk_div *div)
 {
 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;
 
 	return scaled_div_value(div, 0);
 }
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
 	u32 reg_div;
 
 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;
 
-	reg_div = ((u32)1 << div->width) - 1;
+	reg_div = ((u32)1 << div->u.s.width) - 1;
 
 	return scaled_div_value(div, reg_div);
 }
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
 	BUG_ON(scaled_div < scaled_div_min(div));
 	BUG_ON(scaled_div > scaled_div_max(div));
 
-	return (u32)(scaled_div - ((u64)1 << div->frac_width));
+	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
 }
 
 /* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
 	if (divider_is_fixed(div))
 		return (u64)rate;
 
-	return (u64)rate << div->frac_width;
+	return (u64)rate << div->u.s.frac_width;
 }
 
 /* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
 	u32 reg_div;
 
 	if (divider_is_fixed(div))
-		return (u64)div->fixed;
+		return (u64)div->u.fixed;
 
 	flags = ccu_lock(ccu);
-	reg_val = __ccu_read(ccu, div->offset);
+	reg_val = __ccu_read(ccu, div->u.s.offset);
 	ccu_unlock(ccu, flags);
 
 	/* Extract the full divider field from the register value */
-	reg_div = bitfield_extract(reg_val, div->shift, div->width);
+	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
 
 	/* Return the scaled divisor value it represents */
 	return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	 * state was defined in the device tree, we just find out
 	 * what its current value is rather than updating it.
 	 */
-	if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
-		reg_val = __ccu_read(ccu, div->offset);
-		reg_div = bitfield_extract(reg_val, div->shift, div->width);
-		div->scaled_div = scaled_div_value(div, reg_div);
+	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+		reg_val = __ccu_read(ccu, div->u.s.offset);
+		reg_div = bitfield_extract(reg_val, div->u.s.shift,
+					div->u.s.width);
+		div->u.s.scaled_div = scaled_div_value(div, reg_div);
 
 		return 0;
 	}
 
 	/* Convert the scaled divisor to the value we need to record */
-	reg_div = divider(div, div->scaled_div);
+	reg_div = divider(div, div->u.s.scaled_div);
 
 	/* Clock needs to be enabled before changing the rate */
 	enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	}
 
 	/* Replace the divider value and record the result */
-	reg_val = __ccu_read(ccu, div->offset);
-	reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
-	__ccu_write(ccu, div->offset, reg_val);
+	reg_val = __ccu_read(ccu, div->u.s.offset);
+	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+				reg_div);
+	__ccu_write(ccu, div->u.s.offset, reg_val);
 
 	/* If the trigger fails we still want to disable the gate */
 	if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 
 	BUG_ON(divider_is_fixed(div));
 
-	previous = div->scaled_div;
+	previous = div->u.s.scaled_div;
 	if (previous == scaled_div)
 		return 0;	/* No change */
 
-	div->scaled_div = scaled_div;
+	div->u.s.scaled_div = scaled_div;
 
 	flags = ccu_lock(ccu);
 	__ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 	ccu_unlock(ccu, flags);
 
 	if (ret)
-		div->scaled_div = previous;	/* Revert the change */
+		div->u.s.scaled_div = previous;	/* Revert the change */
 
 	return ret;
 
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
 static int kona_peri_clk_enable(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
 	return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
 }
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
 static void kona_peri_clk_disable(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
 	(void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
 }
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
 static int kona_peri_clk_is_enabled(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
 
 	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
 }
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
 			unsigned long parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 
 	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
 			parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 			unsigned long *parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct bcm_clk_div *div = &bcm_clk->peri->div;
+	struct bcm_clk_div *div = &bcm_clk->u.peri->div;
 
 	if (!divider_exists(div))
 		return __clk_get_rate(hw->clk);
 
 	/* Quietly avoid a zero rate */
-	return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
 			rate ? rate : 1, *parent_rate, NULL);
 }
 
 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	struct bcm_clk_sel *sel = &data->sel;
 	struct bcm_clk_trig *trig;
 	int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
 static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	u8 index;
 
 	index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 			unsigned long parent_rate)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct peri_clk_data *data = bcm_clk->peri;
+	struct peri_clk_data *data = bcm_clk->u.peri;
 	struct bcm_clk_div *div = &data->div;
 	u64 scaled_div = 0;
 	int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
 static bool __peri_clk_init(struct kona_clk *bcm_clk)
 {
 	struct ccu_data *ccu = bcm_clk->ccu;
-	struct peri_clk_data *peri = bcm_clk->peri;
+	struct peri_clk_data *peri = bcm_clk->u.peri;
 	const char *name = bcm_clk->name;
 	struct bcm_clk_trig *trig;
 
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 5e139adc3dc5..dee690951bb6 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -57,7 +57,7 @@
 #define divider_exists(div)	FLAG_TEST(div, DIV, EXISTS)
 #define divider_is_fixed(div)	FLAG_TEST(div, DIV, FIXED)
 #define divider_has_fraction(div)	(!divider_is_fixed(div) && \
-					(div)->frac_width > 0)
+					(div)->u.s.frac_width > 0)
 
 #define selector_exists(sel)	((sel)->width != 0)
 #define trigger_exists(trig)	FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
 		u32 frac_width;	/* field fraction width */
 
 		u64 scaled_div;	/* scaled divider value */
-	};
+	} s;
 	u32 fixed;	/* non-zero fixed divider value */
-	};
+	} u;
 	u32 flags;	/* BCM_CLK_DIV_FLAGS_* below */
 };
 
@@ -263,28 +263,28 @@ struct bcm_clk_div {
 /* A fixed (non-zero) divider */
 #define FIXED_DIVIDER(_value)					\
 	{							\
-		.fixed = (_value),				\
+		.u.fixed = (_value),				\
 		.flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED),	\
 	}
 
 /* A divider with an integral divisor */
 #define DIVIDER(_offset, _shift, _width)			\
 	{							\
-		.offset = (_offset),				\
-		.shift = (_shift),				\
-		.width = (_width),				\
-		.scaled_div = BAD_SCALED_DIV_VALUE,		\
+		.u.s.offset = (_offset),			\
+		.u.s.shift = (_shift),				\
+		.u.s.width = (_width),				\
+		.u.s.scaled_div = BAD_SCALED_DIV_VALUE,		\
 		.flags = FLAG(DIV, EXISTS),			\
 	}
 
 /* A divider whose divisor has an integer and fractional part */
 #define FRAC_DIVIDER(_offset, _shift, _width, _frac_width)	\
 	{							\
-		.offset = (_offset),				\
-		.shift = (_shift),				\
-		.width = (_width),				\
-		.frac_width = (_frac_width),			\
-		.scaled_div = BAD_SCALED_DIV_VALUE,		\
+		.u.s.offset = (_offset),			\
+		.u.s.shift = (_shift),				\
+		.u.s.width = (_width),				\
+		.u.s.frac_width = (_frac_width),		\
+		.u.s.scaled_div = BAD_SCALED_DIV_VALUE,		\
 		.flags = FLAG(DIV, EXISTS),			\
 	}
 
@@ -380,7 +380,7 @@ struct kona_clk {
 	union {
 		void *data;
 		struct peri_clk_data *peri;
-	};
+	} u;
 };
 #define to_kona_clk(_hw) \
 	container_of(_hw, struct kona_clk, hw)
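The clk-kona rework above replaces anonymous union/struct members with named ones (u, u.s): with a named member, a designated initializer spells out the full path (.u.s.offset = ...), which compilers without anonymous-member initializer support accept. A compilable sketch; the struct below is illustrative, not the driver's.

/*
 * Sketch: named union member with full-path designated initializers.
 */
#include <stdio.h>

struct divider {
	union {
		struct {
			unsigned offset;
			unsigned width;
		} s;			/* register-backed divider */
		unsigned fixed;		/* fixed divider value */
	} u;
	unsigned flags;
};

int main(void)
{
	/* Full member paths, as in the DIVIDER()/FIXED_DIVIDER() macros. */
	struct divider reg_div = { .u.s.offset = 0x178, .u.s.width = 5 };
	struct divider fix_div = { .u.fixed = 2 };

	printf("offset=0x%x width=%u fixed=%u\n",
	       reg_div.u.s.offset, reg_div.u.s.width, fix_div.u.fixed);
	return 0;
}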
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ec22112e569f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
 	return true;
 }
 
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+	const struct clk_div_table *clkt;
+	int up = INT_MAX;
+
+	for (clkt = table; clkt->div; clkt++) {
+		if (clkt->div == div)
+			return clkt->div;
+		else if (clkt->div < div)
+			continue;
+
+		if ((clkt->div - div) < (up - div))
+			up = clkt->div;
+	}
+
+	return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+		unsigned long parent_rate, unsigned long rate)
+{
+	int div = DIV_ROUND_UP(parent_rate, rate);
+
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		div = __roundup_pow_of_two(div);
+	if (divider->table)
+		div = _round_up_table(divider->table, div);
+
+	return div;
+}
+
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 		unsigned long *best_parent_rate)
 {
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
 
 	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
-		bestdiv = DIV_ROUND_UP(parent_rate, rate);
+		bestdiv = _div_round_up(divider, parent_rate, rate);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
 		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
 		return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
 	u32 val;
 
 	div = DIV_ROUND_UP(parent_rate, rate);
+
+	if (!_is_valid_div(divider, div))
+		return -EINVAL;
+
 	value = _get_val(divider, div);
 
 	if (value > div_mask(divider))
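_round_up_table() above picks the smallest legal divisor that is greater than or equal to the ideal one, so the resulting clock rate never exceeds the requested rate. A standalone version with a zero-terminated table; the divisor values are illustrative.

/*
 * Sketch: round a divisor up to the nearest entry in an allowed table.
 */
#include <limits.h>
#include <stdio.h>

static int round_up_table(const int *table, int div)
{
	int up = INT_MAX;
	const int *t;

	for (t = table; *t; t++) {
		if (*t == div)
			return *t;	/* exact match is always best */
		if (*t < div)
			continue;	/* too small: rate would overshoot */
		if (*t - div < up - div)
			up = *t;	/* closest divisor from above */
	}
	return up;
}

int main(void)
{
	static const int table[] = { 1, 2, 4, 6, 12, 0 };

	printf("%d\n", round_up_table(table, 4));	/* 4: exact */
	printf("%d\n", round_up_table(table, 5));	/* 6: next legal */
	printf("%d\n", round_up_table(table, 13));	/* INT_MAX: none */
	return 0;
}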
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dff0373f53c1..7cf2c093cc54 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1984} 1984}
1985EXPORT_SYMBOL_GPL(__clk_register); 1985EXPORT_SYMBOL_GPL(__clk_register);
1986 1986
1987static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) 1987/**
1988 * clk_register - allocate a new clock, register it and return an opaque cookie
1989 * @dev: device that is registering this clock
1990 * @hw: link to hardware-specific clock data
1991 *
1992 * clk_register is the primary interface for populating the clock tree with new
1993 * clock nodes. It returns a pointer to the newly allocated struct clk which
1994 * cannot be dereferenced by driver code but may be used in conjuction with the
1995 * rest of the clock API. In the event of an error clk_register will return an
1996 * error code; drivers must test for an error code after calling clk_register.
1997 */
1998struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1988{ 1999{
1989 int i, ret; 2000 int i, ret;
2001 struct clk *clk;
2002
2003 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2004 if (!clk) {
2005 pr_err("%s: could not allocate clk\n", __func__);
2006 ret = -ENOMEM;
2007 goto fail_out;
2008 }
1990 2009
1991 clk->name = kstrdup(hw->init->name, GFP_KERNEL); 2010 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1992 if (!clk->name) { 2011 if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
2026 2045
2027 ret = __clk_init(dev, clk); 2046 ret = __clk_init(dev, clk);
2028 if (!ret) 2047 if (!ret)
2029 return 0; 2048 return clk;
2030 2049
2031fail_parent_names_copy: 2050fail_parent_names_copy:
2032 while (--i >= 0) 2051 while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
2035fail_parent_names: 2054fail_parent_names:
2036 kfree(clk->name); 2055 kfree(clk->name);
2037fail_name: 2056fail_name:
2038 return ret;
2039}
2040
2041/**
2042 * clk_register - allocate a new clock, register it and return an opaque cookie
2043 * @dev: device that is registering this clock
2044 * @hw: link to hardware-specific clock data
2045 *
2046 * clk_register is the primary interface for populating the clock tree with new
2047 * clock nodes. It returns a pointer to the newly allocated struct clk which
2048 * cannot be dereferenced by driver code but may be used in conjuction with the
2049 * rest of the clock API. In the event of an error clk_register will return an
2050 * error code; drivers must test for an error code after calling clk_register.
2051 */
2052struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2053{
2054 int ret;
2055 struct clk *clk;
2056
2057 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2058 if (!clk) {
2059 pr_err("%s: could not allocate clk\n", __func__);
2060 ret = -ENOMEM;
2061 goto fail_out;
2062 }
2063
2064 ret = _clk_register(dev, hw, clk);
2065 if (!ret)
2066 return clk;
2067
2068 kfree(clk); 2057 kfree(clk);
2069fail_out: 2058fail_out:
2070 return ERR_PTR(ret); 2059 return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
2151 2140
2152 if (!hlist_empty(&clk->children)) { 2141 if (!hlist_empty(&clk->children)) {
2153 struct clk *child; 2142 struct clk *child;
2143 struct hlist_node *t;
2154 2144
2155 /* Reparent all children to the orphan list. */ 2145 /* Reparent all children to the orphan list. */
2156 hlist_for_each_entry(child, &clk->children, child_node) 2146 hlist_for_each_entry_safe(child, t, &clk->children, child_node)
2157 clk_set_parent(child, NULL); 2147 clk_set_parent(child, NULL);
2158 } 2148 }
2159 2149
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
2173 2163
2174static void devm_clk_release(struct device *dev, void *res) 2164static void devm_clk_release(struct device *dev, void *res)
2175{ 2165{
2176 clk_unregister(res); 2166 clk_unregister(*(struct clk **)res);
2177} 2167}
2178 2168
2179/** 2169/**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
2188struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 2178struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2189{ 2179{
2190 struct clk *clk; 2180 struct clk *clk;
2191 int ret; 2181 struct clk **clkp;
2192 2182
2193 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); 2183 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2194 if (!clk) 2184 if (!clkp)
2195 return ERR_PTR(-ENOMEM); 2185 return ERR_PTR(-ENOMEM);
2196 2186
2197 ret = _clk_register(dev, hw, clk); 2187 clk = clk_register(dev, hw);
2198 if (!ret) { 2188 if (!IS_ERR(clk)) {
2199 devres_add(dev, clk); 2189 *clkp = clk;
2190 devres_add(dev, clkp);
2200 } else { 2191 } else {
2201 devres_free(clk); 2192 devres_free(clkp);
2202 clk = ERR_PTR(ret);
2203 } 2193 }
2204 2194
2205 return clk; 2195 return clk;
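
The devres conversion above is subtle enough to merit a standalone sketch: the managed allocation now stores only a struct clk *, so the release callback must dereference the payload before unregistering. This mirrors the new code in the hunk; it is an illustration, not the complete driver.

static void devm_clk_release(struct device *dev, void *res)
{
	/* res is the devres payload, which holds a struct clk * */
	clk_unregister(*(struct clk **)res);
}

struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk **clkp;
	struct clk *clk;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);	/* returns ERR_PTR() on failure */
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);	/* unregistered on device detach */
	} else {
		devres_free(clkp);	/* nothing registered, just free */
	}

	return clk;
}
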
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2e5810c88d11..1f6324e29a80 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
156static void __init cpg_mstp_clocks_init(struct device_node *np) 156static void __init cpg_mstp_clocks_init(struct device_node *np)
157{ 157{
158 struct mstp_clock_group *group; 158 struct mstp_clock_group *group;
159 const char *idxname;
159 struct clk **clks; 160 struct clk **clks;
160 unsigned int i; 161 unsigned int i;
161 162
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
184 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) 185 for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
185 clks[i] = ERR_PTR(-ENOENT); 186 clks[i] = ERR_PTR(-ENOENT);
186 187
188 if (of_find_property(np, "clock-indices", &i))
189 idxname = "clock-indices";
190 else
191 idxname = "renesas,clock-indices";
192
187 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { 193 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
188 const char *parent_name; 194 const char *parent_name;
189 const char *name; 195 const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
197 continue; 203 continue;
198 204
199 parent_name = of_clk_get_parent_name(np, i); 205 parent_name = of_clk_get_parent_name(np, i);
200 ret = of_property_read_u32_index(np, "renesas,clock-indices", i, 206 ret = of_property_read_u32_index(np, idxname, i, &clkidx);
201 &clkidx);
202 if (parent_name == NULL || ret < 0) 207 if (parent_name == NULL || ret < 0)
203 break; 208 break;
204 209
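
As a usage note, the fallback above lets new device trees use the generic clock-indices property while old ones keep working. A hedged sketch of the lookup follows; the helper name is mine, not the driver's:

static const char * __init mstp_index_propname(struct device_node *np)
{
	int len;

	/* Prefer the generic property; fall back to the legacy one. */
	if (of_find_property(np, "clock-indices", &len))
		return "clock-indices";
	return "renesas,clock-indices";
}

/* ... per-clock loop ... */
ret = of_property_read_u32_index(np, mstp_index_propname(np), i, &clkidx);
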
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 88dafb5e9627..de6da957a09d 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -20,6 +20,7 @@
20#include <linux/clk-provider.h> 20#include <linux/clk-provider.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_address.h>
23 24
24#include "clk.h" 25#include "clk.h"
25 26
@@ -43,6 +44,8 @@
43 44
44#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) 45#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
45 46
47void __iomem *clk_mgr_base_addr;
48
46static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, 49static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
47 unsigned long parent_rate) 50 unsigned long parent_rate)
48{ 51{
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
87 const char *clk_name = node->name; 90 const char *clk_name = node->name;
88 const char *parent_name[SOCFPGA_MAX_PARENTS]; 91 const char *parent_name[SOCFPGA_MAX_PARENTS];
89 struct clk_init_data init; 92 struct clk_init_data init;
93 struct device_node *clkmgr_np;
90 int rc; 94 int rc;
91 int i = 0; 95 int i = 0;
92 96
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
96 if (WARN_ON(!pll_clk)) 100 if (WARN_ON(!pll_clk))
97 return NULL; 101 return NULL;
98 102
103 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
104 clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
105 BUG_ON(!clk_mgr_base_addr);
99 pll_clk->hw.reg = clk_mgr_base_addr + reg; 106 pll_clk->hw.reg = clk_mgr_base_addr + reg;
100 107
101 of_property_read_string(node, "clock-output-names", &clk_name); 108 of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 35a960a993f9..43db947e5f0e 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -17,28 +17,11 @@
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */ 19 */
20#include <linux/clk.h>
21#include <linux/clkdev.h>
22#include <linux/clk-provider.h>
23#include <linux/io.h>
24#include <linux/of.h> 20#include <linux/of.h>
25#include <linux/of_address.h>
26 21
27#include "clk.h" 22#include "clk.h"
28 23
29void __iomem *clk_mgr_base_addr; 24CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
30 25CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
31static const struct of_device_id socfpga_child_clocks[] __initconst = { 26CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
32 { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
33 { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
34 { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
35 {},
36};
37
38static void __init socfpga_clkmgr_init(struct device_node *node)
39{
40 clk_mgr_base_addr = of_iomap(node, 0);
41 of_clk_init(socfpga_child_clocks);
42}
43CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
44 27
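
The rewrite drops the intermediate clk-mgr walker: each provider now registers its own init hook against its compatible string, and the core's of_clk_init() matching handles the iteration. A minimal sketch, with illustrative names:

static void __init my_pll_init(struct device_node *np)
{
	/* map registers, parse properties, register the clock ... */
}
CLK_OF_DECLARE(my_pll, "vendor,my-pll-clock", my_pll_init);

/*
 * At boot, of_clk_init(NULL) scans the device tree and invokes
 * my_pll_init() once per matching node, honoring clock dependencies.
 */
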
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
521 gate->lock = odf_lock; 521 gate->lock = odf_lock;
522 522
523 div = kzalloc(sizeof(*div), GFP_KERNEL); 523 div = kzalloc(sizeof(*div), GFP_KERNEL);
524 if (!div) 524 if (!div) {
525 kfree(gate);
525 return ERR_PTR(-ENOMEM); 526 return ERR_PTR(-ENOMEM);
527 }
526 528
527 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; 529 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
528 div->reg = reg + pll_data->odf[odf].offset; 530 div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 0d20241e0770..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
58#define PLLDU_LFCON_SET_DIVN 600 58#define PLLDU_LFCON_SET_DIVN 600
59 59
60#define PLLE_BASE_DIVCML_SHIFT 24 60#define PLLE_BASE_DIVCML_SHIFT 24
61#define PLLE_BASE_DIVCML_WIDTH 4 61#define PLLE_BASE_DIVCML_MASK 0xf
62#define PLLE_BASE_DIVP_SHIFT 16 62#define PLLE_BASE_DIVP_SHIFT 16
63#define PLLE_BASE_DIVP_WIDTH 7 63#define PLLE_BASE_DIVP_WIDTH 6
64#define PLLE_BASE_DIVN_SHIFT 8 64#define PLLE_BASE_DIVN_SHIFT 8
65#define PLLE_BASE_DIVN_WIDTH 8 65#define PLLE_BASE_DIVN_WIDTH 8
66#define PLLE_BASE_DIVM_SHIFT 0 66#define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\ 183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
184 mask(p->params->div_nmp->divp_width)) 184 mask(p->params->div_nmp->divp_width))
185 185
186#define divm_shift(p) (p)->params->div_nmp->divm_shift
187#define divn_shift(p) (p)->params->div_nmp->divn_shift
188#define divp_shift(p) (p)->params->div_nmp->divp_shift
189
190#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
191#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
192#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
193
186#define divm_max(p) (divm_mask(p)) 194#define divm_max(p) (divm_mask(p))
187#define divn_max(p) (divn_mask(p)) 195#define divn_max(p) (divn_mask(p))
188#define divp_max(p) (1 << (divp_mask(p))) 196#define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
476 } else { 484 } else {
477 val = pll_readl_base(pll); 485 val = pll_readl_base(pll);
478 486
479 val &= ~((divm_mask(pll) << div_nmp->divm_shift) | 487 val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
480 (divn_mask(pll) << div_nmp->divn_shift) | 488 divp_mask_shifted(pll));
481 (divp_mask(pll) << div_nmp->divp_shift));
482 489
483 val |= ((cfg->m << div_nmp->divm_shift) | 490 val |= (cfg->m << divm_shift(pll)) |
484 (cfg->n << div_nmp->divn_shift) | 491 (cfg->n << divn_shift(pll)) |
485 (cfg->p << div_nmp->divp_shift)); 492 (cfg->p << divp_shift(pll));
486 493
487 pll_writel_base(val, pll); 494 pll_writel_base(val, pll);
488 } 495 }
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
730 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { 737 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
731 /* configure dividers */ 738 /* configure dividers */
732 val = pll_readl_base(pll); 739 val = pll_readl_base(pll);
733 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 740 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
734 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 741 divm_mask_shifted(pll));
735 val |= sel.m << pll->params->div_nmp->divm_shift; 742 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
736 val |= sel.n << pll->params->div_nmp->divn_shift; 743 val |= sel.m << divm_shift(pll);
737 val |= sel.p << pll->params->div_nmp->divp_shift; 744 val |= sel.n << divn_shift(pll);
745 val |= sel.p << divp_shift(pll);
738 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 746 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
739 pll_writel_base(val, pll); 747 pll_writel_base(val, pll);
740 } 748 }
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
745 pll_writel_misc(val, pll); 753 pll_writel_misc(val, pll);
746 754
747 val = readl(pll->clk_base + PLLE_SS_CTRL); 755 val = readl(pll->clk_base + PLLE_SS_CTRL);
756 val &= ~PLLE_SS_COEFFICIENTS_MASK;
748 val |= PLLE_SS_DISABLE; 757 val |= PLLE_SS_DISABLE;
749 writel(val, pll->clk_base + PLLE_SS_CTRL); 758 writel(val, pll->clk_base + PLLE_SS_CTRL);
750 759
751 val |= pll_readl_base(pll); 760 val = pll_readl_base(pll);
752 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); 761 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
753 pll_writel_base(val, pll); 762 pll_writel_base(val, pll);
754 763
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1292 pll_writel(val, PLLE_SS_CTRL, pll); 1301 pll_writel(val, PLLE_SS_CTRL, pll);
1293 1302
1294 val = pll_readl_base(pll); 1303 val = pll_readl_base(pll);
1295 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 1304 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
1296 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 1305 divm_mask_shifted(pll));
1297 val |= sel.m << pll->params->div_nmp->divm_shift; 1306 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
1298 val |= sel.n << pll->params->div_nmp->divn_shift; 1307 val |= sel.m << divm_shift(pll);
1308 val |= sel.n << divn_shift(pll);
1299 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 1309 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
1300 pll_writel_base(val, pll); 1310 pll_writel_base(val, pll);
1301 udelay(1); 1311 udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
1410 return clk; 1420 return clk;
1411} 1421}
1412 1422
1423static struct div_nmp pll_e_nmp = {
1424 .divn_shift = PLLE_BASE_DIVN_SHIFT,
1425 .divn_width = PLLE_BASE_DIVN_WIDTH,
1426 .divm_shift = PLLE_BASE_DIVM_SHIFT,
1427 .divm_width = PLLE_BASE_DIVM_WIDTH,
1428 .divp_shift = PLLE_BASE_DIVP_SHIFT,
1429 .divp_width = PLLE_BASE_DIVP_WIDTH,
1430};
1431
1413struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, 1432struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1414 void __iomem *clk_base, void __iomem *pmc, 1433 void __iomem *clk_base, void __iomem *pmc,
1415 unsigned long flags, struct tegra_clk_pll_params *pll_params, 1434 unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1420 1439
1421 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; 1440 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
1422 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; 1441 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
1442
1443 if (!pll_params->div_nmp)
1444 pll_params->div_nmp = &pll_e_nmp;
1445
1423 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); 1446 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
1424 if (IS_ERR(pll)) 1447 if (IS_ERR(pll))
1425 return ERR_CAST(pll); 1448 return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
1557 int m; 1580 int m;
1558 1581
1559 m = _pll_fixed_mdiv(pll_params, parent_rate); 1582 m = _pll_fixed_mdiv(pll_params, parent_rate);
1560 val = m << PLL_BASE_DIVM_SHIFT; 1583 val = m << divm_shift(pll);
1561 val |= (pll_params->vco_min / parent_rate) 1584 val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
1562 << PLL_BASE_DIVN_SHIFT;
1563 pll_writel_base(val, pll); 1585 pll_writel_base(val, pll);
1564 } 1586 }
1565 1587
@@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
1718 "pll_re_vco"); 1740 "pll_re_vco");
1719 } else { 1741 } else {
1720 val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); 1742 val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
1721 pll_writel(val, pll_params->aux_reg, pll); 1743 pll_writel(val_aux, pll_params->aux_reg, pll);
1722 } 1744 }
1723 1745
1724 clk = _tegra_clk_register_pll(pll, name, parent_name, flags, 1746 clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
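
The common thread in the Tegra hunks is that the old code cleared the unshifted divider masks (and even used PLLE_BASE_DIVCML_WIDTH where a mask was needed), wiping the wrong register bits. The *_mask_shifted() helpers pair each mask with its field offset so the read-modify-write touches exactly the intended fields; a condensed sketch of the pattern:

u32 val = pll_readl_base(pll);

/* Clear the m/n/p fields at their actual register positions ... */
val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
	 divp_mask_shifted(pll));

/* ... then insert the new divider settings, shifted into place. */
val |= (cfg->m << divm_shift(pll)) |
       (cfg->n << divn_shift(pll)) |
       (cfg->p << divp_shift(pll));

pll_writel_base(val, pll);
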
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index a535c7bf8574..422391242b39 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -100,6 +100,8 @@ void __init vexpress_osc_of_setup(struct device_node *node)
100 struct clk *clk; 100 struct clk *clk;
101 u32 range[2]; 101 u32 range[2];
102 102
103 vexpress_sysreg_of_early_init();
104
103 osc = kzalloc(sizeof(*osc), GFP_KERNEL); 105 osc = kzalloc(sizeof(*osc), GFP_KERNEL);
104 if (!osc) 106 if (!osc)
105 return; 107 return;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57e823c44d2a..5163ec13429d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
66static struct clock_event_device __percpu *arch_timer_evt; 66static struct clock_event_device __percpu *arch_timer_evt;
67 67
68static bool arch_timer_use_virtual = true; 68static bool arch_timer_use_virtual = true;
69static bool arch_timer_c3stop;
69static bool arch_timer_mem_use_virtual; 70static bool arch_timer_mem_use_virtual;
70 71
71/* 72/*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
263 clk->features = CLOCK_EVT_FEAT_ONESHOT; 264 clk->features = CLOCK_EVT_FEAT_ONESHOT;
264 265
265 if (type == ARCH_CP15_TIMER) { 266 if (type == ARCH_CP15_TIMER) {
266 clk->features |= CLOCK_EVT_FEAT_C3STOP; 267 if (arch_timer_c3stop)
268 clk->features |= CLOCK_EVT_FEAT_C3STOP;
267 clk->name = "arch_sys_timer"; 269 clk->name = "arch_sys_timer";
268 clk->rating = 450; 270 clk->rating = 450;
269 clk->cpumask = cpumask_of(smp_processor_id()); 271 clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
665 } 667 }
666 } 668 }
667 669
670 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
671
668 arch_timer_register(); 672 arch_timer_register();
669 arch_timer_common_init(); 673 arch_timer_common_init();
670} 674}
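
For reference, the new behavior keys off a standard device-tree property: a timer that keeps ticking in deep idle states advertises always-on, and only timers without it get CLOCK_EVT_FEAT_C3STOP. A sketch (the DT fragment is illustrative):

/* In the DT:
 *	timer {
 *		compatible = "arm,armv7-timer";
 *		always-on;
 *	};
 */
arch_timer_c3stop = !of_property_read_bool(np, "always-on");

if (arch_timer_c3stop)
	clk->features |= CLOCK_EVT_FEAT_C3STOP;	/* stops in C3; needs broadcast */
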
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { 100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); 101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); 102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
103 clk_disable_unprepare(tcd->clk); 103 clk_disable(tcd->clk);
104 } 104 }
105 105
106 switch (m) { 106 switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
109 * of oneshot, we get lower overhead and improved accuracy. 109 * of oneshot, we get lower overhead and improved accuracy.
110 */ 110 */
111 case CLOCK_EVT_MODE_PERIODIC: 111 case CLOCK_EVT_MODE_PERIODIC:
112 clk_prepare_enable(tcd->clk); 112 clk_enable(tcd->clk);
113 113
114 /* slow clock, count up to RC, then irq and restart */ 114 /* slow clock, count up to RC, then irq and restart */
115 __raw_writel(timer_clock 115 __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
126 break; 126 break;
127 127
128 case CLOCK_EVT_MODE_ONESHOT: 128 case CLOCK_EVT_MODE_ONESHOT:
129 clk_prepare_enable(tcd->clk); 129 clk_enable(tcd->clk);
130 130
131 /* slow clock, count up to RC, then irq and stop */ 131 /* slow clock, count up to RC, then irq and stop */
132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP 132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
194 ret = clk_prepare_enable(t2_clk); 194 ret = clk_prepare_enable(t2_clk);
195 if (ret) 195 if (ret)
196 return ret; 196 return ret;
197 clk_disable_unprepare(t2_clk); 197 clk_disable(t2_clk);
198 198
199 clkevt.regs = tc->regs; 199 clkevt.regs = tc->regs;
200 clkevt.clk = t2_clk; 200 clkevt.clk = t2_clk;
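
The tcb_clksrc change is about calling context: the clock-events mode hook runs with interrupts off, and clk_prepare()/clk_unprepare() may sleep, while clk_enable()/clk_disable() must be atomic-safe. Preparing once at setup and only gating afterwards keeps the fast path legal; a sketch of the split:

/* setup, process context: prepare once and keep it prepared */
ret = clk_prepare_enable(t2_clk);
if (ret)
	return ret;
clk_disable(t2_clk);		/* gate the clock, but stay prepared */

/* mode switch, atomic context: only enable/disable are allowed */
clk_enable(tcd->clk);
/* ... program the timer ... */
clk_disable(tcd->clk);
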
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
199 199
200 action->dev_id = ce; 200 action->dev_id = ce;
201 BUG_ON(setup_irq(ce->irq, action)); 201 BUG_ON(setup_irq(ce->irq, action));
202 irq_set_affinity(action->irq, cpumask_of(cpu)); 202 irq_force_affinity(action->irq, cpumask_of(cpu));
203 203
204 clockevents_register_device(ce); 204 clockevents_register_device(ce);
205 return 0; 205 return 0;
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ca81809d159d..7ce442148c3f 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -212,4 +212,9 @@ error_free:
212 return ret; 212 return ret;
213} 213}
214 214
215CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); 215static void __init zevio_timer_init(struct device_node *node)
216{
217 BUG_ON(zevio_timer_add(node));
218}
219
220CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 148d707a1d43..ccdd4c7e748b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
369 return; 369 return;
370 370
371 /* Can only change if privileged. */ 371 /* Can only change if privileged. */
372 if (!capable(CAP_NET_ADMIN)) { 372 if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
373 err = EPERM; 373 err = EPERM;
374 goto out; 374 goto out;
375 } 375 }
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
130 return -ENOENT; 130 return -ENOENT;
131 } 131 }
132 132
133 cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); 133 cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
134 if (IS_ERR(cpu_reg)) { 134 if (IS_ERR(cpu_reg)) {
135 /* 135 /*
136 * If cpu0 regulator supply node is present, but regulator is 136 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
145 PTR_ERR(cpu_reg)); 145 PTR_ERR(cpu_reg));
146 } 146 }
147 147
148 cpu_clk = devm_clk_get(cpu_dev, NULL); 148 cpu_clk = clk_get(cpu_dev, NULL);
149 if (IS_ERR(cpu_clk)) { 149 if (IS_ERR(cpu_clk)) {
150 ret = PTR_ERR(cpu_clk); 150 ret = PTR_ERR(cpu_clk);
151 pr_err("failed to get cpu0 clock: %d\n", ret); 151 pr_err("failed to get cpu0 clock: %d\n", ret);
152 goto out_put_node; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 ret = of_init_opp_table(cpu_dev);
156 if (ret) { 156 if (ret) {
157 pr_err("failed to init OPP table: %d\n", ret); 157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_node; 158 goto out_put_clk;
159 } 159 }
160 160
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 162 if (ret) {
163 pr_err("failed to init cpufreq table: %d\n", ret); 163 pr_err("failed to init cpufreq table: %d\n", ret);
164 goto out_put_node; 164 goto out_put_clk;
165 } 165 }
166 166
167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); 167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
216 216
217out_free_table: 217out_free_table:
218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
219out_put_clk:
220 if (!IS_ERR(cpu_clk))
221 clk_put(cpu_clk);
222out_put_reg:
223 if (!IS_ERR(cpu_reg))
224 regulator_put(cpu_reg);
219out_put_node: 225out_put_node:
220 of_node_put(np); 226 of_node_put(np);
221 return ret; 227 return ret;
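
Dropping the devm_* variants means the probe path owns its resources again, so every failure after an acquisition must release in reverse order. The goto ladder the patch builds is the standard idiom; condensed from the hunk:

cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
/* a missing regulator may be tolerated; see the driver's comment */

cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
	ret = PTR_ERR(cpu_clk);
	goto out_put_reg;
}

/* ... later failures jump to out_put_clk ... */

out_put_clk:
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);
out_put_reg:
	if (!IS_ERR(cpu_reg))
		regulator_put(cpu_reg);
out_put_node:
	of_node_put(np);
	return ret;
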
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
366 break; 366 break;
367 367
368 case CPUFREQ_GOV_LIMITS: 368 case CPUFREQ_GOV_LIMITS:
369 mutex_lock(&dbs_data->mutex);
370 if (!cpu_cdbs->cur_policy) {
371 mutex_unlock(&dbs_data->mutex);
372 break;
373 }
369 mutex_lock(&cpu_cdbs->timer_mutex); 374 mutex_lock(&cpu_cdbs->timer_mutex);
370 if (policy->max < cpu_cdbs->cur_policy->cur) 375 if (policy->max < cpu_cdbs->cur_policy->cur)
371 __cpufreq_driver_target(cpu_cdbs->cur_policy, 376 __cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
375 policy->min, CPUFREQ_RELATION_L); 380 policy->min, CPUFREQ_RELATION_L);
376 dbs_check_cpu(dbs_data, cpu); 381 dbs_check_cpu(dbs_data, cpu);
377 mutex_unlock(&cpu_cdbs->timer_mutex); 382 mutex_unlock(&cpu_cdbs->timer_mutex);
383 mutex_unlock(&dbs_data->mutex);
378 break; 384 break;
379 } 385 }
380 return 0; 386 return 0;
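
The race being closed: a CPUFREQ_GOV_LIMITS event can arrive after GOV_STOP has cleared cur_policy, so the handler must take dbs_data->mutex and re-check the pointer before dereferencing it. The guard pattern, in brief:

case CPUFREQ_GOV_LIMITS:
	mutex_lock(&dbs_data->mutex);
	if (!cpu_cdbs->cur_policy) {
		/* governor was stopped concurrently; nothing to adjust */
		mutex_unlock(&dbs_data->mutex);
		break;
	}
	mutex_lock(&cpu_cdbs->timer_mutex);
	/* ... clamp cur_policy to the new min/max ... */
	mutex_unlock(&cpu_cdbs->timer_mutex);
	mutex_unlock(&dbs_data->mutex);
	break;
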
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 099967302bf2..eab8ccfe6beb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,7 @@
37#define BYT_RATIOS 0x66a 37#define BYT_RATIOS 0x66a
38#define BYT_VIDS 0x66b 38#define BYT_VIDS 0x66b
39#define BYT_TURBO_RATIOS 0x66c 39#define BYT_TURBO_RATIOS 0x66c
40#define BYT_TURBO_VIDS 0x66d
40 41
41 42
42#define FRAC_BITS 6 43#define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
70}; 71};
71 72
72struct vid_data { 73struct vid_data {
73 int32_t min; 74 int min;
74 int32_t max; 75 int max;
76 int turbo;
75 int32_t ratio; 77 int32_t ratio;
76}; 78};
77 79
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
359{ 361{
360 u64 value; 362 u64 value;
361 rdmsrl(BYT_RATIOS, value); 363 rdmsrl(BYT_RATIOS, value);
362 return (value >> 8) & 0xFF; 364 return (value >> 8) & 0x3F;
363} 365}
364 366
365static int byt_get_max_pstate(void) 367static int byt_get_max_pstate(void)
366{ 368{
367 u64 value; 369 u64 value;
368 rdmsrl(BYT_RATIOS, value); 370 rdmsrl(BYT_RATIOS, value);
369 return (value >> 16) & 0xFF; 371 return (value >> 16) & 0x3F;
370} 372}
371 373
372static int byt_get_turbo_pstate(void) 374static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
393 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 395 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
394 vid = fp_toint(vid_fp); 396 vid = fp_toint(vid_fp);
395 397
398 if (pstate > cpudata->pstate.max_pstate)
399 vid = cpudata->vid.turbo;
400
396 val |= vid; 401 val |= vid;
397 402
398 wrmsrl(MSR_IA32_PERF_CTL, val); 403 wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
402{ 407{
403 u64 value; 408 u64 value;
404 409
410
405 rdmsrl(BYT_VIDS, value); 411 rdmsrl(BYT_VIDS, value);
406 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 412 cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
407 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 413 cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
408 cpudata->vid.ratio = div_fp( 414 cpudata->vid.ratio = div_fp(
409 cpudata->vid.max - cpudata->vid.min, 415 cpudata->vid.max - cpudata->vid.min,
410 int_tofp(cpudata->pstate.max_pstate - 416 int_tofp(cpudata->pstate.max_pstate -
411 cpudata->pstate.min_pstate)); 417 cpudata->pstate.min_pstate));
418
419 rdmsrl(BYT_TURBO_VIDS, value);
420 cpudata->vid.turbo = value & 0x7f;
412} 421}
413 422
414 423
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
545 554
546 if (pstate_funcs.get_vid) 555 if (pstate_funcs.get_vid)
547 pstate_funcs.get_vid(cpu); 556 pstate_funcs.get_vid(cpu);
548 557 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
549 /*
550 * goto max pstate so we don't slow up boot if we are built-in if we are
551 * a module we will take care of it during normal operation
552 */
553 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
554} 558}
555 559
556static inline void intel_pstate_calc_busy(struct cpudata *cpu, 560static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
695 cpu = all_cpu_data[cpunum]; 699 cpu = all_cpu_data[cpunum];
696 700
697 intel_pstate_get_cpu_pstates(cpu); 701 intel_pstate_get_cpu_pstates(cpu);
698 if (!cpu->pstate.current_pstate) {
699 all_cpu_data[cpunum] = NULL;
700 kfree(cpu);
701 return -ENODATA;
702 }
703 702
704 cpu->cpu = cpunum; 703 cpu->cpu = cpunum;
705 704
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
710 cpu->timer.expires = jiffies + HZ/100; 709 cpu->timer.expires = jiffies + HZ/100;
711 intel_pstate_busy_pid_reset(cpu); 710 intel_pstate_busy_pid_reset(cpu);
712 intel_pstate_sample(cpu); 711 intel_pstate_sample(cpu);
713 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
714 712
715 add_timer_on(&cpu->timer, cpunum); 713 add_timer_on(&cpu->timer, cpunum);
716 714
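
Two of the Baytrail fixes are pure field-width corrections: the ratio and VID fields in the BYT MSRs are 6 bits wide, so masking with 0xFF (or 0x7F) picks up adjacent bits. A hedged sketch of the corrected reads; the local variable names are mine:

u64 value;
int min_pstate, max_pstate;

rdmsrl(BYT_RATIOS, value);
min_pstate = (value >> 8) & 0x3F;	/* 6-bit field, not 0xFF */
max_pstate = (value >> 16) & 0x3F;

rdmsrl(BYT_VIDS, value);
/* the VID min/max fields in this register are likewise 6 bits */
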
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index d00e5d1abd25..5c4369b5d834 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
242 * Sets a new clock ratio. 242 * Sets a new clock ratio.
243 */ 243 */
244 244
245static void longhaul_setstate(struct cpufreq_policy *policy, 245static int longhaul_setstate(struct cpufreq_policy *policy,
246 unsigned int table_index) 246 unsigned int table_index)
247{ 247{
248 unsigned int mults_index; 248 unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
258 /* Safety precautions */ 258 /* Safety precautions */
259 mult = mults[mults_index & 0x1f]; 259 mult = mults[mults_index & 0x1f];
260 if (mult == -1) 260 if (mult == -1)
261 return; 261 return -EINVAL;
262
262 speed = calc_speed(mult); 263 speed = calc_speed(mult);
263 if ((speed > highest_speed) || (speed < lowest_speed)) 264 if ((speed > highest_speed) || (speed < lowest_speed))
264 return; 265 return -EINVAL;
266
265 /* Voltage transition before frequency transition? */ 267 /* Voltage transition before frequency transition? */
266 if (can_scale_voltage && longhaul_index < table_index) 268 if (can_scale_voltage && longhaul_index < table_index)
267 dir = 1; 269 dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
269 freqs.old = calc_speed(longhaul_get_cpu_mult()); 271 freqs.old = calc_speed(longhaul_get_cpu_mult());
270 freqs.new = speed; 272 freqs.new = speed;
271 273
272 cpufreq_freq_transition_begin(policy, &freqs);
273
274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
275 fsb, mult/10, mult%10, print_speed(speed/1000)); 275 fsb, mult/10, mult%10, print_speed(speed/1000));
276retry_loop: 276retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
385 goto retry_loop; 385 goto retry_loop;
386 } 386 }
387 } 387 }
388 /* Report true CPU frequency */
389 cpufreq_freq_transition_end(policy, &freqs, 0);
390 388
391 if (!bm_timeout) 389 if (!bm_timeout) {
392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 390 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
393 "idle PCI bus.\n"); 391 "idle PCI bus.\n");
392 return -EBUSY;
393 }
394
395 return 0;
394} 396}
395 397
396/* 398/*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
631 unsigned int i; 633 unsigned int i;
632 unsigned int dir = 0; 634 unsigned int dir = 0;
633 u8 vid, current_vid; 635 u8 vid, current_vid;
636 int retval = 0;
634 637
635 if (!can_scale_voltage) 638 if (!can_scale_voltage)
636 longhaul_setstate(policy, table_index); 639 retval = longhaul_setstate(policy, table_index);
637 else { 640 else {
638 /* On test system voltage transitions exceeding single 641 /* On test system voltage transitions exceeding single
639 * step up or down were turning motherboard off. Both 642 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
648 while (i != table_index) { 651 while (i != table_index) {
649 vid = (longhaul_table[i].driver_data >> 8) & 0x1f; 652 vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
650 if (vid != current_vid) { 653 if (vid != current_vid) {
651 longhaul_setstate(policy, i); 654 retval = longhaul_setstate(policy, i);
652 current_vid = vid; 655 current_vid = vid;
653 msleep(200); 656 msleep(200);
654 } 657 }
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
657 else 660 else
658 i--; 661 i--;
659 } 662 }
660 longhaul_setstate(policy, table_index); 663 retval = longhaul_setstate(policy, table_index);
661 } 664 }
665
662 longhaul_index = table_index; 666 longhaul_index = table_index;
663 return 0; 667 return retval;
664} 668}
665 669
666 670
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
968 972
969 for (i = 0; i < numscales; i++) { 973 for (i = 0; i < numscales; i++) {
970 if (mults[i] == maxmult) { 974 if (mults[i] == maxmult) {
975 struct cpufreq_freqs freqs;
976
977 freqs.old = policy->cur;
978 freqs.new = longhaul_table[i].frequency;
979 freqs.flags = 0;
980
981 cpufreq_freq_transition_begin(policy, &freqs);
971 longhaul_setstate(policy, i); 982 longhaul_setstate(policy, i);
983 cpufreq_freq_transition_end(policy, &freqs, 0);
972 break; 984 break;
973 } 985 }
974 } 986 }
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index f0bc31f5db27..d4add8621944 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
62 set_cpus_allowed_ptr(current, &cpus_allowed); 62 set_cpus_allowed_ptr(current, &cpus_allowed);
63 63
64 /* setting the cpu frequency */ 64 /* setting the cpu frequency */
65 clk_set_rate(policy->clk, freq); 65 clk_set_rate(policy->clk, freq * 1000);
66 66
67 return 0; 67 return 0;
68} 68}
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
92 i++) 92 i++)
93 loongson2_clockmod_table[i].frequency = (rate * i) / 8; 93 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
94 94
95 ret = clk_set_rate(cpuclk, rate); 95 ret = clk_set_rate(cpuclk, rate * 1000);
96 if (ret) { 96 if (ret) {
97 clk_put(cpuclk); 97 clk_put(cpuclk);
98 return ret; 98 return ret;
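
The unit mismatch here is a classic: cpufreq frequency tables are kept in kHz, while clk_set_rate() takes Hz. Both call sites gain the same conversion; in isolation (the index variable is illustrative):

unsigned int freq_khz = loongson2_clockmod_table[idx].frequency;

/* clk API rates are in Hz; the table entry is in kHz */
ret = clk_set_rate(policy->clk, freq_khz * 1000UL);
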
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 49f120e1bc7b..78904e6ca4a0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
138static int powernow_k6_target(struct cpufreq_policy *policy, 138static int powernow_k6_target(struct cpufreq_policy *policy,
139 unsigned int best_i) 139 unsigned int best_i)
140{ 140{
141 struct cpufreq_freqs freqs;
142 141
143 if (clock_ratio[best_i].driver_data > max_multiplier) { 142 if (clock_ratio[best_i].driver_data > max_multiplier) {
144 printk(KERN_ERR PFX "invalid target frequency\n"); 143 printk(KERN_ERR PFX "invalid target frequency\n");
145 return -EINVAL; 144 return -EINVAL;
146 } 145 }
147 146
148 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
149 freqs.new = busfreq * clock_ratio[best_i].driver_data;
150
151 cpufreq_freq_transition_begin(policy, &freqs);
152
153 powernow_k6_set_cpu_multiplier(best_i); 147 powernow_k6_set_cpu_multiplier(best_i);
154 148
155 cpufreq_freq_transition_end(policy, &freqs, 0);
156
157 return 0; 149 return 0;
158} 150}
159 151
@@ -227,9 +219,20 @@ have_busfreq:
227static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) 219static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
228{ 220{
229 unsigned int i; 221 unsigned int i;
230 for (i = 0; i < 8; i++) { 222
231 if (i == max_multiplier) 223 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
224 if (clock_ratio[i].driver_data == max_multiplier) {
225 struct cpufreq_freqs freqs;
226
227 freqs.old = policy->cur;
228 freqs.new = clock_ratio[i].frequency;
229 freqs.flags = 0;
230
231 cpufreq_freq_transition_begin(policy, &freqs);
232 powernow_k6_target(policy, i); 232 powernow_k6_target(policy, i);
233 cpufreq_freq_transition_end(policy, &freqs, 0);
234 break;
235 }
233 } 236 }
234 return 0; 237 return 0;
235} 238}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index f911645c3f6d..e61e224475ad 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
269 269
270 freqs.new = powernow_table[index].frequency; 270 freqs.new = powernow_table[index].frequency;
271 271
272 cpufreq_freq_transition_begin(policy, &freqs);
273
274 /* Now do the magic poking into the MSRs. */ 272 /* Now do the magic poking into the MSRs. */
275 273
276 if (have_a0 == 1) /* A0 errata 5 */ 274 if (have_a0 == 1) /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
290 if (have_a0 == 1) 288 if (have_a0 == 1)
291 local_irq_enable(); 289 local_irq_enable();
292 290
293 cpufreq_freq_transition_end(policy, &freqs, 0);
294
295 return 0; 291 return 0;
296} 292}
297 293
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index a1ca3dd04a8e..0af618abebaf 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
138 struct cpufreq_frequency_table *table; 138 struct cpufreq_frequency_table *table;
139 struct cpu_data *data; 139 struct cpu_data *data;
140 unsigned int cpu = policy->cpu; 140 unsigned int cpu = policy->cpu;
141 u64 transition_latency_hz;
141 142
142 np = of_get_cpu_node(cpu, NULL); 143 np = of_get_cpu_node(cpu, NULL);
143 if (!np) 144 if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
205 for_each_cpu(i, per_cpu(cpu_mask, cpu)) 206 for_each_cpu(i, per_cpu(cpu_mask, cpu))
206 per_cpu(cpu_data, i) = data; 207 per_cpu(cpu_data, i) = data;
207 208
209 transition_latency_hz = 12ULL * NSEC_PER_SEC;
208 policy->cpuinfo.transition_latency = 210 policy->cpuinfo.transition_latency =
209 (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq(); 211 do_div(transition_latency_hz, fsl_get_sys_freq());
212
210 of_node_put(np); 213 of_node_put(np);
211 214
212 return 0; 215 return 0;
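
On 32-bit powerpc a plain 64-bit division would emit a libgcc call the kernel does not provide, hence do_div(). Its contract is slightly surprising: it divides its first argument in place and returns the remainder, so the quotient ends up in the variable. A sketch under that assumption:

u64 latency_ns = 12ULL * NSEC_PER_SEC;
u32 rem;

rem = do_div(latency_ns, fsl_get_sys_freq()); /* latency_ns = quotient */
policy->cpuinfo.transition_latency = latency_ns; /* the quotient, not rem */
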
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9f25f5296029..0eabd81e1a90 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
16 char *tmp; \ 16 char *tmp; \
17 \ 17 \
18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ 18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
19 sprintf(tmp, format, param); \ 19 if (likely(tmp)) { \
20 strcat(str, tmp); \ 20 sprintf(tmp, format, param); \
21 kfree(tmp); \ 21 strcat(str, tmp); \
22 kfree(tmp); \
23 } else { \
24 strcat(str, "kmalloc failure in SPRINTFCAT"); \
25 } \
22} 26}
23 27
24static void report_jump_idx(u32 status, char *outstr) 28static void report_jump_idx(u32 status, char *outstr)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
1009 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1009 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1010 DMA_BIDIRECTIONAL); 1010 DMA_BIDIRECTIONAL);
1011 } 1011 }
1012 cnt = unmap->map_cnt;
1012 mempool_free(unmap, __get_unmap_pool(cnt)->pool); 1013 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1013} 1014}
1014 1015
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1074 memset(unmap, 0, sizeof(*unmap)); 1075 memset(unmap, 0, sizeof(*unmap));
1075 kref_init(&unmap->kref); 1076 kref_init(&unmap->kref);
1076 unmap->dev = dev; 1077 unmap->dev = dev;
1078 unmap->map_cnt = nr;
1077 1079
1078 return unmap; 1080 return unmap;
1079} 1081}
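
__get_unmap_pool() picks the mempool by how many entries the unmap structure was allocated with, so the free path must know the original count. Recording it at allocation and re-reading it just before mempool_free() avoids freeing into the wrong pool after cnt has been reused as a loop counter; schematically:

/* allocation: remember which pool this came from */
unmap->map_cnt = nr;

/* release: cnt was clobbered while unmapping, so re-derive it */
cnt = unmap->map_cnt;
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
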
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1548 /* Disable BLOCK interrupts as well */ 1548 /* Disable BLOCK interrupts as well */
1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1550 1550
1551 err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
1552 IRQF_SHARED, "dw_dmac", dw);
1553 if (err)
1554 return err;
1555
1556 /* Create a pool of consistent memory blocks for hardware descriptors */ 1551 /* Create a pool of consistent memory blocks for hardware descriptors */
1557 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1552 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1558 sizeof(struct dw_desc), 4, 0); 1553 sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1563 1558
1564 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1559 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1565 1560
1561 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1562 "dw_dmac", dw);
1563 if (err)
1564 return err;
1565
1566 INIT_LIST_HEAD(&dw->dma.channels); 1566 INIT_LIST_HEAD(&dw->dma.channels);
1567 for (i = 0; i < nr_channels; i++) { 1567 for (i = 0; i < nr_channels; i++) {
1568 struct dw_dma_chan *dwc = &dw->chan[i]; 1568 struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1667 dw_dma_off(dw); 1667 dw_dma_off(dw);
1668 dma_async_device_unregister(&dw->dma); 1668 dma_async_device_unregister(&dw->dma);
1669 1669
1670 free_irq(chip->irq, dw);
1670 tasklet_kill(&dw->tasklet); 1671 tasklet_kill(&dw->tasklet);
1671 1672
1672 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1673 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
191 191
192static void mv_chan_activate(struct mv_xor_chan *chan) 192static void mv_chan_activate(struct mv_xor_chan *chan)
193{ 193{
194 u32 activation;
195
196 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 194 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
197 activation = readl_relaxed(XOR_ACTIVATION(chan)); 195
198 activation |= 0x1; 196 /* writel ensures all descriptors are flushed before activation */
199 writel_relaxed(activation, XOR_ACTIVATION(chan)); 197 writel(BIT(0), XOR_ACTIVATION(chan));
200} 198}
201 199
202static char mv_chan_is_busy(struct mv_xor_chan *chan) 200static char mv_chan_is_busy(struct mv_xor_chan *chan)
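
The mv_xor change swaps a relaxed read-modify-write for a single ordered store: writel() carries the write barrier that guarantees the in-memory descriptors are visible to the engine before the doorbell rings, which writel_relaxed() does not, and bit 0 is the only activation bit so nothing else in the register needs preserving. The resulting activate is one line:

/* writel ensures all descriptors are flushed before activation */
writel(BIT(0), XOR_ACTIVATION(chan));
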
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
113 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
114 unsigned sg_done; 114 unsigned sg_done;
115 struct sa11x0_dma_desc *txd_done; 115 struct sa11x0_dma_desc *txd_done;
116#ifdef CONFIG_PM_SLEEP
117 u32 dbs[2]; 116 u32 dbs[2];
118 u32 dbt[2]; 117 u32 dbt[2];
119 u32 dcsr; 118 u32 dcsr;
120#endif
121}; 119};
122 120
123struct sa11x0_dma_dev { 121struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
984 return 0; 982 return 0;
985} 983}
986 984
987#ifdef CONFIG_PM_SLEEP
988static int sa11x0_dma_suspend(struct device *dev) 985static int sa11x0_dma_suspend(struct device *dev)
989{ 986{
990 struct sa11x0_dma_dev *d = dev_get_drvdata(dev); 987 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
1054 1051
1055 return 0; 1052 return 0;
1056} 1053}
1057#endif
1058 1054
1059static const struct dev_pm_ops sa11x0_dma_pm_ops = { 1055static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1060 .suspend_noirq = sa11x0_dma_suspend, 1056 .suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
237 237
238#define LOCAL_BUS 0xffc0 238#define LOCAL_BUS 0xffc0
239 239
240/* arbitrarily chosen maximum range for physical DMA: 128 TB */ 240/* OHCI-1394's default upper bound for physical DMA: 4 GB */
241#define FW_MAX_PHYSICAL_RANGE (128ULL << 40) 241#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
242 242
243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); 243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
3716 version >> 16, version & 0xff, ohci->card.index, 3716 version >> 16, version & 0xff, ohci->card.index,
3717 ohci->n_ir, ohci->n_it, ohci->quirks, 3717 ohci->n_ir, ohci->n_it, ohci->quirks,
3718 reg_read(ohci, OHCI1394_PhyUpperBound) ? 3718 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3719 ", >4 GB phys DMA" : ""); 3719 ", physUB" : "");
3720 3720
3721 return 0; 3721 return 0;
3722 3722
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ee852c9925b..071c2c969eec 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -756,6 +756,7 @@ static const struct {
756 */ 756 */
757 { ACPI_SIG_IBFT }, 757 { ACPI_SIG_IBFT },
758 { "iBFT" }, 758 { "iBFT" },
759 { "BIFT" }, /* Broadcom iSCSI Offload */
759}; 760};
760 761
761static void __init acpi_find_ibft_region(void) 762static void __init acpi_find_ibft_region(void)
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index e73c6755a5eb..70304220a479 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
305 305
306 .ngpio = 50, 306 .ngpio = 50,
307 .have_blink = true, 307 .have_blink = true,
308 .regs = ichx_regs,
309 .reglen = ichx_reglen,
308}; 310};
309 311
310/* Intel 3100 */ 312/* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
324 .uses_gpe0 = true, 326 .uses_gpe0 = true,
325 327
326 .ngpio = 50, 328 .ngpio = 50,
329 .regs = ichx_regs,
330 .reglen = ichx_reglen,
327}; 331};
328 332
329/* ICH7 and ICH8-based */ 333/* ICH7 and ICH8-based */
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99a68310e7c0..3d53fd6880d1 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
894 dev_err(&spi->dev, "invalid spi-present-mask\n"); 894 dev_err(&spi->dev, "invalid spi-present-mask\n");
895 return -ENODEV; 895 return -ENODEV;
896 } 896 }
897 897 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
898 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) 898 if ((spi_present_mask & (1 << addr)))
899 chips++;
899 pullups[addr] = 0; 900 pullups[addr] = 0;
901 }
900 } else { 902 } else {
901 type = spi_get_device_id(spi)->driver_data; 903 type = spi_get_device_id(spi)->driver_data;
902 pdata = dev_get_platdata(&spi->dev); 904 pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
919 pullups[addr] = pdata->chip[addr].pullups; 921 pullups[addr] = pdata->chip[addr].pullups;
920 } 922 }
921 923
922 if (!chips)
923 return -ENODEV;
924
925 base = pdata->base; 924 base = pdata->base;
926 } 925 }
927 926
927 if (!chips)
928 return -ENODEV;
929
928 data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), 930 data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
929 GFP_KERNEL); 931 GFP_KERNEL);
930 if (!data) 932 if (!data)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index eb1c062e04b2..78b37f3febd3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,7 @@
29 * Jesse Barnes <jesse.barnes@intel.com> 29 * Jesse Barnes <jesse.barnes@intel.com>
30 */ 30 */
31 31
32#include <linux/kernel.h>
32#include <linux/export.h> 33#include <linux/export.h>
33#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
34 35
@@ -88,8 +89,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
88 struct drm_connector *connector; 89 struct drm_connector *connector;
89 struct drm_device *dev = encoder->dev; 90 struct drm_device *dev = encoder->dev;
90 91
91 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 92 /*
92 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 93 * We can expect this mutex to be locked if we are not panicking.
94 * Locking is currently fubar in the panic handler.
95 */
96 if (!oops_in_progress) {
97 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
98 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
99 }
100
93 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 101 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
94 if (connector->encoder == encoder) 102 if (connector->encoder == encoder)
95 return true; 103 return true;
@@ -113,7 +121,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
113 struct drm_encoder *encoder; 121 struct drm_encoder *encoder;
114 struct drm_device *dev = crtc->dev; 122 struct drm_device *dev = crtc->dev;
115 123
116 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 124 /*
125 * We can expect this mutex to be locked if we are not panicking.
126 * Locking is currently fubar in the panic handler.
127 */
128 if (!oops_in_progress)
129 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
130
117 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 131 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
118 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) 132 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
119 return true; 133 return true;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4e70de6ed468..b9159ade5e85 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1836,7 +1836,6 @@ int i915_driver_unload(struct drm_device *dev)
1836 flush_workqueue(dev_priv->wq); 1836 flush_workqueue(dev_priv->wq);
1837 1837
1838 mutex_lock(&dev->struct_mutex); 1838 mutex_lock(&dev->struct_mutex);
1839 i915_gem_free_all_phys_object(dev);
1840 i915_gem_cleanup_ringbuffer(dev); 1839 i915_gem_cleanup_ringbuffer(dev);
1841 i915_gem_context_fini(dev); 1840 i915_gem_context_fini(dev);
1842 WARN_ON(dev_priv->mm.aliasing_ppgtt); 1841 WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8f68678f361f..8e78703e45cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -251,18 +251,6 @@ struct intel_ddi_plls {
251#define WATCH_LISTS 0 251#define WATCH_LISTS 0
252#define WATCH_GTT 0 252#define WATCH_GTT 0
253 253
254#define I915_GEM_PHYS_CURSOR_0 1
255#define I915_GEM_PHYS_CURSOR_1 2
256#define I915_GEM_PHYS_OVERLAY_REGS 3
257#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
258
259struct drm_i915_gem_phys_object {
260 int id;
261 struct page **page_list;
262 drm_dma_handle_t *handle;
263 struct drm_i915_gem_object *cur_obj;
264};
265
266struct opregion_header; 254struct opregion_header;
267struct opregion_acpi; 255struct opregion_acpi;
268struct opregion_swsci; 256struct opregion_swsci;
@@ -1106,9 +1094,6 @@ struct i915_gem_mm {
1106 /** Bit 6 swizzling required for Y tiling */ 1094 /** Bit 6 swizzling required for Y tiling */
1107 uint32_t bit_6_swizzle_y; 1095 uint32_t bit_6_swizzle_y;
1108 1096
1109 /* storage for physical objects */
1110 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
1111
1112 /* accounting, useful for userland debugging */ 1097 /* accounting, useful for userland debugging */
1113 spinlock_t object_stat_lock; 1098 spinlock_t object_stat_lock;
1114 size_t object_memory; 1099 size_t object_memory;
@@ -1712,7 +1697,7 @@ struct drm_i915_gem_object {
1712 struct drm_file *pin_filp; 1697 struct drm_file *pin_filp;
1713 1698
1714 /** for phy allocated objects */ 1699 /** for phy allocated objects */
1715 struct drm_i915_gem_phys_object *phys_obj; 1700 drm_dma_handle_t *phys_handle;
1716 1701
1717 union { 1702 union {
1718 struct i915_gem_userptr { 1703 struct i915_gem_userptr {
@@ -1916,6 +1901,9 @@ struct drm_i915_cmd_table {
1916#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 1901#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1917#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1902#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1918 ((dev)->pdev->device & 0x00F0) == 0x0020) 1903 ((dev)->pdev->device & 0x00F0) == 0x0020)
1904/* ULX machines are also considered ULT. */
1905#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
1906 (dev)->pdev->device == 0x0A1E)
1919#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1907#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1920 1908
1921/* 1909/*
@@ -2172,10 +2160,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
2172#define PIN_MAPPABLE 0x1 2160#define PIN_MAPPABLE 0x1
2173#define PIN_NONBLOCK 0x2 2161#define PIN_NONBLOCK 0x2
2174#define PIN_GLOBAL 0x4 2162#define PIN_GLOBAL 0x4
2163#define PIN_OFFSET_BIAS 0x8
2164#define PIN_OFFSET_MASK (~4095)
2175int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2165int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2176 struct i915_address_space *vm, 2166 struct i915_address_space *vm,
2177 uint32_t alignment, 2167 uint32_t alignment,
2178 unsigned flags); 2168 uint64_t flags);
2179int __must_check i915_vma_unbind(struct i915_vma *vma); 2169int __must_check i915_vma_unbind(struct i915_vma *vma);
2180int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2170int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2181void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2171void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2297,13 +2287,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2297 u32 alignment, 2287 u32 alignment,
2298 struct intel_engine_cs *pipelined); 2288 struct intel_engine_cs *pipelined);
2299void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); 2289void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2300int i915_gem_attach_phys_object(struct drm_device *dev, 2290int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2301 struct drm_i915_gem_object *obj,
2302 int id,
2303 int align); 2291 int align);
2304void i915_gem_detach_phys_object(struct drm_device *dev,
2305 struct drm_i915_gem_object *obj);
2306void i915_gem_free_all_phys_object(struct drm_device *dev);
2307int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2292int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2308void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2293void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2309 2294
@@ -2430,6 +2415,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2430 int min_size, 2415 int min_size,
2431 unsigned alignment, 2416 unsigned alignment,
2432 unsigned cache_level, 2417 unsigned cache_level,
2418 unsigned long start,
2419 unsigned long end,
2433 unsigned flags); 2420 unsigned flags);
2434int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2421int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2435int i915_gem_evict_everything(struct drm_device *dev); 2422int i915_gem_evict_everything(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87e9b349ebef..bbcd35abf247 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -47,11 +47,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
47static void 47static void
48i915_gem_object_retire(struct drm_i915_gem_object *obj); 48i915_gem_object_retire(struct drm_i915_gem_object *obj);
49 49
50static int i915_gem_phys_pwrite(struct drm_device *dev,
51 struct drm_i915_gem_object *obj,
52 struct drm_i915_gem_pwrite *args,
53 struct drm_file *file);
54
55static void i915_gem_write_fence(struct drm_device *dev, int reg, 50static void i915_gem_write_fence(struct drm_device *dev, int reg,
56 struct drm_i915_gem_object *obj); 51 struct drm_i915_gem_object *obj);
57static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 52static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@ -214,6 +209,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
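
The hunk above replaces the old id-based phys-object pool with a single per-object drm_dma_handle_t. One detail worth noting is the early-return path: a second attach request against an already-allocated handle fails with -EBUSY if the existing address does not satisfy the requested alignment, using the usual power-of-two mask test. A minimal standalone sketch of that test; the sample address is invented, the 16 KiB value is the I830 cursor alignment used later in this patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* mirrors the check in i915_gem_object_attach_phys():
		 * align must be a power of two for (align - 1) to be a mask */
		uintptr_t vaddr = 0x12340100;	/* hypothetical CPU address */
		unsigned long align = 16 * 1024;

		if (vaddr & (align - 1))
			printf("0x%lx not aligned to %lu, would return -EBUSY\n",
			       (unsigned long)vaddr, align);
		return 0;
	}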
@@ -930,8 +1047,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3257,12 +3374,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
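
Note how the widened 64-bit flags word now carries a payload: when PIN_OFFSET_BIAS is set, a page-aligned minimum offset rides in the upper bits and is recovered with PIN_OFFSET_MASK. A standalone sketch of the pack/unpack round trip; the bit assignments below are assumptions for illustration, only the masking expressions come from the hunk above:

	#include <stdint.h>
	#include <stdio.h>

	#define PIN_MAPPABLE    (1ULL << 0)	/* assumed bit position */
	#define PIN_OFFSET_BIAS (1ULL << 3)	/* assumed bit position */
	#define PIN_OFFSET_MASK (~4095ULL)	/* page-aligned payload, assumed */

	int main(void)
	{
		uint64_t flags = PIN_MAPPABLE;

		/* pack: a page-aligned bias shares the flags word */
		flags |= (256 * 1024) | PIN_OFFSET_BIAS;

		/* unpack: the expression used in i915_gem_object_bind_to_vm() */
		uint64_t start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

		printf("search window starts at 0x%llx\n",
		       (unsigned long long)start);
		return 0;
	}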
@@ -3291,11 +3410,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3312,12 +3431,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3892,11 +4014,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -3913,15 +4054,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
-		if ((alignment &&
-		     vma->node.start & (alignment - 1)) ||
-		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+		if (i915_vma_misplaced(vma, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     i915_gem_obj_offset(obj, vm), alignment,
-			     flags & PIN_MAPPABLE,
+			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
@@ -4281,9 +4420,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4301,6 +4437,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4792,190 +4930,6 @@ i915_gem_load(struct drm_device *dev)
 	register_oom_notifier(&dev_priv->mm.oom_notifier);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
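
With the range passed in explicitly, the PIN_MAPPABLE special case (and its BUG_ON) disappears: (0, vm->total) means an unrestricted scan, and anything narrower selects the ranged variant. A small standalone sketch of that dispatch rule; the vm_total value is arbitrary:

	#include <stdio.h>

	static const char *scan_mode(unsigned long start, unsigned long end,
				     unsigned long vm_total)
	{
		/* same predicate as the hunk above */
		return (start != 0 || end != vm_total) ? "ranged" : "full";
	}

	int main(void)
	{
		unsigned long total = 2UL << 30;	/* say, a 2 GiB GTT */

		printf("%s\n", scan_mode(0, total, total));		/* full   */
		printf("%s\n", scan_mode(256 * 1024, total, total));	/* ranged */
		return 0;
	}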
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 008e208e9a3a..3a30133f93e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -548,7 +551,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;
 
 	flags = 0;
@@ -562,6 +565,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -595,6 +600,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
@@ -658,26 +693,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 
 	/* Unbind any ill-fitting objects or pin. */
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-		bool need_fence, need_mappable;
-
-		obj = vma->obj;
-
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		need_fence =
-			has_fenced_gpu_access &&
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(vma);
-
-		WARN_ON((need_mappable || need_fence) &&
-			!i915_is_ggtt(vma->vm));
-
-		if ((entry->alignment &&
-		     vma->node.start & (entry->alignment - 1)) ||
-		    (need_mappable && !obj->map_and_fenceable))
+		if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 			ret = i915_vma_unbind(vma);
 		else
 			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -778,9 +797,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -1040,6 +1059,25 @@ static int gen8_dispatch_bsd_ring(struct drm_device *dev,
 	}
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
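
The comment in eb_get_batch() is easy to verify with plain arithmetic: a negative delta applied to a batch placed below the delta's magnitude wraps a 32-bit relocation address. A standalone illustration; the delta value is invented, 256 KiB is the BATCH_OFFSET_BIAS constant from this patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t delta = -128 * 1024;	/* hypothetical SNA delta */

		uint32_t low_batch = 64 * 1024;	/* below the bias: wraps */
		uint32_t ok_batch = 256 * 1024;	/* at BATCH_OFFSET_BIAS */

		printf("low: 0x%08x\n", low_batch + delta);	/* 0xffff0000 */
		printf("ok:  0x%08x\n", ok_batch + delta);	/* 0x00020000 */
		return 0;
	}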
@@ -1220,7 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1422,18 +1460,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}
 
@@ -1484,14 +1525,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}
 
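
Both execbuffer ioctls now copy back only the offset member of each entry instead of the whole array, so fields userspace may have reused in the meantime (handles, flags) are never clobbered by the kernel's scratch copy. A userspace-style sketch of the idea, with memcpy standing in for __copy_to_user:

	#include <stdio.h>
	#include <string.h>

	struct exec_object2 {
		unsigned long long handle, offset, flags;
	};

	int main(void)
	{
		struct exec_object2 user = { .handle = 7, .offset = 0, .flags = 42 };
		struct exec_object2 kscratch = { .handle = 7, .offset = 0x10000 };

		/* copy back only the field the kernel actually updated */
		memcpy(&user.offset, &kscratch.offset, sizeof(user.offset));

		printf("offset=0x%llx flags=%llu (flags preserved)\n",
		       user.offset, user.flags);
		return 0;
	}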
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 94916362b61c..931b906f292a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,25 +35,35 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+	if (i915.enable_ppgtt == 0)
 		return false;
 
 	if (i915.enable_ppgtt == 1 && full)
 		return false;
 
+	return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+		return 0;
+
+	if (enable_ppgtt == 1)
+		return 1;
+
+	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+		return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
-		return false;
+		return 0;
 	}
 #endif
 
-	/* Full ppgtt disabled by default for now due to issues. */
-	if (full)
-		return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
-	else
-		return HAS_ALIASING_PPGTT(dev);
+	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
 
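
sanitize_enable_ppgtt() reduces the module parameter to an effective mode exactly once, so intel_enable_ppgtt() only has to compare against the sanitized value. A standalone sketch of the mapping, with the HAS_* capability macros and the VT-d-on-SNB case replaced by plain booleans:

	#include <stdio.h>

	/* stand-ins for HAS_ALIASING_PPGTT()/HAS_PPGTT() and SNB + VT-d */
	static int sanitize(int param, int has_aliasing, int has_full, int snb_vtd)
	{
		if (param == 0 || !has_aliasing)
			return 0;
		if (param == 1)
			return 1;
		if (param == 2 && has_full)
			return 2;
		if (snb_vtd)
			return 0;
		return has_aliasing ? 1 : 0;
	}

	int main(void)
	{
		int p;

		for (p = 0; p <= 2; p++)	/* aliasing-only hardware */
			printf("enable_ppgtt=%d -> mode %d\n",
			       p, sanitize(p, 1, 0, 0));
		return 0;
	}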
@@ -1039,7 +1049,9 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
 		if (ret)
 			return ret;
 
@@ -2052,6 +2064,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	if (intel_iommu_gfx_mapped)
 		DRM_INFO("VT-d active for gfx access\n");
 #endif
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities. We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 2945f57c53ee..6b6509656f16 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -598,47 +598,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 	dev_priv->vbt.edp_pps = *edp_pps;
 
-	dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-		DP_LINK_BW_1_62;
+	switch (edp_link_params->rate) {
+	case EDP_RATE_1_62:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+		break;
+	case EDP_RATE_2_7:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+			      edp_link_params->rate);
+		break;
+	}
+
 	switch (edp_link_params->lanes) {
-	case 0:
+	case EDP_LANE_1:
 		dev_priv->vbt.edp_lanes = 1;
 		break;
-	case 1:
+	case EDP_LANE_2:
 		dev_priv->vbt.edp_lanes = 2;
 		break;
-	case 3:
-	default:
+	case EDP_LANE_4:
 		dev_priv->vbt.edp_lanes = 4;
 		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+			      edp_link_params->lanes);
+		break;
 	}
+
 	switch (edp_link_params->preemphasis) {
-	case 0:
+	case EDP_PREEMPHASIS_NONE:
 		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
 		break;
-	case 1:
+	case EDP_PREEMPHASIS_3_5dB:
 		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
 		break;
-	case 2:
+	case EDP_PREEMPHASIS_6dB:
 		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
 		break;
-	case 3:
+	case EDP_PREEMPHASIS_9_5dB:
 		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
 		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+			      edp_link_params->preemphasis);
+		break;
 	}
+
 	switch (edp_link_params->vswing) {
-	case 0:
+	case EDP_VSWING_0_4V:
 		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
 		break;
-	case 1:
+	case EDP_VSWING_0_6V:
 		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
 		break;
-	case 2:
+	case EDP_VSWING_0_8V:
 		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
 		break;
-	case 3:
+	case EDP_VSWING_1_2V:
 		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
 		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+			      edp_link_params->vswing);
+		break;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1ce4ad4626e4..7a4c7c98378a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8112,14 +8112,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -8127,10 +8125,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
  finish:
 	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
@@ -11808,15 +11803,6 @@ void intel_modeset_init(struct drm_device *dev)
 	}
 }
 
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
-	connector->base.dpms = DRM_MODE_DPMS_OFF;
-	connector->base.encoder = NULL;
-	connector->encoder->connectors_active = false;
-	connector->encoder->base.crtc = NULL;
-}
-
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
 	struct intel_connector *connector;
@@ -11905,8 +11891,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		if (connector->encoder->base.crtc != &crtc->base)
 			continue;
 
-		intel_connector_break_all_links(connector);
+		connector->base.dpms = DRM_MODE_DPMS_OFF;
+		connector->base.encoder = NULL;
 	}
+	/* multiple connectors may have the same encoder:
+	 * handle them and break crtc link separately */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head)
+		if (connector->encoder->base.crtc == &crtc->base) {
+			connector->encoder->base.crtc = NULL;
+			connector->encoder->connectors_active = false;
+		}
 
 	WARN_ON(crtc->active);
 	crtc->base.enabled = false;
@@ -11997,6 +11992,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 			      encoder->base.name);
 		encoder->disable(encoder);
 	}
+	encoder->base.crtc = NULL;
+	encoder->connectors_active = false;
 
 	/* Inconsistent output/port/pipe state happens presumably due to
 	 * a bug in one of the get_hw_state functions. Or someplace else
@@ -12007,8 +12004,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 				    base.head) {
 			if (connector->encoder != encoder)
 				continue;
-
-			intel_connector_break_all_links(connector);
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
 		}
 	}
 	/* Enabled encoders without active connectors will be fixed in
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2d5d9b010073..52fda950fd2a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -123,7 +123,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	case DP_LINK_BW_2_7:
 		break;
 	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-		if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+		     INTEL_INFO(dev)->gen >= 8) &&
 		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
 			max_link_bw = DP_LINK_BW_5_4;
 		else
@@ -138,6 +139,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	return max_link_bw;
 }
 
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	u8 source_max, sink_max;
+
+	source_max = 4;
+	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+		source_max = 2;
+
+	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+	return min(source_max, sink_max);
+}
+
 /*
  * The units on the numbers in the next two are... bizarre. Examples will
  * make it clearer; this one parallels an example in the eDP spec.
@@ -188,7 +205,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	}
 
 	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
-	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+	max_lanes = intel_dp_max_lane_count(intel_dp);
 
 	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 	mode_rate = intel_dp_link_required(target_clock, 18);
@@ -789,8 +806,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	struct intel_crtc *intel_crtc = encoder->new_crtc;
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int lane_count, clock;
-	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+	int min_lane_count = 1;
+	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	/* Conveniently, the link BW constants become indices with a shift...*/
+	int min_clock = 0;
 	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
 	int bpp, mode_rate;
 	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -824,19 +843,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
-	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
-	    dev_priv->vbt.edp_bpp < bpp) {
-		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-			      dev_priv->vbt.edp_bpp);
-		bpp = dev_priv->vbt.edp_bpp;
+	if (is_edp(intel_dp)) {
+		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+				      dev_priv->vbt.edp_bpp);
+			bpp = dev_priv->vbt.edp_bpp;
+		}
+
+		if (IS_BROADWELL(dev)) {
+			/* Yes, it's an ugly hack. */
+			min_lane_count = max_lane_count;
+			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+				      min_lane_count);
+		} else if (dev_priv->vbt.edp_lanes) {
+			min_lane_count = min(dev_priv->vbt.edp_lanes,
+					     max_lane_count);
+			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+				      min_lane_count);
+		}
+
+		if (dev_priv->vbt.edp_rate) {
+			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+				      bws[min_clock]);
+		}
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 						   bpp);
 
-		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
-			for (clock = 0; clock <= max_clock; clock++) {
+		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+			for (clock = min_clock; clock <= max_clock; clock++) {
 				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
 				link_avail = intel_dp_max_data_rate(link_clock,
 								    lane_count);
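
The nested loops still walk lanes and link rates from smallest to largest, just starting at the VBT-provided floor; the first (bpp, lanes, rate) combination whose available bandwidth covers the mode wins. A rough standalone version of that feasibility test, using the conventional 8b/10b accounting (the exact rounding in the driver's helpers may differ):

	#include <stdio.h>

	static int link_required(int pixel_khz, int bpp)
	{
		return (pixel_khz * bpp + 9) / 10;	/* approximate */
	}

	static int max_data_rate(int link_khz, int lanes)
	{
		return (link_khz * lanes * 8) / 10;	/* 8b/10b overhead */
	}

	int main(void)
	{
		/* 1920x1080@60 (148.5 MHz) at 24 bpp over 2 lanes of HBR */
		int need = link_required(148500, 24);
		int have = max_data_rate(270000, 2);

		printf("need %d, have %d -> %s\n", need, have,
		       need <= have ? "fits" : "does not fit");
		return 0;
	}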
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6ea2d75464da..088fe9378a4c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
 							height);
 	}
 
+	/* No preferred mode marked by the EDID? Are there any modes? */
+	if (!modes[i] && !list_empty(&connector->modes)) {
+		DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+			      connector->name);
+		modes[i] = list_first_entry(&connector->modes,
+					    struct drm_display_mode,
+					    head);
+	}
+
 	/* last resort: use current mode */
 	if (!modes[i]) {
 		/*
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0396d1312b5c..daa118978eec 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
 					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
 		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		/* Cast to make sparse happy, but it's wc memory anyway, so
 		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 						i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
 		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 2e1338a5d488..5e6c888b4928 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -567,6 +567,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 freq;
 	unsigned long flags;
+	u64 n;
 
 	if (!panel->backlight.present || pipe == INVALID_PIPE)
 		return;
@@ -577,10 +578,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 
 	/* scale to hardware max, but be careful to not overflow */
 	freq = panel->backlight.max;
-	if (freq < max)
-		level = level * freq / max;
-	else
-		level = freq / max * level;
+	n = (u64)level * freq;
+	do_div(n, max);
+	level = n;
 
 	panel->backlight.level = level;
 	if (panel->backlight.device)
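
The old two-branch scaling either risked a 32-bit overflow (level * freq can exceed 2^32) or threw away precision by dividing first; widening to 64 bits and using do_div() avoids both. The effect is easy to reproduce in userspace (values picked to show the precision loss of the old else-branch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t level = 30000, max = 65535, freq = 120000;

		/* old else-branch: divide first, lose everything below max */
		uint32_t lossy = freq / max * level;	/* 1 * 30000 */

		/* patched version: widen, multiply, then divide */
		uint64_t n = (uint64_t)level * freq;
		uint32_t exact = (uint32_t)(n / max);	/* 54932 */

		printf("lossy=%u exact=%u\n", lossy, exact);
		return 0;
	}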
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b86b58c44228..906d06f73e51 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2113,6 +2113,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
 	}
 }
 
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+				    uint16_t wm[5], uint16_t min)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+	if (wm[0] >= min)
+		return false;
+
+	wm[0] = max(wm[0], min);
+	for (level = 1; level <= max_level; level++)
+		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+	return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool changed;
+
+	/*
+	 * The BIOS provided WM memory latency values are often
+	 * inadequate for high resolution displays. Adjust them.
+	 */
+	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+	if (!changed)
+		return;
+
+	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
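
In the 0.1 µs units used by these tables, the quirk raises WM0 to at least 1.2 µs and every higher level to at least DIV_ROUND_UP(12, 5) = 3, leaving already-sane values alone. A compact standalone rendition (the BIOS latency values are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		uint16_t wm[5] = { 7, 2, 40, 80, 0 };	/* 0.1 us units */
		uint16_t min = 12;
		int level;

		if (wm[0] < min) {		/* same trigger as the hunk */
			wm[0] = min;
			for (level = 1; level <= 4; level++)
				if (wm[level] < DIV_ROUND_UP(min, 5))
					wm[level] = DIV_ROUND_UP(min, 5);
		}

		for (level = 0; level <= 4; level++)
			printf("WM%d = %u.%u us\n", level,
			       wm[level] / 10, wm[level] % 10);
		return 0;
	}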
@@ -2130,6 +2167,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+	if (IS_GEN6(dev))
+		snb_wm_latency_quirk(dev);
 }
 
 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9cd99d9676fd..2f5d5d3f0043 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
 	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
 			   _MASKED_BIT_DISABLE(0xffff));
+	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+			   _MASKED_BIT_DISABLE(0xffff));
 	/* something from same cacheline, but !FORCEWAKE_VLV */
 	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
 	}
 
 	if (outp == 8)
-		return false;
+		return conf;
 
 	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
 	if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index 1dc37b1ddbfa..b0d0fb2f4d08 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 {
 	mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
 	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
-	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
 
 	mmio_list(0x40800c, 0x00000000,  8, 1);
 	mmio_list(0x408010, 0x80000000,  0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
 	mmio_list(0x418e24, 0x00000000,  8, 0);
 	mmio_list(0x418e28, 0x80000030,  0, 0);
 
+	mmio_list(0x4064c8, 0x018002c0,  0, 0);
+
 	mmio_list(0x418810, 0x80000000, 12, 2);
 	mmio_list(0x419848, 0x10000000, 12, 2);
 	mmio_list(0x419c2c, 0x10000000, 12, 2);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index fb0b6b2d1427..222e8ebb669d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
 	 */
 	i = 16;
 	do {
-		if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
+		u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
+		if (data == 0xaa55)
 			break;
 	} while (i--);
 
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
 		goto out;
 
 	/* read entire bios image to system memory */
-	bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
+	bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
+	bios->size = bios->size * 512;
 	if (!bios->size)
 		goto out;
 
 	bios->data = kmalloc(bios->size, GFP_KERNEL);
 	if (bios->data) {
-		for (i = 0; i < bios->size; i+=4)
-			nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+		for (i = 0; i < bios->size; i += 4)
+			((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
 	}
 
 	/* check the PCI record header */
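
Two things change here: the PROM dwords are byte-swapped with le32_to_cpu() before the little-endian fields are extracted, and the shadow copy is written straight into bios->data instead of through nv_wo32(). The field extraction is simple to check standalone; the sample dword is invented, and treating byte 2 as the image size in 512-byte blocks follows the code above:

	#include <stdint.h>
	#include <stdio.h>

	/* identity on a little-endian host; a byte swap on big-endian */
	static uint32_t le32_to_cpu_sketch(uint32_t v) { return v; }

	int main(void)
	{
		uint32_t raw = 0x007eaa55;	/* hypothetical first PROM dword */
		uint32_t data = le32_to_cpu_sketch(raw);

		if ((data & 0xffff) == 0xaa55)	/* ROM signature in low 16 bits */
			printf("image size = %u bytes\n",
			       ((data >> 16) & 0xff) * 512);	/* 126 * 512 */
		return 0;
	}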
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
 	case 0x00: return 2;
 	case 0x19: return 1;
 	case 0x1c: return 0;
+	case 0x1e: return 2;
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 83face3f608f..279206997e5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
-		return false;
-
 	dhandle = ACPI_HANDLE(&pdev->dev);
 	if (!dhandle)
 		return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 3ff030dc1ee3..da764a4ed958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
-	mutex_unlock(&chan->cli->mutex);
 	if (ret)
 		goto fail_unreserve;
+	mutex_unlock(&chan->cli->mutex);
 
 	/* Update the crtc struct and cleanup */
 	crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 967d193d36d0..76c30f2da3fb 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		radeon_crtc->enabled = true;
-		/* adjust pm to dpms changes BEFORE enabling crtcs */
-		radeon_pm_compute_clocks(rdev);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		radeon_crtc->enabled = false;
-		/* adjust pm to dpms changes AFTER disabling crtcs */
-		radeon_pm_compute_clocks(rdev);
 		break;
 	}
+	/* adjust pm to dpms */
+	radeon_pm_compute_clocks(rdev);
 }
 
 static void
@@ -1208,27 +1206,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
 
 	/* Set NUM_BANKS. */
 	if (rdev->family >= CHIP_TAHITI) {
-		unsigned tileb, index, num_banks, tile_split_bytes;
-
-		/* Calculate the macrotile mode index. */
-		tile_split_bytes = 64 << tile_split;
-		tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
-		tileb = min(tile_split_bytes, tileb);
-
-		for (index = 0; tileb > 64; index++) {
-			tileb >>= 1;
-		}
-
-		if (index >= 16) {
-			DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
-				  target_fb->bits_per_pixel, tile_split);
-			return -EINVAL;
-		}
-
-		if (rdev->family >= CHIP_BONAIRE)
-			num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
-		else
-			num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+		unsigned index, num_banks;
+
+		if (rdev->family >= CHIP_BONAIRE) {
+			unsigned tileb, tile_split_bytes;
+
+			/* Calculate the macrotile mode index. */
+			tile_split_bytes = 64 << tile_split;
+			tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+			tileb = min(tile_split_bytes, tileb);
+
+			for (index = 0; tileb > 64; index++)
+				tileb >>= 1;
+
+			if (index >= 16) {
+				DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+					  target_fb->bits_per_pixel, tile_split);
+				return -EINVAL;
+			}
+
+			num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+		} else {
+			switch (target_fb->bits_per_pixel) {
+			case 8:
+				index = 10;
+				break;
+			case 16:
+				index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+				break;
+			default:
+			case 32:
+				index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+				break;
+			}
+
+			num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+		}
+
 		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
 	} else {
 		/* NI and older. */
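Note: for pre-Bonaire (SI) parts the rewritten branch picks a fixed 2D-scanout tiling index per bpp instead of deriving one, while for CIK the macrotile index is still the number of halvings of the tile size down to 64 bytes, i.e. roughly log2(tileb/64). A small self-contained sketch of that computation (illustrative, not the driver code):

	#include <assert.h>

	static unsigned macrotile_index(unsigned tile_split_bytes, unsigned bpp)
	{
		unsigned tileb = 8 * 8 * bpp / 8;	/* bytes in an 8x8 tile */
		unsigned index;

		if (tile_split_bytes < tileb)
			tileb = tile_split_bytes;
		for (index = 0; tileb > 64; index++)
			tileb >>= 1;
		return index;
	}

	int main(void)
	{
		assert(macrotile_index(64, 32) == 0);	/* already 64 bytes */
		assert(macrotile_index(512, 32) == 2);	/* 256 -> 128 -> 64 */
		return 0;
	}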
@@ -1751,8 +1765,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 	}
 	/* otherwise, pick one of the plls */
 	if ((rdev->family == CHIP_KAVERI) ||
-	    (rdev->family == CHIP_KABINI)) {
-		/* KB/KV has PPLL1 and PPLL2 */
+	    (rdev->family == CHIP_KABINI) ||
+	    (rdev->family == CHIP_MULLINS)) {
+		/* KB/KV/ML has PPLL1 and PPLL2 */
 		pll_in_use = radeon_get_pll_use_mask(crtc);
 		if (!(pll_in_use & (1 << ATOM_PPLL2)))
 			return ATOM_PPLL2;
@@ -1916,6 +1931,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
 		is_tvcv = true;
 
+	if (!radeon_crtc->adjusted_clock)
+		return -EINVAL;
+
 	atombios_crtc_set_pll(crtc, adjusted_mode);
 
 	if (ASIC_IS_DCE4(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a54c44181a0f..c5b1f2da3954 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -390,11 +390,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
 	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3))
+	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 
-	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3))
+	if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
 		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
 			      buf[0], buf[1], buf[2]);
 }
@@ -443,21 +443,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
 
 	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
 		/* DP bridge chips */
-		drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-				  DP_EDP_CONFIGURATION_CAP, &tmp);
-		if (tmp & 1)
-			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
-		else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
-			 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
-			panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
-		else
-			panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+			else
+				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+		}
 	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
 		/* eDP */
-		drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
-				  DP_EDP_CONFIGURATION_CAP, &tmp);
-		if (tmp & 1)
-			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		}
 	}
 
 	return panel_mode;
@@ -833,11 +835,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
 	else
 		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-	drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp);
-	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
-		dp_info.tp3_supported = true;
-	else
+	if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+	    == 1) {
+		if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+			dp_info.tp3_supported = true;
+		else
+			dp_info.tp3_supported = false;
+	} else {
 		dp_info.tp3_supported = false;
+	}
 
 	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
 	dp_info.rdev = rdev;
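Note: both hunks above rely on the same return convention: the DPCD helpers return the number of bytes transferred on success or a negative errno on failure, so truthiness is the wrong success test (a failed read returns e.g. -EIO, which is also nonzero). Comparing the result against the requested length is what actually validates the read. A hedged sketch with a stand-in for the helper:

	#include <errno.h>
	#include <sys/types.h>

	/* stand-in for drm_dp_dpcd_read(): bytes transferred or -errno */
	static ssize_t dpcd_read_stub(unsigned int offset, void *buf, size_t size)
	{
		(void)offset; (void)buf;
		return (ssize_t)size;	/* pretend the full read succeeded */
	}

	/* only trust the buffer when all three bytes arrived */
	static int read_sink_oui(unsigned char oui[3])
	{
		return dpcd_read_stub(0x400, oui, 3) == 3 ? 0 : -EIO;
	}

	int main(void)
	{
		unsigned char oui[3];
		return read_sink_oui(oui);
	}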
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a5181404f130..69a00d64716e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
 MODULE_FIRMWARE("radeon/KABINI_mec.bin");
 MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
 MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
+MODULE_FIRMWARE("radeon/MULLINS_me.bin");
+MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
+MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
+MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
+MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] =
 	0xd80c, 0xff000ff0, 0x00000100
 };
 
+static const u32 godavari_golden_registers[] =
+{
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x98302, 0xf00fffff, 0x00000400,
+	0x6130, 0xffffffff, 0x00010000,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0xf0311fff, 0x80300000,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x000fffff, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x30a04, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xd014, 0x00010000, 0x00810001,
+	0xd814, 0x00010000, 0x00810001,
+	0x3e78, 0x00000001, 0x00000002,
+	0xc768, 0x00000008, 0x00000008,
+	0xc770, 0x00000f00, 0x00000800,
+	0xc774, 0x00000f00, 0x00000800,
+	0xc798, 0x00ffffff, 0x00ff7fbf,
+	0xc79c, 0x00ffffff, 0x00ff7faf,
+	0x8c00, 0x000000ff, 0x00000001,
+	0x214f8, 0x01ff01ff, 0x00000002,
+	0x21498, 0x007ff800, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x30934, 0xffffffff, 0x00000000
+};
+
+
 static void cik_init_golden_registers(struct radeon_device *rdev)
 {
 	switch (rdev->family) {
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
 						 kalindi_golden_spm_registers,
 						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
 		break;
+	case CHIP_MULLINS:
+		radeon_program_register_sequence(rdev,
+						 kalindi_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 godavari_golden_registers,
+						 (const u32)ARRAY_SIZE(godavari_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_common_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+		radeon_program_register_sequence(rdev,
+						 kalindi_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		break;
 	case CHIP_KAVERI:
 		radeon_program_register_sequence(rdev,
 						 spectre_mgcg_cgcg_init,
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev)
 		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
 		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
 		break;
+	case CHIP_MULLINS:
+		chip_name = "MULLINS";
+		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+		me_req_size = CIK_ME_UCODE_SIZE * 4;
+		ce_req_size = CIK_CE_UCODE_SIZE * 4;
+		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+		rlc_req_size = ML_RLC_UCODE_SIZE * 4;
+		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+		break;
 	default: BUG();
 	}
 
1839 1905
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_KABINI:
+	case CHIP_MULLINS:
 	default:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 2;
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
@@ -5803,6 +5871,9 @@ static int cik_rlc_resume(struct radeon_device *rdev)
 	case CHIP_KABINI:
 		size = KB_RLC_UCODE_SIZE;
 		break;
+	case CHIP_MULLINS:
+		size = ML_RLC_UCODE_SIZE;
+		break;
 	}
 
 	cik_rlc_stop(rdev);
@@ -6551,6 +6622,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
 		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_KABINI:
+	case CHIP_MULLINS:
 		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
 		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
@@ -6696,6 +6768,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
 		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
 		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
 	}
+	/* pflip */
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	/* dac hotplug */
 	WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -7052,6 +7137,25 @@ int cik_irq_set(struct radeon_device *rdev)
 		WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
 	}
 
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+	}
+
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
 	WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -7088,6 +7192,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 	rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
 	rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
 
+	rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+		EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+		EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+			EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_CLEAR);
 	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
 	if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -7098,6 +7225,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
 	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
 		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
 			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
 		if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -7109,6 +7242,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 	}
 
 	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+			       GRPH_PFLIP_INT_CLEAR);
 		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
 			WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
 		if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -7460,6 +7599,15 @@ restart_ih:
 			break;
 		}
 		break;
+	case 8: /* D1 page flip */
+	case 10: /* D2 page flip */
+	case 12: /* D3 page flip */
+	case 14: /* D4 page flip */
+	case 16: /* D5 page flip */
+	case 18: /* D6 page flip */
+		DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+		radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+		break;
 	case 42: /* HPD hotplug */
 		switch (src_data) {
 		case 0:
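Note: the new page-flip cases decode the CRTC from the interrupt source ID arithmetically: source IDs 8, 10, 12, 14, 16 and 18 correspond to D1..D6, so (src_id - 8) >> 1 recovers the zero-based CRTC index. A quick check of that mapping:

	#include <assert.h>

	static int pflip_src_to_crtc(int src_id)
	{
		return (src_id - 8) >> 1;
	}

	int main(void)
	{
		assert(pflip_src_to_crtc(8) == 0);	/* D1 */
		assert(pflip_src_to_crtc(12) == 2);	/* D3 */
		assert(pflip_src_to_crtc(18) == 5);	/* D6 */
		return 0;
	}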
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 3c2407bad1f0..1347162ca1a4 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
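Note: this one-liner, repeated for the other copy functions in this series, restores error-path symmetry: the semaphore allocated earlier in the function was leaked whenever radeon_fence_emit() failed. A minimal sketch of the pattern, with illustrative names rather than the radeon API:

	#include <stdlib.h>

	struct sem { int dummy; };

	static struct sem *sem_alloc(void) { return malloc(sizeof(struct sem)); }
	static void sem_free(struct sem *s) { free(s); }
	static int fence_emit(void) { return -1; /* simulate failure */ }

	int copy_dma(void)
	{
		struct sem *sem = sem_alloc();
		int r;

		if (!sem)
			return -1;
		r = fence_emit();
		if (r) {
			sem_free(sem);	/* previously leaked on this path */
			return r;
		}
		sem_free(sem);
		return 0;
	}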
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 0b27ea08c299..ae88660f34ea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -889,6 +889,15 @@
 #       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
 #define DISP_INTERRUPT_STATUS_CONTINUE6                 0x6780
 
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL                                0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
 #define DAC_AUTODETECT_INT_CONTROL                      0x67c8
 
 #define DC_HPD1_INT_STATUS                              0x601c
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0318230ef274..653eff814504 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4355,7 +4355,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
-	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 	u32 dma_cntl, dma_cntl1 = 0;
 	u32 thermal_int = 0;
@@ -4538,15 +4537,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
 	}
 
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+	       GRPH_PFLIP_INT_MASK);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+	       GRPH_PFLIP_INT_MASK);
 	if (rdev->num_crtc >= 4) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
 	}
 	if (rdev->num_crtc >= 6) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
 	}
 
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4935,6 +4940,15 @@ restart_ih:
 			break;
 		}
 		break;
+	case 8: /* D1 page flip */
+	case 10: /* D2 page flip */
+	case 12: /* D3 page flip */
+	case 14: /* D4 page flip */
+	case 16: /* D5 page flip */
+	case 18: /* D6 page flip */
+		DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+		radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+		break;
 	case 42: /* HPD hotplug */
 		switch (src_data) {
 		case 0:
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 287fe966d7de..478caefe0fef 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 16ec9d56a234..3f6e817d97ee 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev,
 	return 0;
 }
 
+static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
+				   struct sumo_vid_mapping_table *vid_mapping_table,
+				   u32 vid_2bit)
+{
+	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 i;
+
+	if (vddc_sclk_table && vddc_sclk_table->count) {
+		if (vid_2bit < vddc_sclk_table->count)
+			return vddc_sclk_table->entries[vid_2bit].v;
+		else
+			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+	} else {
+		for (i = 0; i < vid_mapping_table->num_entries; i++) {
+			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+				return vid_mapping_table->entries[i].vid_7bit;
+		}
+		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+	}
+}
+
+static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
+				   struct sumo_vid_mapping_table *vid_mapping_table,
+				   u32 vid_7bit)
+{
+	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 i;
+
+	if (vddc_sclk_table && vddc_sclk_table->count) {
+		for (i = 0; i < vddc_sclk_table->count; i++) {
+			if (vddc_sclk_table->entries[i].v == vid_7bit)
+				return i;
+		}
+		return vddc_sclk_table->count - 1;
+	} else {
+		for (i = 0; i < vid_mapping_table->num_entries; i++) {
+			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+				return vid_mapping_table->entries[i].vid_2bit;
+		}
+
+		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+	}
+}
+
 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
 					    u16 voltage)
 {
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
 					       u32 vid_2bit)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
-	u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
-						 &pi->sys_info.vid_mapping_table,
-						 vid_2bit);
+	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
+					       &pi->sys_info.vid_mapping_table,
+					       vid_2bit);
 
 	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
 }
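Note: the two new helpers prefer the explicit vddc-on-sclk dependency table and only fall back to the vid mapping table, clamping out-of-range indices to the last entry rather than reading past the end. A compact sketch of that lookup order (the types and the default value here are stand-ins, not the radeon structures):

	struct dep_table { unsigned count; unsigned v[8]; };

	static unsigned vid2_to_vid7(const struct dep_table *t, unsigned vid_2bit)
	{
		if (t && t->count) {
			/* clamp to the last entry rather than overrun */
			unsigned i = vid_2bit < t->count ? vid_2bit : t->count - 1;
			return t->v[i];
		}
		return 0;	/* the driver falls back to the mapping table here */
	}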
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
 
 static int kv_unforce_levels(struct radeon_device *rdev)
 {
-	if (rdev->family == CHIP_KABINI)
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
 		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
 	else
 		return kv_set_enabled_levels(rdev);
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 	struct radeon_uvd_clock_voltage_dependency_table *table =
 		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
 	int ret;
+	u32 mask;
 
 	if (!gate) {
-		if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+		if (table->count)
 			pi->uvd_boot_level = table->count - 1;
 		else
 			pi->uvd_boot_level = 0;
 
+		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+			mask = 1 << pi->uvd_boot_level;
+		} else {
+			mask = 0x1f;
+		}
+
 		ret = kv_copy_bytes_to_smc(rdev,
 					   pi->dpm_table_start +
 					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 		if (ret)
 			return ret;
 
-		if (!pi->caps_uvd_dpm ||
-		    pi->caps_stable_p_state)
-			kv_send_msg_to_smc_with_parameter(rdev,
-							  PPSMC_MSG_UVDDPM_SetEnabledMask,
-							  (1 << pi->uvd_boot_level));
+		kv_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_UVDDPM_SetEnabledMask,
+						  mask);
 	}
 
 	return kv_enable_uvd_dpm(rdev, !gate);
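Note: the restructured logic computes the SMU enabled-level mask unconditionally: with UVD DPM disabled or stable-p-state, only the boot level is enabled (1 << boot_level); otherwise all five levels are (0x1f). Previously the message was skipped entirely in the second case. A one-liner check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned boot_level = 3;
		/* prints "forced: 0x08  all: 0x1f" */
		printf("forced: 0x%02x  all: 0x%02x\n", 1u << boot_level, 0x1fu);
		return 0;
	}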
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
 	if (pi->acp_power_gated == gate)
 		return;
 
-	if (rdev->family == CHIP_KABINI)
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
 		return;
 
 	pi->acp_power_gated = gate;
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 		}
 	}
 
-	if (rdev->family == CHIP_KABINI) {
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
 		if (pi->enable_dpm) {
 			kv_set_valid_clock_range(rdev, new_ps);
 			kv_update_dfs_bypass_settings(rdev, new_ps);
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 				return ret;
 			}
 			kv_update_sclk_t(rdev);
+			if (rdev->family == CHIP_MULLINS)
+				kv_enable_nb_dpm(rdev);
 		}
 	} else {
 		if (pi->enable_dpm) {
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
 
-	if (rdev->family == CHIP_KABINI) {
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
 		kv_force_lowest_valid(rdev);
 		kv_init_graphics_levels(rdev);
 		kv_program_bootup_state(rdev);
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
 static void kv_patch_voltage_values(struct radeon_device *rdev)
 {
 	int i;
-	struct radeon_uvd_clock_voltage_dependency_table *table =
+	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
 		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct radeon_vce_clock_voltage_dependency_table *vce_table =
+		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table *samu_table =
+		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+	struct radeon_clock_voltage_dependency_table *acp_table =
+		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
 
-	if (table->count) {
-		for (i = 0; i < table->count; i++)
-			table->entries[i].v =
-				kv_convert_8bit_index_to_voltage(rdev,
-								 table->entries[i].v);
+	if (uvd_table->count) {
+		for (i = 0; i < uvd_table->count; i++)
+			uvd_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 uvd_table->entries[i].v);
+	}
+
+	if (vce_table->count) {
+		for (i = 0; i < vce_table->count; i++)
+			vce_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 vce_table->entries[i].v);
+	}
+
+	if (samu_table->count) {
+		for (i = 0; i < samu_table->count; i++)
+			samu_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 samu_table->entries[i].v);
+	}
+
+	if (acp_table->count) {
+		for (i = 0; i < acp_table->count; i++)
+			acp_table->entries[i].v =
+				kv_convert_8bit_index_to_voltage(rdev,
+								 acp_table->entries[i].v);
 	}
 
 }
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
 			break;
 	}
 
-	if (rdev->family == CHIP_KABINI)
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
 		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
 	else
 		return kv_set_enabled_level(rdev, i);
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
 			break;
 	}
 
-	if (rdev->family == CHIP_KABINI)
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
 		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
 	else
 		return kv_set_enabled_level(rdev, i);
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
 	else
 		pi->battery_state = false;
 
-	if (rdev->family == CHIP_KABINI) {
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
 		ps->dpm0_pg_nb_ps_lo = 0x1;
 		ps->dpm0_pg_nb_ps_hi = 0x0;
 		ps->dpmx_nb_ps_lo = 0x1;
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
 	if (pi->lowest_valid > pi->highest_valid)
 		return -EINVAL;
 
-	if (rdev->family == CHIP_KABINI) {
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
 		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
 			pi->graphics_level[i].GnbSlow = 1;
 			pi->graphics_level[i].ForceNbPs1 = 0;
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev)
 			break;
 
 		kv_set_divider_value(rdev, i, table->entries[i].clk);
-		vid_2bit = sumo_convert_vid7_to_vid2(rdev,
-						     &pi->sys_info.vid_mapping_table,
-						     table->entries[i].v);
+		vid_2bit = kv_convert_vid7_to_vid2(rdev,
+						   &pi->sys_info.vid_mapping_table,
+						   table->entries[i].v);
 		kv_set_vid(rdev, i, vid_2bit);
 		kv_set_at(rdev, i, pi->at[i]);
 		kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev,
 	struct kv_power_info *pi = kv_get_pi(rdev);
 	u32 nbdpmconfig1;
 
-	if (rdev->family == CHIP_KABINI)
+	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
 		return;
 
 	if (pi->sys_info.nb_dpm_enable) {
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev)
 
 	pi->sram_end = SMC_RAM_END;
 
-	if (rdev->family == CHIP_KABINI)
-		pi->high_voltage_t = 4001;
-
 	pi->enable_nb_dpm = true;
 
 	pi->caps_power_containment = true;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 436e55092e9d..c75881223d18 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi0, hdmi1;
-	u32 d1grph = 0, d2grph = 0;
 	u32 dma_cntl;
 	u32 thermal_int = 0;
 
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DMA_CNTL, dma_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
-	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
-	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3918,6 +3918,14 @@ restart_ih:
 			break;
 		}
 		break;
+	case 9: /* D1 pflip */
+		DRM_DEBUG("IH: D1 flip\n");
+		radeon_crtc_handle_flip(rdev, 0);
+		break;
+	case 11: /* D2 pflip */
+		DRM_DEBUG("IH: D2 flip\n");
+		radeon_crtc_handle_flip(rdev, 1);
+		break;
 	case 19: /* HPD/DAC hotplug */
 		switch (src_data) {
 		case 0:
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 53fcb28f5578..4969cef44a19 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index dd4da88b3ab1..7501ba318c67 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -732,6 +732,12 @@ struct cik_irq_stat_regs {
 	u32 disp_int_cont4;
 	u32 disp_int_cont5;
 	u32 disp_int_cont6;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
 };
 
 union radeon_irq_stat_regs {
@@ -1647,6 +1653,7 @@ struct radeon_vce {
 	unsigned fb_version;
 	atomic_t handles[RADEON_MAX_VCE_HANDLES];
 	struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
+	unsigned img_size[RADEON_MAX_VCE_HANDLES];
 	struct delayed_work idle_work;
 };
 
@@ -1660,7 +1667,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
 			       struct radeon_ring *ring,
@@ -2644,7 +2651,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+			     (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
 			      (rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d8e1587d89cf..34ea53d980a1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2029,8 +2029,8 @@ static struct radeon_asic ci_asic = {
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &cik_copy_dma,
-		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_cpdma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
@@ -2494,6 +2494,7 @@ int radeon_asic_init(struct radeon_device *rdev)
 		break;
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
+	case CHIP_MULLINS:
 		rdev->asic = &kv_asic;
 		/* set num crtcs */
 		if (rdev->family == CHIP_KAVERI) {
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 		}
 	}
 
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
 	if (!found)
 		return false;
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		uint32_t domain = r->write_domain ?
 			r->write_domain : r->read_domains;
 
+		if (domain & RADEON_GEM_DOMAIN_CPU) {
+			DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+				  "for command submission\n");
+			return -EINVAL;
+		}
+
 		p->relocs[i].domain = domain;
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
 			domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EINVAL;
 
 		/* we only support VM on some SI+ rings */
-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
-			return -EINVAL;
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9aa1afd1786e..31565de1116c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = {
99 "KAVERI", 99 "KAVERI",
100 "KABINI", 100 "KABINI",
101 "HAWAII", 101 "HAWAII",
102 "MULLINS",
102 "LAST", 103 "LAST",
103}; 104};
104 105
@@ -1533,11 +1534,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	radeon_restore_bios_scratch_regs(rdev);
 
-	if (fbcon) {
-		radeon_fbdev_set_suspend(rdev, 0);
-		console_unlock();
-	}
-
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1562,6 +1558,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	drm_kms_helper_poll_enable(dev);
+
+	/* set the power state here in case we are a PX system or headless */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		radeon_pm_compute_clocks(rdev);
+
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e330e762c360..a4e725c6b8c8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -289,6 +289,10 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 	u32 update_pending;
 	int vpos, hpos;
 
+	/* can happen during initialization */
+	if (radeon_crtc == NULL)
+		return;
+
 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 	work = radeon_crtc->flip_work;
 	if (work == NULL) {
@@ -872,14 +876,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
 
 	/* make sure nominator is large enough */
 	if (*nom < nom_min) {
-		tmp = (nom_min + *nom - 1) / *nom;
+		tmp = DIV_ROUND_UP(nom_min, *nom);
 		*nom *= tmp;
 		*den *= tmp;
 	}
 
 	/* make sure the denominator is large enough */
 	if (*den < den_min) {
-		tmp = (den_min + *den - 1) / *den;
+		tmp = DIV_ROUND_UP(den_min, *den);
 		*nom *= tmp;
 		*den *= tmp;
 	}
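Note: DIV_ROUND_UP(a, b) expands to the same ceiling division the removed open-coded expression computed, (a + b - 1) / b; the change is purely for readability. For example:

	#include <assert.h>

	#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

	int main(void)
	{
		assert(DIV_ROUND_UP(10, 4) == 3);	/* (10 + 3) / 4 */
		assert(DIV_ROUND_UP(8, 4) == 2);	/* exact division */
		return 0;
	}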
@@ -904,7 +908,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 			       unsigned *fb_div, unsigned *ref_div)
 {
 	/* limit reference * post divider to a maximum */
-	ref_div_max = min(210 / post_div, ref_div_max);
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
 	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -1039,6 +1043,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 	/* this also makes sure that the reference divider is large enough */
 	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
 
+	/* avoid high jitter with small fractional dividers */
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+		if (fb_div < fb_div_min) {
+			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+			fb_div *= tmp;
+			ref_div *= tmp;
+		}
+	}
+
 	/* and finally save the result */
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 		*fb_div_p = fb_div / 10;
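Note: the added guard raises the minimum feedback divider when the fractional digit is small (fb_div % 10 close to 9 needs less headroom than one close to 1) and then scales fb_div and ref_div by the same factor, which preserves the ratio while moving both away from the jitter-prone range. A worked example under those rules:

	#include <assert.h>

	#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

	int main(void)
	{
		unsigned fb_div = 83, ref_div = 2;	/* 8.3 with one frac digit */
		unsigned fb_div_min = (9 - (fb_div % 10)) * 20 + 50;	/* 170 */

		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);	/* 3 */
			fb_div *= tmp;	/* 249 */
			ref_div *= tmp;	/* 6: same ratio, larger dividers */
		}
		assert(fb_div == 249 && ref_div == 6);
		return 0;
	}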
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 9da5da4ffd17..4b7b87f71a63 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -97,6 +97,7 @@ enum radeon_family {
 	CHIP_KAVERI,
 	CHIP_KABINI,
 	CHIP_HAWAII,
+	CHIP_MULLINS,
 	CHIP_LAST,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return r;
 		}
 
-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
-		}
-
-		/* map the ib pool buffer read only into
-		 * virtual address space */
-		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-					 rdev->ring_tmp_bo.bo);
-		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-					  RADEON_VM_PAGE_READABLE |
-					  RADEON_VM_PAGE_SNOOPED);
-
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
+
+			/* map the ib pool buffer read only into
+			 * virtual address space */
+			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+						 rdev->ring_tmp_bo.bo);
+			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+						  RADEON_VM_PAGE_READABLE |
+						  RADEON_VM_PAGE_SNOOPED);
+
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
 		}
-
 		file_priv->driver_priv = fpriv;
 	}
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	struct radeon_bo_va *bo_va;
 	int r;
 
-	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-	if (!r) {
-		bo_va = radeon_vm_bo_find(&fpriv->vm,
-					  rdev->ring_tmp_bo.bo);
-		if (bo_va)
-			radeon_vm_bo_rmv(rdev, bo_va);
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+	if (rdev->accel_working) {
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
 	}
 
 	radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 95197aa4de4a..2918087e572f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
458 * into account. We don't want to disallow buffer moves 458 * into account. We don't want to disallow buffer moves
459 * completely. 459 * completely.
460 */ 460 */
461 if (current_domain != RADEON_GEM_DOMAIN_CPU && 461 if ((lobj->alt_domain & current_domain) != 0 &&
462 (domain & current_domain) == 0 && /* will be moved */ 462 (domain & current_domain) == 0 && /* will be moved */
463 bytes_moved > bytes_moved_threshold) { 463 bytes_moved > bytes_moved_threshold) {
464 /* don't move it */ 464 /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
-		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.start << PAGE_SHIFT;
-		if ((offset + size) > rdev->mc.visible_vram_size) {
-			/* hurrah the memory is not visible ! */
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
-			if (unlikely(r != 0))
-				return r;
-			offset = bo->mem.start << PAGE_SHIFT;
-			/* this should not happen */
-			if ((offset + size) > rdev->mc.visible_vram_size)
-				return -EINVAL;
-		}
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah the memory is not visible ! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
 	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
 	return 0;
 }
 
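Note: the rewrite flattens the nesting with early returns and, more importantly, no longer fails the fault when visible VRAM is exhausted: on -ENOMEM it retries validation with a GTT placement. A minimal sketch of that fallback decision (names are illustrative, not the TTM API):

	#include <errno.h>

	enum place { PLACE_VRAM, PLACE_GTT };

	static int validate(enum place p)
	{
		return p == PLACE_VRAM ? -ENOMEM : 0;	/* simulate full VRAM */
	}

	int fault_notify(void)
	{
		int r = validate(PLACE_VRAM);
		if (r == -ENOMEM)
			return validate(PLACE_GTT);	/* second chance in GTT */
		return r;
	}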
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fac8efe8340..2bdae61c0ac0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set profile when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set method when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		count = -EINVAL;
+		goto fail;
+	}
+
 	/* we don't support the legacy modes with dpm */
 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
 		count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
446 struct radeon_device *rdev = ddev->dev_private; 458 struct radeon_device *rdev = ddev->dev_private;
447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 459 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
448 460
461 if ((rdev->flags & RADEON_IS_PX) &&
462 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
463 return snprintf(buf, PAGE_SIZE, "off\n");
464
449 return snprintf(buf, PAGE_SIZE, "%s\n", 465 return snprintf(buf, PAGE_SIZE, "%s\n",
450 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 466 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
451 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 467 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
459 struct drm_device *ddev = dev_get_drvdata(dev); 475 struct drm_device *ddev = dev_get_drvdata(dev);
460 struct radeon_device *rdev = ddev->dev_private; 476 struct radeon_device *rdev = ddev->dev_private;
461 477
478 /* Can't set dpm state when the card is off */
479 if ((rdev->flags & RADEON_IS_PX) &&
480 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
481 return -EINVAL;
482
462 mutex_lock(&rdev->pm.mutex); 483 mutex_lock(&rdev->pm.mutex);
463 if (strncmp("battery", buf, strlen("battery")) == 0) 484 if (strncmp("battery", buf, strlen("battery")) == 0)
464 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 485 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
485 struct radeon_device *rdev = ddev->dev_private; 506 struct radeon_device *rdev = ddev->dev_private;
486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 507 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
487 508
509 if ((rdev->flags & RADEON_IS_PX) &&
510 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
511 return snprintf(buf, PAGE_SIZE, "off\n");
512
488 return snprintf(buf, PAGE_SIZE, "%s\n", 513 return snprintf(buf, PAGE_SIZE, "%s\n",
489 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 514 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
490 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 515 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
500 enum radeon_dpm_forced_level level; 525 enum radeon_dpm_forced_level level;
501 int ret = 0; 526 int ret = 0;
502 527
528 /* Can't force performance level when the card is off */
529 if ((rdev->flags & RADEON_IS_PX) &&
530 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
531 return -EINVAL;
532
503 mutex_lock(&rdev->pm.mutex); 533 mutex_lock(&rdev->pm.mutex);
504 if (strncmp("low", buf, strlen("low")) == 0) { 534 if (strncmp("low", buf, strlen("low")) == 0) {
505 level = RADEON_DPM_FORCED_LEVEL_LOW; 535 level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
538 char *buf) 568 char *buf)
539{ 569{
540 struct radeon_device *rdev = dev_get_drvdata(dev); 570 struct radeon_device *rdev = dev_get_drvdata(dev);
571 struct drm_device *ddev = rdev->ddev;
541 int temp; 572 int temp;
542 573
574 /* Can't get temperature when the card is off */
575 if ((rdev->flags & RADEON_IS_PX) &&
576 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
577 return -EINVAL;
578
543 if (rdev->asic->pm.get_temperature) 579 if (rdev->asic->pm.get_temperature)
544 temp = radeon_get_temperature(rdev); 580 temp = radeon_get_temperature(rdev);
545 else 581 else
@@ -1068,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1068 if (ret) 1104 if (ret)
1069 goto dpm_resume_fail; 1105 goto dpm_resume_fail;
1070 rdev->pm.dpm_enabled = true; 1106 rdev->pm.dpm_enabled = true;
1071 radeon_pm_compute_clocks(rdev);
1072 return; 1107 return;
1073 1108
1074dpm_resume_fail: 1109dpm_resume_fail:
@@ -1300,6 +1335,7 @@ int radeon_pm_init(struct radeon_device *rdev)
1300 case CHIP_KABINI: 1335 case CHIP_KABINI:
1301 case CHIP_KAVERI: 1336 case CHIP_KAVERI:
1302 case CHIP_HAWAII: 1337 case CHIP_HAWAII:
1338 case CHIP_MULLINS:
1303 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1339 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1304 if (!rdev->rlc_fw) 1340 if (!rdev->rlc_fw)
1305 rdev->pm.pm_method = PM_METHOD_PROFILE; 1341 rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1613,8 +1649,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1613 struct drm_info_node *node = (struct drm_info_node *) m->private; 1649 struct drm_info_node *node = (struct drm_info_node *) m->private;
1614 struct drm_device *dev = node->minor->dev; 1650 struct drm_device *dev = node->minor->dev;
1615 struct radeon_device *rdev = dev->dev_private; 1651 struct radeon_device *rdev = dev->dev_private;
1652 struct drm_device *ddev = rdev->ddev;
1616 1653
1617 if (rdev->pm.dpm_enabled) { 1654 if ((rdev->flags & RADEON_IS_PX) &&
1655 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1656 seq_printf(m, "PX asic powered off\n");
1657 } else if (rdev->pm.dpm_enabled) {
1618 mutex_lock(&rdev->pm.mutex); 1658 mutex_lock(&rdev->pm.mutex);
1619 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1659 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1620 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1660 radeon_dpm_debugfs_print_current_performance_level(rdev, m);
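Every radeon_pm.c hunk above repeats one predicate. A standalone sketch of that guard with stand-in types (the flag bit and state value here are placeholders, not the kernel's): on PX (PowerXpress/hybrid) laptops vgaswitcheroo may have cut power to the dGPU, and touching its registers then would hang or fault, so each handler bails out or reports "off" first.

#include <stdbool.h>

#define RADEON_IS_PX		(1u << 0)	/* stand-in flag bit */
#define DRM_SWITCH_POWER_ON	0		/* stand-in state value */

struct card {
	unsigned flags;
	int switch_power_state;
};

/* The common guard: PX card, and switcheroo says it is not powered. */
static bool px_card_is_off(const struct card *c)
{
	return (c->flags & RADEON_IS_PX) &&
	       c->switch_power_state != DRM_SWITCH_POWER_ON;
}

int main(void)
{
	struct card c = { RADEON_IS_PX, 1 /* powered down */ };

	return px_card_is_off(&c) ? 0 : 1;
}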
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 58d12938c0b8..4e7c3269b183 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -52,6 +52,7 @@
 #define BONAIRE_RLC_UCODE_SIZE       2048
 #define KB_RLC_UCODE_SIZE            2560
 #define KV_RLC_UCODE_SIZE            2560
+#define ML_RLC_UCODE_SIZE            2560
 
 /* MC */
 #define BTC_MC_UCODE_SIZE            6024
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 5748bdaeacce..1b65ae2433cd 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
+	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;
 
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
	cmd = radeon_get_ib_value(p, p->idx) >> 1;
 
	if (cmd < 0x4) {
+		if (end <= start) {
+			DRM_ERROR("invalid reloc offset %X!\n", offset);
+			return -EINVAL;
+		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
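The added end <= start test matters for more than readability: with unsigned arithmetic, end - start would wrap to a huge value and sail past the size check. A hedged userspace model of the fixed ordering:

#include <assert.h>
#include <stdint.h>

/* Reject a wrapped or empty range before the unsigned subtraction,
 * otherwise end - start underflows and any required size "fits". */
static int reloc_range_ok(uint64_t start, uint64_t end, uint64_t required)
{
	if (end <= start)
		return 0;		/* invalid reloc offset */
	return (end - start) >= required;
}

int main(void)
{
	assert(!reloc_range_ok(4096, 4096, 16));	/* empty range */
	assert(!reloc_range_ok(8192, 4096, 16));	/* would underflow */
	assert(reloc_range_ok(0, 4096, 16));
	return 0;
}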
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index ced53dd03e7c..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
+	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;
 
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
+ * @size: size of checker for relocation buffer
 *
 * Patch relocation inside command stream with real buffer address
 */
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+			unsigned size)
{
	struct radeon_cs_chunk *relocs_chunk;
-	uint64_t offset;
+	struct radeon_cs_reloc *reloc;
+	uint64_t start, end, offset;
	unsigned idx;
 
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
		return -EINVAL;
	}
 
-	offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+	reloc = p->relocs_ptr[(idx / 4)];
+	start = reloc->gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
 
-	p->ib.ptr[lo] = offset & 0xFFFFFFFF;
-	p->ib.ptr[hi] = offset >> 32;
+	p->ib.ptr[lo] = start & 0xFFFFFFFF;
+	p->ib.ptr[hi] = start >> 32;
+
+	if (end <= start) {
+		DRM_ERROR("invalid reloc offset %llX!\n", offset);
+		return -EINVAL;
+	}
+	if ((end - start) < size) {
+		DRM_ERROR("buffer to small (%d / %d)!\n",
+			  (unsigned)(end - start), size);
+		return -EINVAL;
+	}
 
	return 0;
}
 
 /**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+	unsigned i;
+
+	/* validate the handle */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+			return i;
+	}
+
+	/* handle not found try to alloc a new one */
+	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+			p->rdev->vce.filp[i] = p->filp;
+			p->rdev->vce.img_size[i] = 0;
+			return i;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+	return -EINVAL;
+}
+
+/**
  * radeon_vce_cs_parse - parse and validate the command stream
  *
  * @p: parser context
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
  */
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
-	uint32_t handle = 0;
-	bool destroy = false;
+	int session_idx = -1;
+	bool destroyed = false;
+	uint32_t tmp, handle = 0;
+	uint32_t *size = &tmp;
	int i, r;
 
	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
			return -EINVAL;
		}
 
+		if (destroyed) {
+			DRM_ERROR("No other command allowed after destroy!\n");
+			return -EINVAL;
+		}
+
		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
+			session_idx = radeon_vce_validate_handle(p, handle);
+			if (session_idx < 0)
+				return session_idx;
+			size = &p->rdev->vce.img_size[session_idx];
			break;
 
		case 0x00000002: // task info
+			break;
+
		case 0x01000001: // create
+			*size = radeon_get_ib_value(p, p->idx + 8) *
+				radeon_get_ib_value(p, p->idx + 10) *
+				8 * 3 / 2;
+			break;
+
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
			break;
 
		case 0x03000001: // encode
-			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+						*size);
			if (r)
				return r;
 
-			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+						*size / 3);
			if (r)
				return r;
			break;
 
		case 0x02000001: // destroy
-			destroy = true;
+			destroyed = true;
			break;
 
		case 0x05000001: // context buffer
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						*size * 2);
+			if (r)
+				return r;
+			break;
+
		case 0x05000004: // video bitstream buffer
+			tmp = radeon_get_ib_value(p, p->idx + 4);
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						tmp);
+			if (r)
+				return r;
+			break;
+
		case 0x05000005: // feedback buffer
-			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+						4096);
			if (r)
				return r;
			break;
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
			return -EINVAL;
		}
 
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			return -EINVAL;
+		}
+
		p->idx += len / 4;
	}
 
-	if (destroy) {
+	if (destroyed) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
-		return 0;
-	}
-
-	/* create or encode, validate the handle */
-	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
-			return 0;
	}
 
-	/* handle not found try to alloc a new one */
-	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
-			p->rdev->vce.filp[i] = p->filp;
-			return 0;
-		}
-	}
-
-	DRM_ERROR("No more free VCE handles!\n");
-	return -EINVAL;
+	return 0;
}
 
 /**
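radeon_vce_validate_handle() claims a free session slot with atomic_cmpxchg(). A userspace sketch of the same lock-free pattern using C11 atomics (RADEON_MAX_VCE_HANDLES's actual value is not shown in this hunk; 16 here is an assumption):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16	/* assumed; the kernel constant isn't in this hunk */

static atomic_uint_fast32_t handles[MAX_HANDLES];

/* Return the slot already owning `handle`, or claim the first empty
 * slot (value 0) for it; compare-exchange makes the claim race-free. */
static int validate_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint_fast32_t expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;
	}

	fprintf(stderr, "No more free handles!\n");
	return -1;
}

int main(void)
{
	int a = validate_handle(0xdead);
	int b = validate_handle(0xdead);	/* same session, same slot */

	return (a >= 0 && a == b) ? 0 : 1;
}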
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index a128a4fd64b3..a72e9c81805d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
			    struct list_head *head)
 {
	struct radeon_cs_reloc *list;
-	unsigned i, idx, size;
+	unsigned i, idx;
 
-	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-	list = kmalloc(size, GFP_KERNEL);
+	list = kmalloc_array(vm->max_pde_used + 2,
+			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (!list)
		return NULL;
 
@@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 {
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
-	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+	struct radeon_bo *pd = vm->page_directory;
+	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
@@ -595,7 +596,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
	ndw = 64;
 
	/* assume the worst case */
-	ndw += vm->max_pde_used * 12;
+	ndw += vm->max_pde_used * 16;
 
	/* update too big for an IB */
	if (ndw > 0xfffff)
@@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
			incr, R600_PTE_VALID);
 
	if (ib.length_dw != 0) {
+		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
@@ -767,15 +769,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
 
+		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
-		pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
+		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;
 
		if ((last_pte + 8 * count) != pte) {
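Switching radeon_vm_get_bos() from kmalloc(n * size) to kmalloc_array(n, size, ...) also buys multiplication-overflow checking for free. A minimal userspace equivalent of that guard:

#include <stdint.h>
#include <stdlib.h>

/* Like kmalloc_array(): fail instead of silently wrapping when
 * n * size overflows, which would otherwise hand back a too-small
 * buffer that later gets overrun. */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	return alloc_array(SIZE_MAX, 2) == NULL ? 0 : 1;
}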
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index aca8cbe8a335..bbf2e076ee45 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}
 
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 5c1c0c795e98..d64ef9115b69 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5784,7 +5784,6 @@ int si_irq_set(struct radeon_device *rdev)
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
-	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
	u32 dma_cntl, dma_cntl1;
	u32 thermal_int = 0;
 
@@ -5923,16 +5922,22 @@ int si_irq_set(struct radeon_device *rdev)
	}
 
	if (rdev->num_crtc >= 2) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
	}
	if (rdev->num_crtc >= 4) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
	}
	if (rdev->num_crtc >= 6) {
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
-		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       GRPH_PFLIP_INT_MASK);
	}
 
	if (!ASIC_IS_NODCE(rdev)) {
@@ -6296,6 +6301,15 @@ restart_ih:
			break;
		}
		break;
+	case 8: /* D1 page flip */
+	case 10: /* D2 page flip */
+	case 12: /* D3 page flip */
+	case 14: /* D4 page flip */
+	case 16: /* D5 page flip */
+	case 18: /* D6 page flip */
+		DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+		radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+		break;
	case 42: /* HPD hotplug */
		switch (src_data) {
		case 0:
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 952166930fb8..9a660f861d2c 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -231,6 +231,7 @@ int si_copy_dma(struct radeon_device *rdev,
	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}
 
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index da8f8674a552..fd414d34d885 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
 #define	SPLL_CHG_STATUS				(1 << 1)
 #define	SPLL_CNTL_MODE				0x618
 #define	SPLL_SW_DIR_CONTROL			(1 << 0)
-#	define SPLL_REFCLK_SEL(x)		((x) << 8)
-#	define SPLL_REFCLK_SEL_MASK		0xFF00
+#	define SPLL_REFCLK_SEL(x)		((x) << 26)
+#	define SPLL_REFCLK_SEL_MASK		(3 << 26)
 
 #define	CG_SPLL_SPREAD_SPECTRUM			0x620
 #define	SSEN					(1 << 0)
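The sid.h fix moves the SPLL refclk selector from bits 15:8 to a two-bit field at 27:26. A compile-and-run self-check of the corrected macros, written as an ordinary userspace snippet:

#include <assert.h>
#include <stdint.h>

#define SPLL_REFCLK_SEL(x)	((uint32_t)(x) << 26)
#define SPLL_REFCLK_SEL_MASK	(3u << 26)

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* read-modify-write of just the 2-bit field */
	reg = (reg & ~SPLL_REFCLK_SEL_MASK) | SPLL_REFCLK_SEL(1);
	assert(((reg & SPLL_REFCLK_SEL_MASK) >> 26) == 1);
	return 0;
}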
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 0a243f0e5d68..be42c8125203 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
	int r;
 
	/* raise clocks while booting up the VCPU */
-	radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (rdev->family < CHIP_RV740)
+		radeon_set_uvd_clocks(rdev, 10000, 10000);
+	else
+		radeon_set_uvd_clocks(rdev, 53300, 40000);
 
	r = uvd_v1_0_start(rdev);
	if (r)
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
	struct radeon_fence *fence = NULL;
	int r;
 
-	r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (rdev->family < CHIP_RV740)
+		r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+	else
+		r = radeon_set_uvd_clocks(rdev, 53300, 40000);
	if (r) {
		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
		return r;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 10a2c0866459..da52279de939 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1253,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report);
 
 static int hid_report_len(struct hid_report *report)
 {
-	return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
 }
 
 /*
@@ -1266,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
	 * of implement() working on 8 byte chunks
	 */
 
-	int len = hid_report_len(report);
+	int len = hid_report_len(report) + 7;
 
	return kmalloc(len, flags);
 }
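A worked model of the corrected hid-core arithmetic: a report is DIV_ROUND_UP(size_in_bits, 8) payload bytes plus one byte when a report ID is in use; the 7 spare bytes that implement() needs for its 8-byte chunked accesses now pad the allocation instead of inflating the report length itself.

#include <assert.h>

/* Same expression as the fixed hid_report_len(), with plain arguments. */
static int report_len(unsigned size_bits, unsigned report_id)
{
	return ((size_bits - 1) >> 3) + 1 + (report_id > 0);
}

int main(void)
{
	assert(report_len(8, 0) == 1);	/* 8 bits, no ID -> 1 byte */
	assert(report_len(9, 1) == 3);	/* 2 payload bytes + ID byte */
	return 0;
}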
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c8af7202c28d..34bb2205d2ea 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -301,6 +301,9 @@
 
 #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
 
+#define USB_VENDOR_ID_ELITEGROUP	0x03fc
+#define USB_DEVICE_ID_ELITEGROUP_05D8	0x05d8
+
 #define USB_VENDOR_ID_ELO		0x04E7
 #define USB_DEVICE_ID_ELO_TS2515	0x0022
 #define USB_DEVICE_ID_ELO_TS2700	0x0020
@@ -834,6 +837,10 @@
 #define USB_DEVICE_ID_SYNAPTICS_LTS2	0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD	0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD	0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103	0x5710
+
+#define USB_VENDOR_ID_TEXAS_INSTRUMENTS	0x2047
+#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA	0x0855
 
 #define USB_VENDOR_ID_THINGM		0x27b8
 #define USB_DEVICE_ID_BLINK1		0x01ed
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 35278e43c7a4..51e25b9407f2 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = {
		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
 
+	/* Elitegroup panel */
+	{ .driver_data = MT_CLS_SERIAL,
+		MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+			USB_DEVICE_ID_ELITEGROUP_05D8) },
+
	/* Flatfrog Panels */
	{ .driver_data = MT_CLS_FLATFROG,
		MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index af8244b1c1f4..be14b5690e94 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -708,6 +708,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
			USB_DEVICE_ID_STM_HID_SENSOR),
			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
+			USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
+			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
		     HID_ANY_ID) },
	{ }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dbd83878ff99..8e4ddb369883 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
 
	{ 0, 0 }
 };
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
 
 config SENSORS_NTC_THERMISTOR
	tristate "NTC thermistor support"
-	depends on (!OF && !IIO) || (OF && IIO)
+	depends on !OF || IIO=n || IIO
	help
	  This driver supports NTC thermistors sensor reading and its
	  interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d02e3b06375..d76f0b70c6e0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
		if (cpu_has_tjmax(c))
			dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
	} else {
-		val = (eax >> 16) & 0x7f;
+		val = (eax >> 16) & 0xff;
		/*
		 * If the TjMax is not plausible, an assumption
		 * will be used
		 */
-		if (val >= 85) {
+		if (val) {
			dev_dbg(dev, "TjMax is %d degrees C\n", val);
			return val * 1000;
		}
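TjMax occupies bits 23:16 of MSR_IA32_TEMPERATURE_TARGET; the old 0x7f mask dropped bit 23 and the >= 85 sanity check rejected valid low readings. A quick self-check of the corrected extraction:

#include <assert.h>
#include <stdint.h>

/* Bits 23:16 of the MSR's EAX half hold TjMax in degrees C. */
static unsigned tjmax_from_eax(uint32_t eax)
{
	return (eax >> 16) & 0xff;
}

int main(void)
{
	assert(tjmax_from_eax(0x00690000) == 105);	/* 105 degrees C */
	assert(tjmax_from_eax(0x00800000) == 128);	/* bit 23 now kept */
	return 0;
}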
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 90ec1173b8a1..01723f04fe45 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
	if (retval < 0)
		goto fail;
 
-	hyst = val - retval * 1000;
+	hyst = retval * 1000 - val;
	hyst = DIV_ROUND_CLOSEST(hyst, 1000);
	if (hyst < 0 || hyst > 255) {
		retval = -ERANGE;
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
	}
 
	id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
-	if (id != 0x01)
+	if (id < 0x01 || id > 0x04)
		return -ENODEV;
 
	return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
	if (id->driver_data)
		data->groups[1] = &emc1404_group;
 
-	hwmon_dev = hwmon_device_register_with_groups(&client->dev,
-						      client->name, data,
-						      data->groups);
+	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+							   client->name, data,
+							   data->groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
 
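The emc1403 hysteresis register holds how far *below* the limit the hysteresis point sits, so the value to write is limit minus requested point, not the reverse. Worked arithmetic as a standalone snippet:

#include <assert.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* limit is the register's trip point in degrees * 1000; the requested
 * hysteresis point arrives in millidegrees. */
static long hyst_reg(long limit_mdeg, long point_mdeg)
{
	return DIV_ROUND_CLOSEST(limit_mdeg - point_mdeg, 1000);
}

int main(void)
{
	/* limit 85 C, requested hysteresis point 75 C -> register = 10 */
	assert(hyst_reg(85000, 75000) == 10);
	return 0;
}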
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
	unsigned int	ohm;
 };
 
+/* Order matters, ntc_match references the entries by index */
 static const struct platform_device_id ntc_thermistor_id[] = {
	{ "ncp15wb473", TYPE_NCPXXWB473 },
	{ "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
	char name[PLATFORM_NAME_SIZE];
 };
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
 static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 {
	struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 
 static const struct of_device_id ntc_match[] = {
	{ .compatible = "ntc,ncp15wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[0] },
	{ .compatible = "ntc,ncp18wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[1] },
	{ .compatible = "ntc,ncp21wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[2] },
	{ .compatible = "ntc,ncp03wb473",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
+		.data = &ntc_thermistor_id[3] },
	{ .compatible = "ntc,ncp15wl333",
-		.data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
+		.data = &ntc_thermistor_id[4] },
	{ },
 };
 MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
	return NULL;
 }
 
+#define ntc_match	NULL
+
 static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
 { }
 #endif
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 22e92c3d3d07..3c20e4bd6dd1 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
	 */
	dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
 
+	/* enforce disabled interrupts (due to HW issues) */
+	i2c_dw_disable_int(dev);
+
	/* Enable the adapter */
	__i2c_dw_enable(dev, true);
 
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 28cbe1b2a2ec..32c85e9ecdae 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 
	dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
				resource_size(&adev->res));
-	if (IS_ERR(dev->virtbase)) {
+	if (!dev->virtbase) {
		ret = -ENOMEM;
		goto err_no_mem;
	}
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1b4cf14f1106..2a5efb5b487c 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
	int ret, idx;
 
	ret = pm_runtime_get_sync(qup->dev);
-	if (ret)
+	if (ret < 0)
		goto out;
 
	writel(1, qup->base + QUP_SW_RESET);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d4fa8eba6e9d..06d47aafbb79 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
	ret = -EINVAL;
	for (i = 0; i < num; i++) {
+		/* This HW can't send STOP after address phase */
+		if (msgs[i].len == 0) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
		/*-------------- spin lock -----------------*/
		spin_lock_irqsave(&priv->lock, flags);
 
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 static u32 rcar_i2c_func(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	/* This HW can't do SMBUS_QUICK and NOSTART */
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 }
 
 static const struct i2c_algorithm rcar_i2c_algo = {
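A zero-length message is what SMBus Quick looks like at the I2C level: just an address phase followed by STOP, which this controller cannot generate. A hedged userspace model of the check the rcar hunk adds (struct and constant are stand-ins):

#include <assert.h>
#include <errno.h>

struct msg { unsigned len; };	/* stand-in for struct i2c_msg */

/* Refuse zero-length transfers up front; the functionality mask above
 * stops advertising I2C_FUNC_SMBUS_QUICK for the same reason. */
static int check_msgs(const struct msg *msgs, int num)
{
	for (int i = 0; i < num; i++)
		if (msgs[i].len == 0)
			return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	struct msg ok = { 1 }, quick = { 0 };

	assert(check_msgs(&ok, 1) == 0);
	assert(check_msgs(&quick, 1) == -EOPNOTSUPP);
	return 0;
}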
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ae4491062e41..bb3a9964f7e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
-	i2c->suspended = 0;
	clk_prepare_enable(i2c->clk);
	s3c24xx_i2c_init(i2c);
	clk_disable_unprepare(i2c->clk);
+	i2c->suspended = 0;
 
	return 0;
 }
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index d86196cfe4b4..24c28e3f93a3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -106,7 +106,7 @@ config AT91_ADC
	  Say yes here to build support for Atmel AT91 ADC.
 
 config EXYNOS_ADC
-	bool "Exynos ADC driver support"
+	tristate "Exynos ADC driver support"
	depends on OF
	help
	  Core support for the ADC block found in the Samsung EXYNOS series
@@ -114,7 +114,7 @@ config EXYNOS_ADC
	  this resource.
 
 config LP8788_ADC
-	bool "LP8788 ADC driver"
+	tristate "LP8788 ADC driver"
	depends on MFD_LP8788
	help
	  Say yes here to build support for TI LP8788 ADC.
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index d25b262193a7..affa93f51789 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
 
	exynos_adc_hw_init(info);
 
-	ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev);
+	ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed adding child nodes\n");
		goto err_of_populate;
@@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
	return 0;
 
 err_of_populate:
-	device_for_each_child(&pdev->dev, NULL,
+	device_for_each_child(&indio_dev->dev, NULL,
			      exynos_adc_remove_devices);
	regulator_disable(info->vdd);
	clk_disable_unprepare(info->clk);
@@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct exynos_adc *info = iio_priv(indio_dev);
 
-	device_for_each_child(&pdev->dev, NULL,
+	device_for_each_child(&indio_dev->dev, NULL,
			      exynos_adc_remove_devices);
	regulator_disable(info->vdd);
	clk_disable_unprepare(info->clk);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index cb9f96b446a5..d8ad606c7cd0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
 {
	struct inv_mpu6050_state *st;
	struct iio_dev *indio_dev;
+	struct inv_mpu6050_platform_data *pdata;
	int result;
 
	if (!i2c_check_functionality(client->adapter,
@@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client,
 
	st = iio_priv(indio_dev);
	st->client = client;
-	st->plat_data = *(struct inv_mpu6050_platform_data
-			*)dev_get_platdata(&client->dev);
+	pdata = (struct inv_mpu6050_platform_data
+			*)dev_get_platdata(&client->dev);
+	if (pdata)
+		st->plat_data = *pdata;
	/* power is turned on inside check chip type*/
	result = inv_check_and_setup_chip(st, id);
	if (result)
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-	tristate "Chelsio T4 RDMA Driver"
+	tristate "Chelsio T4/T5 RDMA Driver"
	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
	select GENERIC_ALLOCATOR
	---help---
-	  This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-	  10GbE adapters.
+	  This is an iWARP/RDMA driver for the Chelsio T4 and T5
+	  1GbE, 10GbE adapters and T5 40GbE adapter.
 
	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 185452abf32c..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
	return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
+	int disconnect = 0;
 
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
-		return;
+		return 0;
 
	/*
	 * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;
 
	/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;
 
	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
+		disconnect = 1;
		goto out;
	}
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
+		disconnect = 1;
		goto out;
	}
	goto out;
@@ -1366,7 +1375,7 @@ err:
	send_abort(ep, skb, GFP_KERNEL);
 out:
	connect_reply_upcall(ep, err);
-	return;
+	return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
+	int disconnect = 0;
 
	ep = lookup_tid(t, tid);
	if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
-		process_mpa_reply(ep, skb);
+		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
			ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 
	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
		       __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
-	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
+	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
 }
 
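The recurring change in cm.c — process_mpa_reply() and rx_data() now propagate a disconnect flag instead of disconnecting inline — avoids re-taking ep->com.mutex while it is still held. A toy pthreads model of the deferral pattern (not the driver's actual locking):

#include <pthread.h>

static pthread_mutex_t ep_mutex = PTHREAD_MUTEX_INITIALIZER;

/* The handler only *decides* under the lock and reports the decision;
 * the teardown, which itself takes the mutex, runs after the unlock. */
static int handle_rx(void)
{
	int disconnect;

	pthread_mutex_lock(&ep_mutex);
	disconnect = 1;	/* state machine decided to tear down */
	pthread_mutex_unlock(&ep_mutex);
	return disconnect;
}

static void do_disconnect(void)
{
	pthread_mutex_lock(&ep_mutex);	/* safe: no longer held */
	/* ... teardown work ... */
	pthread_mutex_unlock(&ep_mutex);
}

int main(void)
{
	if (handle_rx())
		do_disconnect();
	return 0;
}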
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7b8c5806a09d..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
+	u8 send_term;
 };
 
 struct c4iw_qp {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5114cb486f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
-			disconnect = 1;
-			c4iw_get_ep(&qhp->ep->com);
-			if (!internal)
+			if (!internal) {
+				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
-			else {
+				disconnect = 1;
+			} else {
+				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
+	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+		return -EINVAL;
 
	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID       (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
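The V_/G_ macro pair added to t4fw_ri_api.h follows the usual Chelsio shift/mask convention. A quick self-check of how the new congestion-control field packs into opt2:

#include <assert.h>
#include <stdint.h>

#define S_CONG_CNTRL	14
#define M_CONG_CNTRL	0x3
#define V_CONG_CNTRL(x)	((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x)	(((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)

enum { CONG_ALG_RENO, CONG_ALG_TAHOE, CONG_ALG_NEWRENO, CONG_ALG_HIGHSPEED };

int main(void)
{
	uint32_t opt2 = 0;

	opt2 |= 1u << 31;			/* T5_OPT_2_VALID */
	opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);	/* pack the 2-bit field */
	assert(G_CONG_CNTRL(opt2) == CONG_ALG_TAHOE);	/* unpack */
	return 0;
}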
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
 #include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
 }
 #endif
 
+#define MLX4_IB_INVALID_MAC	((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+			       struct net_device *dev,
+			       int port)
+{
+	u64 new_smac = 0;
+	u64 release_mac = MLX4_IB_INVALID_MAC;
+	struct mlx4_ib_qp *qp;
+
+	read_lock(&dev_base_lock);
+	new_smac = mlx4_mac_to_u64(dev->dev_addr);
+	read_unlock(&dev_base_lock);
+
+	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+	qp = ibdev->qp1_proxy[port - 1];
+	if (qp) {
+		int new_smac_index;
+		u64 old_smac = qp->pri.smac;
+		struct mlx4_update_qp_params update_params;
+
+		if (new_smac == old_smac)
+			goto unlock;
+
+		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+		if (new_smac_index < 0)
+			goto unlock;
+
+		update_params.smac_index = new_smac_index;
+		if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+				   &update_params)) {
+			release_mac = new_smac;
+			goto unlock;
+		}
+
+		qp->pri.smac = new_smac;
+		qp->pri.smac_index = new_smac_index;
+
+		release_mac = old_smac;
+	}
+
+unlock:
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+	if (release_mac != MLX4_IB_INVALID_MAC)
+		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
 static void mlx4_ib_get_dev_addr(struct net_device *dev,
				 struct mlx4_ib_dev *ibdev, u8 port)
 {
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
	return 0;
 }
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+				 struct net_device *dev,
+				 unsigned long event)
+
 {
	struct mlx4_ib_iboe *iboe;
+	int update_qps_port = -1;
	int port;
 
	iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
		}
		curr_master = iboe->masters[port - 1];
 
+		if (dev == iboe->netdevs[port - 1] &&
+		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+		     event == NETDEV_UP || event == NETDEV_CHANGE))
+			update_qps_port = port;
+
		if (curr_netdev) {
			port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
						IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
	}
 
	spin_unlock(&iboe->lock);
+
+	if (update_qps_port > 0)
+		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
 static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
		return NOTIFY_DONE;
 
	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
-	mlx4_ib_scan_netdevs(ibdev);
+	mlx4_ib_scan_netdevs(ibdev, dev, event);
 
	return NOTIFY_DONE;
 }
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		goto err_map;
 
	for (i = 0; i < ibdev->num_ports; ++i) {
+		mutex_init(&ibdev->qp1_proxy_lock[i]);
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
	for (i = 1 ; i <= ibdev->num_ports ; ++i)
		reset_gid_table(ibdev, i);
	rtnl_lock();
-	mlx4_ib_scan_netdevs(ibdev);
+	mlx4_ib_scan_netdevs(ibdev, NULL, 0);
	rtnl_unlock();
2131 mlx4_ib_init_gid_table(ibdev); 2192 mlx4_ib_init_gid_table(ibdev);
2132 } 2193 }
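
mlx4_ib_update_qps() uses a single release_mac variable to decide, after the mutex is dropped, which MAC to unregister: the freshly registered one when the QP update fails, the old one when it succeeds. A hedged userspace sketch of that swap-with-rollback shape; register_mac(), update_qp() and unregister_mac() below are stand-ins, not the mlx4 API:

#include <stdint.h>
#include <stdio.h>

#define INVALID_MAC ((uint64_t)-1)

static int register_mac(uint64_t mac)    { (void)mac; return 0; }
static int update_qp(uint64_t mac)       { (void)mac; return 0; }
static void unregister_mac(uint64_t mac) { printf("release %llx\n", (unsigned long long)mac); }

static void update_smac(uint64_t *cur_smac, uint64_t new_smac)
{
    uint64_t release_mac = INVALID_MAC;

    if (new_smac == *cur_smac)
        goto out;                     /* nothing changed */
    if (register_mac(new_smac) < 0)
        goto out;                     /* could not reserve the new MAC */

    if (update_qp(new_smac)) {
        release_mac = new_smac;       /* switch failed: roll back */
        goto out;
    }
    release_mac = *cur_smac;          /* switch worked: retire the old MAC */
    *cur_smac = new_smac;
out:
    if (release_mac != INVALID_MAC)
        unregister_mac(release_mac);  /* done outside any lock */
}

int main(void)
{
    uint64_t smac = 0x001122334455ULL;
    update_smac(&smac, 0x665544332211ULL);
    return 0;
}
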
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
522 int steer_qpn_count; 522 int steer_qpn_count;
523 int steer_qpn_base; 523 int steer_qpn_base;
524 int steering_support; 524 int steering_support;
525 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
526 /* lock when destroying qp1_proxy and getting netdev events */
527 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
525}; 528};
526 529
527struct ib_event_work { 530struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
1132 if (is_qp0(dev, mqp)) 1132 if (is_qp0(dev, mqp))
1133 mlx4_CLOSE_PORT(dev->dev, mqp->port); 1133 mlx4_CLOSE_PORT(dev->dev, mqp->port);
1134 1134
1135 if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1136 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1137 dev->qp1_proxy[mqp->port - 1] = NULL;
1138 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1139 }
1140
1135 pd = get_pd(mqp); 1141 pd = get_pd(mqp);
1136 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); 1142 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
1137 1143
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1646 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1652 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1647 if (err) 1653 if (err)
1648 return -EINVAL; 1654 return -EINVAL;
1655 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1656 dev->qp1_proxy[qp->port - 1] = qp;
1649 } 1657 }
1650 } 1658 }
1651 } 1659 }
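
The destroy hook above clears the cached qp1_proxy pointer under qp1_proxy_lock before the QP is torn down, so the netdev-event path in mlx4_ib_update_qps() can never dereference a freed QP; __mlx4_ib_modify_qp() populates the same cache when a proxy GSI QP gets its SMAC index. The publish/invalidate discipline in a small pthread sketch (names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct qp { int id; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp *cached_qp;               /* analogous to qp1_proxy[port] */

static void publish(struct qp *qp)
{
    pthread_mutex_lock(&cache_lock);
    cached_qp = qp;                        /* event path may now find it */
    pthread_mutex_unlock(&cache_lock);
}

static void destroy(struct qp *qp)
{
    pthread_mutex_lock(&cache_lock);
    if (cached_qp == qp)
        cached_qp = NULL;                  /* invalidate before freeing */
    pthread_mutex_unlock(&cache_lock);
    free(qp);                              /* safe: no lookup can return it */
}

int main(void)
{
    struct qp *qp = malloc(sizeof(*qp));
    if (!qp)
        return 1;
    publish(qp);
    destroy(qp);
    return 0;
}
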
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c98fdb185931..a1710465faaf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,7 @@
28#include <target/target_core_base.h> 28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h> 29#include <target/target_core_fabric.h>
30#include <target/iscsi/iscsi_transport.h> 30#include <target/iscsi/iscsi_transport.h>
31#include <linux/semaphore.h>
31 32
32#include "isert_proto.h" 33#include "isert_proto.h"
33#include "ib_isert.h" 34#include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
561 struct isert_device *device; 562 struct isert_device *device;
562 struct ib_device *ib_dev = cma_id->device; 563 struct ib_device *ib_dev = cma_id->device;
563 int ret = 0; 564 int ret = 0;
564 u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 565 u8 pi_support;
566
567 spin_lock_bh(&np->np_thread_lock);
568 if (!np->enabled) {
569 spin_unlock_bh(&np->np_thread_lock);
570 pr_debug("iscsi_np is not enabled, reject connect request\n");
571 return rdma_reject(cma_id, NULL, 0);
572 }
573 spin_unlock_bh(&np->np_thread_lock);
565 574
566 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 575 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
567 cma_id, cma_id->context); 576 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
652 goto out_mr; 661 goto out_mr;
653 } 662 }
654 663
664 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
655 if (pi_support && !device->pi_capable) { 665 if (pi_support && !device->pi_capable) {
656 pr_err("Protection information requested but not supported\n"); 666 pr_err("Protection information requested but not supported\n");
657 ret = -EINVAL; 667 ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
663 goto out_conn_dev; 673 goto out_conn_dev;
664 674
665 mutex_lock(&isert_np->np_accept_mutex); 675 mutex_lock(&isert_np->np_accept_mutex);
666 list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 676 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
667 mutex_unlock(&isert_np->np_accept_mutex); 677 mutex_unlock(&isert_np->np_accept_mutex);
668 678
669 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 679 pr_debug("isert_connect_request() up np_sem np: %p\n", np);
670 wake_up(&isert_np->np_accept_wq); 680 up(&isert_np->np_sem);
671 return 0; 681 return 0;
672 682
673out_conn_dev: 683out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
2999 pr_err("Unable to allocate struct isert_np\n"); 3009 pr_err("Unable to allocate struct isert_np\n");
3000 return -ENOMEM; 3010 return -ENOMEM;
3001 } 3011 }
3002 init_waitqueue_head(&isert_np->np_accept_wq); 3012 sema_init(&isert_np->np_sem, 0);
3003 mutex_init(&isert_np->np_accept_mutex); 3013 mutex_init(&isert_np->np_accept_mutex);
3004 INIT_LIST_HEAD(&isert_np->np_accept_list); 3014 INIT_LIST_HEAD(&isert_np->np_accept_list);
3005 init_completion(&isert_np->np_login_comp); 3015 init_completion(&isert_np->np_login_comp);
@@ -3048,18 +3058,6 @@ out:
3048} 3058}
3049 3059
3050static int 3060static int
3051isert_check_accept_queue(struct isert_np *isert_np)
3052{
3053 int empty;
3054
3055 mutex_lock(&isert_np->np_accept_mutex);
3056 empty = list_empty(&isert_np->np_accept_list);
3057 mutex_unlock(&isert_np->np_accept_mutex);
3058
3059 return empty;
3060}
3061
3062static int
3063isert_rdma_accept(struct isert_conn *isert_conn) 3061isert_rdma_accept(struct isert_conn *isert_conn)
3064{ 3062{
3065 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3063 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3151 int max_accept = 0, ret; 3149 int max_accept = 0, ret;
3152 3150
3153accept_wait: 3151accept_wait:
3154 ret = wait_event_interruptible(isert_np->np_accept_wq, 3152 ret = down_interruptible(&isert_np->np_sem);
3155 !isert_check_accept_queue(isert_np) ||
3156 np->np_thread_state == ISCSI_NP_THREAD_RESET);
3157 if (max_accept > 5) 3153 if (max_accept > 5)
3158 return -ENODEV; 3154 return -ENODEV;
3159 3155
3160 spin_lock_bh(&np->np_thread_lock); 3156 spin_lock_bh(&np->np_thread_lock);
3161 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 3157 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
3162 spin_unlock_bh(&np->np_thread_lock); 3158 spin_unlock_bh(&np->np_thread_lock);
3163 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3159 pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
3164 return -ENODEV; 3160 return -ENODEV;
3165 } 3161 }
3166 spin_unlock_bh(&np->np_thread_lock); 3162 spin_unlock_bh(&np->np_thread_lock);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 4c072ae34c01..da6612e68000 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -182,7 +182,7 @@ struct isert_device {
182}; 182};
183 183
184struct isert_np { 184struct isert_np {
185 wait_queue_head_t np_accept_wq; 185 struct semaphore np_sem;
186 struct rdma_cm_id *np_cm_id; 186 struct rdma_cm_id *np_cm_id;
187 struct mutex np_accept_mutex; 187 struct mutex np_accept_mutex;
188 struct list_head np_accept_list; 188 struct list_head np_accept_list;
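
The isert conversion replaces a waitqueue plus a separate emptiness re-check with a counting semaphore: the connect handler does one up() per queued connection and the accept thread one down_interruptible() per dequeue, so the removed isert_check_accept_queue() helper is no longer needed and the semaphore count itself tracks the queue length. A minimal POSIX analogue, with sem_t standing in for the kernel semaphore:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t np_sem;                    /* counts queued connect requests */
static pthread_mutex_t accept_mutex = PTHREAD_MUTEX_INITIALIZER;
static int accept_queue;                /* stands in for np_accept_list */

static void *connect_handler(void *arg)
{
    (void)arg;
    for (int i = 0; i < 3; i++) {
        pthread_mutex_lock(&accept_mutex);
        accept_queue++;                 /* list_add_tail() in the driver */
        pthread_mutex_unlock(&accept_mutex);
        sem_post(&np_sem);              /* up(&isert_np->np_sem) */
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    sem_init(&np_sem, 0, 0);            /* sema_init(&isert_np->np_sem, 0) */
    pthread_create(&t, NULL, connect_handler, NULL);

    for (int i = 0; i < 3; i++) {
        sem_wait(&np_sem);              /* down_interruptible() in the driver */
        pthread_mutex_lock(&accept_mutex);
        accept_queue--;                 /* dequeue one connection */
        pthread_mutex_unlock(&accept_mutex);
        printf("accepted connection %d\n", i);
    }
    pthread_join(t, NULL);
    return 0;
}
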
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
71 default y 71 default y
72 select SERIO 72 select SERIO
73 select SERIO_LIBPS2 73 select SERIO_LIBPS2
74 select SERIO_I8042 if X86 74 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
75 select SERIO_GSCPS2 if GSC 75 select SERIO_GSCPS2 if GSC
76 help 76 help
77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually 77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 2626773ff29b..2dd1d0dd4f7d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
243static void *atkbd_platform_fixup_data; 243static void *atkbd_platform_fixup_data;
244static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); 244static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
245 245
246/*
 247 * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
248 * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
249 */
250static bool atkbd_skip_deactivate;
251
246static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 252static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
247 ssize_t (*handler)(struct atkbd *, char *)); 253 ssize_t (*handler)(struct atkbd *, char *));
248static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, 254static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
768 * Make sure nothing is coming from the keyboard and disturbs our 774 * Make sure nothing is coming from the keyboard and disturbs our
769 * internal state. 775 * internal state.
770 */ 776 */
771 atkbd_deactivate(atkbd); 777 if (!atkbd_skip_deactivate)
778 atkbd_deactivate(atkbd);
772 779
773 return 0; 780 return 0;
774} 781}
@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
1638 return 1; 1645 return 1;
1639} 1646}
1640 1647
1648static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
1649{
1650 atkbd_skip_deactivate = true;
1651 return 1;
1652}
1653
1641static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { 1654static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1642 { 1655 {
1643 .matches = { 1656 .matches = {
@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1775 .callback = atkbd_setup_scancode_fixup, 1788 .callback = atkbd_setup_scancode_fixup,
1776 .driver_data = atkbd_oqo_01plus_scancode_fixup, 1789 .driver_data = atkbd_oqo_01plus_scancode_fixup,
1777 }, 1790 },
1791 {
1792 .matches = {
1793 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1794 DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
1795 },
1796 .callback = atkbd_deactivate_fixup,
1797 },
1798 {
1799 .matches = {
1800 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1801 DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
1802 },
1803 .callback = atkbd_deactivate_fixup,
1804 },
1778 { } 1805 { }
1779}; 1806};
1780 1807
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
111 unsigned short keycodes[MAX_KEYPAD_KEYS]; 111 unsigned short keycodes[MAX_KEYPAD_KEYS];
112 int rotary_rel_code[2]; 112 int rotary_rel_code[2];
113 113
114 unsigned int row_shift;
115
114 /* state row bits of each column scan */ 116 /* state row bits of each column scan */
115 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; 117 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
116 uint32_t direct_key_state; 118 uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
467 if ((bits_changed & (1 << row)) == 0) 469 if ((bits_changed & (1 << row)) == 0)
468 continue; 470 continue;
469 471
470 code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); 472 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
473
471 input_event(input_dev, EV_MSC, MSC_SCAN, code); 474 input_event(input_dev, EV_MSC, MSC_SCAN, code);
472 input_report_key(input_dev, keypad->keycodes[code], 475 input_report_key(input_dev, keypad->keycodes[code],
473 new_state[col] & (1 << row)); 476 new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
802 goto failed_put_clk; 805 goto failed_put_clk;
803 } 806 }
804 807
808 keypad->row_shift = get_count_order(pdata->matrix_key_cols);
809
805 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || 810 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
806 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { 811 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
807 input_dev->evbit[0] |= BIT_MASK(EV_REL); 812 input_dev->evbit[0] |= BIT_MASK(EV_REL);
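
The keypad fix derives the scan-code row shift from the platform's actual column count instead of a fixed constant: MATRIX_SCAN_CODE(row, col, shift) is (row << shift) + col, and the shift used while scanning must match the one the keymap was built with or the reported keycodes land on the wrong entries. A quick standalone check, with count_order() mimicking the kernel's get_count_order():

#include <assert.h>
#include <stdio.h>

#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))

/* ceil(log2(n)), like the kernel's get_count_order() */
static int count_order(unsigned int n)
{
    int order = 0;
    while ((1u << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned int rows = 4, cols = 5;       /* a 4x5 matrix keypad */
    int shift = count_order(cols);         /* 3: columns 0..4 need 3 bits */

    assert(shift == 3);
    /* each (row, col) pair maps to a unique keymap index */
    assert(MATRIX_SCAN_CODE(1, 4, shift) == 12);
    assert(MATRIX_SCAN_CODE(2, 0, shift) == 16);
    printf("row_shift=%d, max code=%d\n", shift,
           MATRIX_SCAN_CODE(rows - 1, cols - 1, shift));
    return 0;
}
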
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 55c15304ddbc..4e491c1762cf 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = {
392 { } 392 { }
393}; 393};
394MODULE_DEVICE_TABLE(of, tca8418_dt_ids); 394MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
395
396/*
397 * The device tree based i2c loader looks for
398 * "i2c:" + second_component_of(property("compatible"))
399 * and therefore we need an alias to be found.
400 */
401MODULE_ALIAS("i2c:tca8418");
395#endif 402#endif
396 403
397static struct i2c_driver tca8418_keypad_driver = { 404static struct i2c_driver tca8418_keypad_driver = {
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 52d3a9b28f0b..b36831c828d3 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -70,6 +70,7 @@
70#define BMA150_CFG_5_REG 0x11 70#define BMA150_CFG_5_REG 0x11
71 71
72#define BMA150_CHIP_ID 2 72#define BMA150_CHIP_ID 2
73#define BMA180_CHIP_ID 3
73#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG 74#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG
74 75
75#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG 76#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG
@@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client,
539 } 540 }
540 541
541 chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG); 542 chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
542 if (chip_id != BMA150_CHIP_ID) { 543 if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
543 dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id); 544 dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
544 return -EINVAL; 545 return -EINVAL;
545 } 546 }
@@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
643 644
644static const struct i2c_device_id bma150_id[] = { 645static const struct i2c_device_id bma150_id[] = {
645 { "bma150", 0 }, 646 { "bma150", 0 },
647 { "bma180", 0 },
646 { "smb380", 0 }, 648 { "smb380", 0 },
647 { "bma023", 0 }, 649 { "bma023", 0 },
648 { } 650 { }
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
17 default y 17 default y
18 select SERIO 18 select SERIO
19 select SERIO_LIBPS2 19 select SERIO_LIBPS2
20 select SERIO_I8042 if X86 20 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
21 select SERIO_GSCPS2 if GSC 21 select SERIO_GSCPS2 if GSC
22 help 22 help
23 Say Y here if you have a PS/2 mouse connected to your system. This 23 Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 088d3541c7d3..b96e978a37b7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/dmi.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/input.h> 17#include <linux/input.h>
@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
831 break; 832 break;
832 833
833 case 3: 834 case 3:
834 etd->reg_10 = 0x0b; 835 if (etd->set_hw_resolution)
836 etd->reg_10 = 0x0b;
837 else
838 etd->reg_10 = 0x03;
839
835 if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) 840 if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
836 rc = -1; 841 rc = -1;
837 842
@@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
1331} 1336}
1332 1337
1333/* 1338/*
1339 * Some hw_version 3 models go into error state when we try to set bit 3 of r10
1340 */
1341static const struct dmi_system_id no_hw_res_dmi_table[] = {
1342#if defined(CONFIG_DMI) && defined(CONFIG_X86)
1343 {
1344 /* Gigabyte U2442 */
1345 .matches = {
1346 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1347 DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
1348 },
1349 },
1350#endif
1351 { }
1352};
1353
1354/*
1334 * determine hardware version and set some properties according to it. 1355 * determine hardware version and set some properties according to it.
1335 */ 1356 */
1336static int elantech_set_properties(struct elantech_data *etd) 1357static int elantech_set_properties(struct elantech_data *etd)
@@ -1390,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd)
1390 */ 1411 */
1391 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); 1412 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
1392 1413
 1414 /* Enable real hardware resolution on hw_version 3? */
1415 etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
1416
1393 return 0; 1417 return 0;
1394} 1418}
1395 1419
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 036a04abaef7..9e0e2a1f340d 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -130,6 +130,7 @@ struct elantech_data {
130 bool jumpy_cursor; 130 bool jumpy_cursor;
131 bool reports_pressure; 131 bool reports_pressure;
132 bool crc_enabled; 132 bool crc_enabled;
133 bool set_hw_resolution;
133 unsigned char hw_version; 134 unsigned char hw_version;
134 unsigned int fw_version; 135 unsigned int fw_version;
135 unsigned int single_finger_reports; 136 unsigned int single_finger_reports;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ef9f4913450d..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
117} 117}
118 118
119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS 119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
120struct min_max_quirk {
121 const char * const *pnp_ids;
122 int x_min, x_max, y_min, y_max;
123};
124
125static const struct min_max_quirk min_max_pnpid_table[] = {
126 {
127 (const char * const []){"LEN0033", NULL},
128 1024, 5052, 2258, 4832
129 },
130 {
131 (const char * const []){"LEN0035", "LEN0042", NULL},
132 1232, 5710, 1156, 4696
133 },
134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
136 1024, 5112, 2024, 4832
137 },
138 {
139 (const char * const []){"LEN2001", NULL},
140 1024, 5022, 2508, 4832
141 },
142 { }
143};
144
120/* This list has been kindly provided by Synaptics. */ 145/* This list has been kindly provided by Synaptics. */
121static const char * const topbuttonpad_pnp_ids[] = { 146static const char * const topbuttonpad_pnp_ids[] = {
122 "LEN0017", 147 "LEN0017",
@@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
129 "LEN002D", 154 "LEN002D",
130 "LEN002E", 155 "LEN002E",
131 "LEN0033", /* Helix */ 156 "LEN0033", /* Helix */
132 "LEN0034", /* T431s, T540, X1 Carbon 2nd */ 157 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
133 "LEN0035", /* X240 */ 158 "LEN0035", /* X240 */
134 "LEN0036", /* T440 */ 159 "LEN0036", /* T440 */
135 "LEN0037", 160 "LEN0037",
@@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
142 "LEN0048", 167 "LEN0048",
143 "LEN0049", 168 "LEN0049",
144 "LEN2000", 169 "LEN2000",
145 "LEN2001", 170 "LEN2001", /* Edge E431 */
146 "LEN2002", 171 "LEN2002",
147 "LEN2003", 172 "LEN2003",
148 "LEN2004", /* L440 */ 173 "LEN2004", /* L440 */
@@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
156 NULL 181 NULL
157}; 182};
158 183
184static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
185{
186 int i;
187
188 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
189 for (i = 0; ids[i]; i++)
190 if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
191 return true;
192
193 return false;
194}
195
159/***************************************************************************** 196/*****************************************************************************
160 * Synaptics communications functions 197 * Synaptics communications functions
161 ****************************************************************************/ 198 ****************************************************************************/
@@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
304 * Resolution is left zero if touchpad does not support the query 341 * Resolution is left zero if touchpad does not support the query
305 */ 342 */
306 343
307static const int *quirk_min_max;
308
309static int synaptics_resolution(struct psmouse *psmouse) 344static int synaptics_resolution(struct psmouse *psmouse)
310{ 345{
311 struct synaptics_data *priv = psmouse->private; 346 struct synaptics_data *priv = psmouse->private;
312 unsigned char resp[3]; 347 unsigned char resp[3];
348 int i;
313 349
314 if (quirk_min_max) { 350 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
315 priv->x_min = quirk_min_max[0]; 351 if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
316 priv->x_max = quirk_min_max[1]; 352 priv->x_min = min_max_pnpid_table[i].x_min;
317 priv->y_min = quirk_min_max[2]; 353 priv->x_max = min_max_pnpid_table[i].x_max;
318 priv->y_max = quirk_min_max[3]; 354 priv->y_min = min_max_pnpid_table[i].y_min;
319 return 0; 355 priv->y_max = min_max_pnpid_table[i].y_max;
320 } 356 return 0;
357 }
321 358
322 if (SYN_ID_MAJOR(priv->identity) < 4) 359 if (SYN_ID_MAJOR(priv->identity) < 4)
323 return 0; 360 return 0;
@@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,
1365 1402
1366 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1403 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1367 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1404 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1368 /* See if this buttonpad has a top button area */ 1405 if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
1369 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { 1406 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1370 for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
1371 if (strstr(psmouse->ps2dev.serio->firmware_id,
1372 topbuttonpad_pnp_ids[i])) {
1373 __set_bit(INPUT_PROP_TOPBUTTONPAD,
1374 dev->propbit);
1375 break;
1376 }
1377 }
1378 }
1379 /* Clickpads report only left button */ 1407 /* Clickpads report only left button */
1380 __clear_bit(BTN_RIGHT, dev->keybit); 1408 __clear_bit(BTN_RIGHT, dev->keybit);
1381 __clear_bit(BTN_MIDDLE, dev->keybit); 1409 __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1547,96 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
1547 { } 1575 { }
1548}; 1576};
1549 1577
1550static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1551#if defined(CONFIG_DMI)
1552 {
1553 /* Lenovo ThinkPad Helix */
1554 .matches = {
1555 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1556 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
1557 },
1558 .driver_data = (int []){1024, 5052, 2258, 4832},
1559 },
1560 {
1561 /* Lenovo ThinkPad X240 */
1562 .matches = {
1563 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1564 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
1565 },
1566 .driver_data = (int []){1232, 5710, 1156, 4696},
1567 },
1568 {
1569 /* Lenovo ThinkPad T431s */
1570 .matches = {
1571 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1572 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
1573 },
1574 .driver_data = (int []){1024, 5112, 2024, 4832},
1575 },
1576 {
1577 /* Lenovo ThinkPad T440s */
1578 .matches = {
1579 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1580 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
1581 },
1582 .driver_data = (int []){1024, 5112, 2024, 4832},
1583 },
1584 {
1585 /* Lenovo ThinkPad L440 */
1586 .matches = {
1587 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1588 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
1589 },
1590 .driver_data = (int []){1024, 5112, 2024, 4832},
1591 },
1592 {
1593 /* Lenovo ThinkPad T540p */
1594 .matches = {
1595 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1596 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1597 },
1598 .driver_data = (int []){1024, 5056, 2058, 4832},
1599 },
1600 {
1601 /* Lenovo ThinkPad L540 */
1602 .matches = {
1603 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1604 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
1605 },
1606 .driver_data = (int []){1024, 5112, 2024, 4832},
1607 },
1608 {
1609 /* Lenovo Yoga S1 */
1610 .matches = {
1611 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1612 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
1613 "ThinkPad S1 Yoga"),
1614 },
1615 .driver_data = (int []){1232, 5710, 1156, 4696},
1616 },
1617 {
1618 /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
1619 .matches = {
1620 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1621 DMI_MATCH(DMI_PRODUCT_VERSION,
1622 "ThinkPad X1 Carbon 2nd"),
1623 },
1624 .driver_data = (int []){1024, 5112, 2024, 4832},
1625 },
1626#endif
1627 { }
1628};
1629
1630void __init synaptics_module_init(void) 1578void __init synaptics_module_init(void)
1631{ 1579{
1632 const struct dmi_system_id *min_max_dmi;
1633
1634 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); 1580 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
1635 broken_olpc_ec = dmi_check_system(olpc_dmi_table); 1581 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
1636
1637 min_max_dmi = dmi_first_match(min_max_dmi_table);
1638 if (min_max_dmi)
1639 quirk_min_max = min_max_dmi->driver_data;
1640} 1582}
1641 1583
1642static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) 1584static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
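
The synaptics rework retires the boot-time DMI table in favour of matching the PNP ID that the serio layer embeds in firmware_id, so one min_max_pnpid_table entry covers every machine shipping a given touchpad model. A standalone sketch of the matching logic; the firmware_id format is assumed to be as in the driver, e.g. "PNP: LEN0036 PNP0f13":

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool matches_pnp_id(const char *firmware_id, const char *const ids[])
{
    /* only trust the field when it actually carries PNP data */
    if (strncmp(firmware_id, "PNP:", 4))
        return false;
    for (int i = 0; ids[i]; i++)
        if (strstr(firmware_id, ids[i]))
            return true;
    return false;
}

int main(void)
{
    static const char *const t440_ids[] = { "LEN0034", "LEN0036", "LEN2004", NULL };

    printf("%d\n", matches_pnp_id("PNP: LEN0036 PNP0f13", t440_ids)); /* 1 */
    printf("%d\n", matches_pnp_id("PNP: LEN0017", t440_ids));         /* 0 */
    return 0;
}
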
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
79 writeb(divisor, KMICLKDIV); 79 writeb(divisor, KMICLKDIV);
80 writeb(KMICR_EN, KMICR); 80 writeb(KMICR_EN, KMICR);
81 81
82 ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); 82 ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
83 kmi);
83 if (ret) { 84 if (ret) {
84 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); 85 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
85 writeb(0, KMICR); 86 writeb(0, KMICR);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
640 640
641config TOUCHSCREEN_WM97XX_ATMEL 641config TOUCHSCREEN_WM97XX_ATMEL
642 tristate "WM97xx Atmel accelerated touch" 642 tristate "WM97xx Atmel accelerated touch"
643 depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) 643 depends on TOUCHSCREEN_WM97XX && AVR32
644 help 644 help
645 Say Y here for support for streaming mode with WM97xx touchscreens 645 Say Y here for support for streaming mode with WM97xx touchscreens
646 on Atmel AT91 or AVR32 systems with an AC97C module. 646 on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c949520bd196..57068e8035b5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3999 iommu_flush_dte(iommu, devid); 3999 iommu_flush_dte(iommu, devid);
4000 if (devid != alias) { 4000 if (devid != alias) {
4001 irq_lookup_table[alias] = table; 4001 irq_lookup_table[alias] = table;
4002 set_dte_irq_entry(devid, table); 4002 set_dte_irq_entry(alias, table);
4003 iommu_flush_dte(iommu, alias); 4003 iommu_flush_dte(iommu, alias);
4004 } 4004 }
4005 4005
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b76c58dbe30c..0e08545d7298 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
788 * per device. But we can enable the exclusion range per 788 * per device. But we can enable the exclusion range per
789 * device. This is done here 789 * device. This is done here
790 */ 790 */
791 set_dev_entry_bit(m->devid, DEV_ENTRY_EX); 791 set_dev_entry_bit(devid, DEV_ENTRY_EX);
792 iommu->exclusion_start = m->range_start; 792 iommu->exclusion_start = m->range_start;
793 iommu->exclusion_length = m->range_length; 793 iommu->exclusion_length = m->range_length;
794 } 794 }
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 5208828792e6..203b2e6a91cf 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
504 504
505 write = !!(fault->flags & PPR_FAULT_WRITE); 505 write = !!(fault->flags & PPR_FAULT_WRITE);
506 506
507 down_read(&fault->state->mm->mmap_sem);
507 npages = get_user_pages(fault->state->task, fault->state->mm, 508 npages = get_user_pages(fault->state->task, fault->state->mm,
508 fault->address, 1, write, 0, &page, NULL); 509 fault->address, 1, write, 0, &page, NULL);
510 up_read(&fault->state->mm->mmap_sem);
509 511
510 if (npages == 1) { 512 if (npages == 1) {
511 put_page(page); 513 put_page(page);
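
The amd_iommu_v2 fix brackets get_user_pages() with down_read()/up_read() on mmap_sem, which that API requires so the address space cannot change under the walk. The shape of the pattern as a userspace pthread_rwlock sketch (the "page table" is just an array here):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;
static int page_table[16];              /* stands in for the mm's mappings */

/* caller must hold map_sem for read, as get_user_pages() requires mmap_sem */
static int lookup_page(unsigned int addr)
{
    return page_table[addr % 16];
}

int main(void)
{
    int pfn;

    pthread_rwlock_rdlock(&map_sem);    /* down_read(&mm->mmap_sem) */
    pfn = lookup_page(42);
    pthread_rwlock_unlock(&map_sem);    /* up_read(&mm->mmap_sem) */
    printf("pfn=%d\n", pfn);
    return 0;
}
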
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 41be897df8d5..3899ba7821c5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -41,6 +41,7 @@
41#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) 41#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
42#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 42#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
43#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 43#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
44#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
44 45
45#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 46#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
46#define ARMADA_375_PPI_CAUSE (0x10) 47#define ARMADA_375_PPI_CAUSE (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
132 struct msi_desc *desc) 133 struct msi_desc *desc)
133{ 134{
134 struct msi_msg msg; 135 struct msi_msg msg;
135 irq_hw_number_t hwirq; 136 int virq, hwirq;
136 int virq;
137 137
138 hwirq = armada_370_xp_alloc_msi(); 138 hwirq = armada_370_xp_alloc_msi();
139 if (hwirq < 0) 139 if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
159 unsigned int irq) 159 unsigned int irq)
160{ 160{
161 struct irq_data *d = irq_get_irq_data(irq); 161 struct irq_data *d = irq_get_irq_data(irq);
162 unsigned long hwirq = d->hwirq;
163
162 irq_dispose_mapping(irq); 164 irq_dispose_mapping(irq);
163 armada_370_xp_free_msi(d->hwirq); 165 armada_370_xp_free_msi(hwirq);
166}
167
168static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
169 int nvec, int type)
170{
171 /* We support MSI, but not MSI-X */
172 if (type == PCI_CAP_ID_MSI)
173 return 0;
174 return -EINVAL;
164} 175}
165 176
166static struct irq_chip armada_370_xp_msi_irq_chip = { 177static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
201 212
202 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 213 msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
203 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 214 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
215 msi_chip->check_device = armada_370_xp_check_msi_device;
204 msi_chip->of_node = node; 216 msi_chip->of_node = node;
205 217
206 armada_370_xp_msi_domain = 218 armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
244static int armada_xp_set_affinity(struct irq_data *d, 256static int armada_xp_set_affinity(struct irq_data *d,
245 const struct cpumask *mask_val, bool force) 257 const struct cpumask *mask_val, bool force)
246{ 258{
247 unsigned long reg;
248 unsigned long new_mask = 0;
249 unsigned long online_mask = 0;
250 unsigned long count = 0;
251 irq_hw_number_t hwirq = irqd_to_hwirq(d); 259 irq_hw_number_t hwirq = irqd_to_hwirq(d);
260 unsigned long reg, mask;
252 int cpu; 261 int cpu;
253 262
254 for_each_cpu(cpu, mask_val) { 263 /* Select a single core from the affinity mask which is online */
255 new_mask |= 1 << cpu_logical_map(cpu); 264 cpu = cpumask_any_and(mask_val, cpu_online_mask);
256 count++; 265 mask = 1UL << cpu_logical_map(cpu);
257 }
258
259 /*
 260 * Forbid multicore interrupt affinity
261 * This is required since the MPIC HW doesn't limit
262 * several CPUs from acknowledging the same interrupt.
263 */
264 if (count > 1)
265 return -EINVAL;
266
267 for_each_cpu(cpu, cpu_online_mask)
268 online_mask |= 1 << cpu_logical_map(cpu);
269 266
270 raw_spin_lock(&irq_controller_lock); 267 raw_spin_lock(&irq_controller_lock);
271
272 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 268 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
273 reg = (reg & (~online_mask)) | new_mask; 269 reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
274 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 270 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
275
276 raw_spin_unlock(&irq_controller_lock); 271 raw_spin_unlock(&irq_controller_lock);
277 272
278 return 0; 273 return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
494 489
495#ifdef CONFIG_SMP 490#ifdef CONFIG_SMP
496 armada_xp_mpic_smp_cpu_init(); 491 armada_xp_mpic_smp_cpu_init();
497
498 /*
499 * Set the default affinity from all CPUs to the boot cpu.
500 * This is required since the MPIC doesn't limit several CPUs
501 * from acknowledging the same interrupt.
502 */
503 cpumask_clear(irq_default_affinity);
504 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
505
506#endif 492#endif
507 493
508 armada_370_xp_msi_init(node, main_int_res.start); 494 armada_370_xp_msi_init(node, main_int_res.start);
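
The affinity rework stops rejecting multi-CPU masks outright: cpumask_any_and() picks a single online CPU from the requested mask (the MPIC cannot keep several CPUs from acknowledging the same interrupt, per the deleted comment), and the register update clears only the fixed 4-bit CPU field instead of a recomputed online-CPU mask. The read-modify-write step, sketched against a plain variable in place of the MMIO register:

#include <assert.h>
#include <stdio.h>

#define INT_SOURCE_CPU_MASK 0xF             /* low 4 bits route the IRQ */

static unsigned int source_ctl_reg = 0x130; /* fake register contents */

static void set_irq_cpu(unsigned int cpu_bit)
{
    unsigned int reg = source_ctl_reg;                 /* readl()  */
    reg = (reg & ~INT_SOURCE_CPU_MASK) | cpu_bit;      /* keep the rest */
    source_ctl_reg = reg;                              /* writel() */
}

int main(void)
{
    set_irq_cpu(1u << 2);                  /* route to CPU 2 */
    assert((source_ctl_reg & INT_SOURCE_CPU_MASK) == 0x4);
    assert((source_ctl_reg & ~INT_SOURCE_CPU_MASK) == 0x130); /* untouched */
    printf("reg=0x%x\n", source_ctl_reg);
    return 0;
}
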
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index fc817d28d1fe..3d15d16a7088 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
107 int i, size, max, reserved = 0, entry; 107 int i, size, max, reserved = 0, entry;
108 const __be32 *irqsr; 108 const __be32 *irqsr;
109 109
110 cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); 110 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
111 111
112 if (!cb) 112 if (!cb)
113 return -ENOMEM; 113 return -ENOMEM;
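
The crossbar one-liner is the classic sizeof slip: sizeof(struct cb_device *) allocates room for a pointer, not the structure, while the sizeof(*cb) idiom always tracks whatever type cb points at. Demonstrated standalone:

#include <stdio.h>
#include <stdlib.h>

struct cb_device {
    int int_max;
    char name[64];
};

int main(void)
{
    struct cb_device *cb;

    printf("sizeof(struct cb_device *) = %zu\n", sizeof(struct cb_device *));
    printf("sizeof(*cb)                = %zu\n", sizeof(*cb));

    cb = calloc(1, sizeof(*cb));        /* right: the pointed-to object */
    if (!cb)
        return 1;
    free(cb);
    return 0;
}
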
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 51dae9167238..96d1df05044f 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -425,7 +425,7 @@ afterXPR:
425 if (cs->debug & L1_DEB_MONITOR) 425 if (cs->debug & L1_DEB_MONITOR)
426 debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]); 426 debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
427 } 427 }
428 AfterMOX1: 428 AfterMOX1: ;
429#endif 429#endif
430 } 430 }
431 } 431 }
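
The icc.c change gives the AfterMOX1 label something to bind to: C (before C23) requires a label to precede a statement, and here the label sat directly before the block's closing brace (only an #endif between them), which newer compilers reject. The added empty statement ';' satisfies the grammar:

#include <stdio.h>

static void drain(int n)
{
    if (!n)
        goto done;
    printf("draining %d monitor bytes\n", n);
done: ;  /* without the ';' the label would sit directly before '}',
          * and a label must label a statement */
}

int main(void)
{
    drain(2);
    drain(0);
    return 0;
}
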
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1bf4a71919ec..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2178 ti->num_discard_bios = 1; 2178 ti->num_discard_bios = 1;
2179 ti->discards_supported = true; 2179 ti->discards_supported = true;
2180 ti->discard_zeroes_data_unsupported = true; 2180 ti->discard_zeroes_data_unsupported = true;
2181 /* Discard bios must be split on a block boundary */
2182 ti->split_discard_bios = true;
2181 2183
2182 cache->features = ca->features; 2184 cache->features = ca->features;
2183 ti->per_bio_data_size = get_per_bio_data_size(cache); 2185 ti->per_bio_data_size = get_per_bio_data_size(cache);
@@ -2488,6 +2490,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2488 2490
2489 } else { 2491 } else {
2490 inc_hit_counter(cache, bio); 2492 inc_hit_counter(cache, bio);
2493 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2491 2494
2492 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2495 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2493 !is_dirty(cache, lookup_result.cblock)) 2496 !is_dirty(cache, lookup_result.cblock))
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/backing-dev.h> 21#include <linux/backing-dev.h>
22#include <linux/percpu.h>
23#include <linux/atomic.h> 22#include <linux/atomic.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/page.h> 24#include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
43 struct bvec_iter iter_out; 42 struct bvec_iter iter_out;
44 sector_t cc_sector; 43 sector_t cc_sector;
45 atomic_t cc_pending; 44 atomic_t cc_pending;
45 struct ablkcipher_request *req;
46}; 46};
47 47
48/* 48/*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
112 112
113/* 113/*
114 * Duplicated per-CPU state for cipher. 114 * The fields in here must be read only after initialization.
115 */
116struct crypt_cpu {
117 struct ablkcipher_request *req;
118};
119
120/*
121 * The fields in here must be read only after initialization,
122 * changing state should be in crypt_cpu.
123 */ 115 */
124struct crypt_config { 116struct crypt_config {
125 struct dm_dev *dev; 117 struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
150 sector_t iv_offset; 142 sector_t iv_offset;
151 unsigned int iv_size; 143 unsigned int iv_size;
152 144
153 /*
154 * Duplicated per cpu state. Access through
155 * per_cpu_ptr() only.
156 */
157 struct crypt_cpu __percpu *cpu;
158
159 /* ESSIV: struct crypto_cipher *essiv_tfm */ 145 /* ESSIV: struct crypto_cipher *essiv_tfm */
160 void *iv_private; 146 void *iv_private;
161 struct crypto_ablkcipher **tfms; 147 struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
192static void kcryptd_queue_crypt(struct dm_crypt_io *io); 178static void kcryptd_queue_crypt(struct dm_crypt_io *io);
193static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); 179static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
194 180
195static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
196{
197 return this_cpu_ptr(cc->cpu);
198}
199
200/* 181/*
201 * Use this to access cipher attributes that are the same for each CPU. 182 * Use this to access cipher attributes that are the same for each CPU.
202 */ 183 */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
903static void crypt_alloc_req(struct crypt_config *cc, 884static void crypt_alloc_req(struct crypt_config *cc,
904 struct convert_context *ctx) 885 struct convert_context *ctx)
905{ 886{
906 struct crypt_cpu *this_cc = this_crypt_config(cc);
907 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 887 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
908 888
909 if (!this_cc->req) 889 if (!ctx->req)
910 this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); 890 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
911 891
912 ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); 892 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
913 ablkcipher_request_set_callback(this_cc->req, 893 ablkcipher_request_set_callback(ctx->req,
914 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 894 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
915 kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); 895 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
916} 896}
917 897
918/* 898/*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
921static int crypt_convert(struct crypt_config *cc, 901static int crypt_convert(struct crypt_config *cc,
922 struct convert_context *ctx) 902 struct convert_context *ctx)
923{ 903{
924 struct crypt_cpu *this_cc = this_crypt_config(cc);
925 int r; 904 int r;
926 905
927 atomic_set(&ctx->cc_pending, 1); 906 atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
932 911
933 atomic_inc(&ctx->cc_pending); 912 atomic_inc(&ctx->cc_pending);
934 913
935 r = crypt_convert_block(cc, ctx, this_cc->req); 914 r = crypt_convert_block(cc, ctx, ctx->req);
936 915
937 switch (r) { 916 switch (r) {
938 /* async */ 917 /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
941 reinit_completion(&ctx->restart); 920 reinit_completion(&ctx->restart);
942 /* fall through*/ 921 /* fall through*/
943 case -EINPROGRESS: 922 case -EINPROGRESS:
944 this_cc->req = NULL; 923 ctx->req = NULL;
945 ctx->cc_sector++; 924 ctx->cc_sector++;
946 continue; 925 continue;
947 926
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
1040 io->sector = sector; 1019 io->sector = sector;
1041 io->error = 0; 1020 io->error = 0;
1042 io->base_io = NULL; 1021 io->base_io = NULL;
1022 io->ctx.req = NULL;
1043 atomic_set(&io->io_pending, 0); 1023 atomic_set(&io->io_pending, 0);
1044 1024
1045 return io; 1025 return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1065 if (!atomic_dec_and_test(&io->io_pending)) 1045 if (!atomic_dec_and_test(&io->io_pending))
1066 return; 1046 return;
1067 1047
1048 if (io->ctx.req)
1049 mempool_free(io->ctx.req, cc->req_pool);
1068 mempool_free(io, cc->io_pool); 1050 mempool_free(io, cc->io_pool);
1069 1051
1070 if (likely(!base_io)) 1052 if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
1492static void crypt_dtr(struct dm_target *ti) 1474static void crypt_dtr(struct dm_target *ti)
1493{ 1475{
1494 struct crypt_config *cc = ti->private; 1476 struct crypt_config *cc = ti->private;
1495 struct crypt_cpu *cpu_cc;
1496 int cpu;
1497 1477
1498 ti->private = NULL; 1478 ti->private = NULL;
1499 1479
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
1505 if (cc->crypt_queue) 1485 if (cc->crypt_queue)
1506 destroy_workqueue(cc->crypt_queue); 1486 destroy_workqueue(cc->crypt_queue);
1507 1487
1508 if (cc->cpu)
1509 for_each_possible_cpu(cpu) {
1510 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1511 if (cpu_cc->req)
1512 mempool_free(cpu_cc->req, cc->req_pool);
1513 }
1514
1515 crypt_free_tfms(cc); 1488 crypt_free_tfms(cc);
1516 1489
1517 if (cc->bs) 1490 if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
1530 if (cc->dev) 1503 if (cc->dev)
1531 dm_put_device(ti, cc->dev); 1504 dm_put_device(ti, cc->dev);
1532 1505
1533 if (cc->cpu)
1534 free_percpu(cc->cpu);
1535
1536 kzfree(cc->cipher); 1506 kzfree(cc->cipher);
1537 kzfree(cc->cipher_string); 1507 kzfree(cc->cipher_string);
1538 1508
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1588 if (tmp) 1558 if (tmp)
1589 DMWARN("Ignoring unexpected additional cipher options"); 1559 DMWARN("Ignoring unexpected additional cipher options");
1590 1560
1591 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
1592 __alignof__(struct crypt_cpu));
1593 if (!cc->cpu) {
1594 ti->error = "Cannot allocate per cpu state";
1595 goto bad_mem;
1596 }
1597
1598 /* 1561 /*
1599 * For compatibility with the original dm-crypt mapping format, if 1562 * For compatibility with the original dm-crypt mapping format, if
1600 * only the cipher name is supplied, use cbc-plain. 1563 * only the cipher name is supplied, use cbc-plain.
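
The dm-crypt rework drops the __percpu crypt_cpu scratch area entirely and hangs the in-flight ablkcipher request off the convert_context: allocated lazily on first use, freed when the I/O completes. Per-request state needs no CPU pinning and stays valid if processing migrates between CPUs. A hedged sketch of the lazy-allocate/free-with-owner shape, with plain calloc standing in for the mempool:

#include <stdio.h>
#include <stdlib.h>

struct request { int id; };

struct convert_ctx {
    struct request *req;                /* lazily allocated, owned by the I/O */
};

static void ctx_alloc_req(struct convert_ctx *ctx)
{
    if (!ctx->req)                      /* only on first use for this I/O */
        ctx->req = calloc(1, sizeof(*ctx->req));
}

static void ctx_finish(struct convert_ctx *ctx)
{
    free(ctx->req);                     /* mempool_free() in the driver */
    ctx->req = NULL;
}

int main(void)
{
    struct convert_ctx ctx = { .req = NULL };

    ctx_alloc_req(&ctx);                /* crypt_alloc_req()            */
    ctx_alloc_req(&ctx);                /* no-op: request already there */
    printf("req=%p\n", (void *)ctx.req);
    ctx_finish(&ctx);                   /* crypt_dec_pending() path     */
    return 0;
}
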
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
445 else 445 else
446 m->saved_queue_if_no_path = queue_if_no_path; 446 m->saved_queue_if_no_path = queue_if_no_path;
447 m->queue_if_no_path = queue_if_no_path; 447 m->queue_if_no_path = queue_if_no_path;
448 if (!m->queue_if_no_path)
449 dm_table_run_md_queue_async(m->ti->table);
450
451 spin_unlock_irqrestore(&m->lock, flags); 448 spin_unlock_irqrestore(&m->lock, flags);
452 449
450 if (!queue_if_no_path)
451 dm_table_run_md_queue_async(m->ti->table);
452
453 return 0; 453 return 0;
454} 454}
455 455
@@ -954,7 +954,7 @@ out:
954 */ 954 */
955static int reinstate_path(struct pgpath *pgpath) 955static int reinstate_path(struct pgpath *pgpath)
956{ 956{
957 int r = 0; 957 int r = 0, run_queue = 0;
958 unsigned long flags; 958 unsigned long flags;
959 struct multipath *m = pgpath->pg->m; 959 struct multipath *m = pgpath->pg->m;
960 960
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
978 978
979 if (!m->nr_valid_paths++) { 979 if (!m->nr_valid_paths++) {
980 m->current_pgpath = NULL; 980 m->current_pgpath = NULL;
981 dm_table_run_md_queue_async(m->ti->table); 981 run_queue = 1;
982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
984 m->pg_init_in_progress++; 984 m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
991 991
992out: 992out:
993 spin_unlock_irqrestore(&m->lock, flags); 993 spin_unlock_irqrestore(&m->lock, flags);
994 if (run_queue)
995 dm_table_run_md_queue_async(m->ti->table);
994 996
995 return r; 997 return r;
996} 998}
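
Both dm-mpath hunks move dm_table_run_md_queue_async() out from under m->lock: the decision is recorded in a local while the spinlock is held (the queue_if_no_path argument, or the new run_queue flag) and acted on only after the unlock, since calling back into the block layer with a spinlock held invites deadlock. The pattern in miniature:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_valid_paths;

static void run_queue_async(void)       /* may sleep or take other locks */
{
    printf("kicking the request queue\n");
}

static void reinstate_path(void)
{
    int run_queue = 0;

    pthread_mutex_lock(&m_lock);
    if (!nr_valid_paths++)              /* first path back: queue stalled */
        run_queue = 1;                  /* only record the decision here */
    pthread_mutex_unlock(&m_lock);

    if (run_queue)                      /* act after dropping the lock */
        run_queue_async();
}

int main(void)
{
    reinstate_path();
    reinstate_path();                   /* second path: no kick needed */
    return 0;
}
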
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,9 @@
27#define MAPPING_POOL_SIZE 1024 27#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024 28#define PRISON_CELLS 1024
29#define COMMIT_PERIOD HZ 29#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT_SECS 60
31
32static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
30 33
31DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 34DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
32 "A percentage of time allocated for copy on write"); 35 "A percentage of time allocated for copy on write");
@@ -175,6 +178,7 @@ struct pool {
175 struct workqueue_struct *wq; 178 struct workqueue_struct *wq;
176 struct work_struct worker; 179 struct work_struct worker;
177 struct delayed_work waker; 180 struct delayed_work waker;
181 struct delayed_work no_space_timeout;
178 182
179 unsigned long last_commit_jiffies; 183 unsigned long last_commit_jiffies;
180 unsigned ref_count; 184 unsigned ref_count;
@@ -232,6 +236,13 @@ struct thin_c {
232 struct bio_list deferred_bio_list; 236 struct bio_list deferred_bio_list;
233 struct bio_list retry_on_resume_list; 237 struct bio_list retry_on_resume_list;
234 struct rb_root sort_bio_list; /* sorted list of deferred bios */ 238 struct rb_root sort_bio_list; /* sorted list of deferred bios */
239
240 /*
241 * Ensures the thin is not destroyed until the worker has finished
242 * iterating the active_thins list.
243 */
244 atomic_t refcount;
245 struct completion can_destroy;
235}; 246};
236 247
237/*----------------------------------------------------------------*/ 248/*----------------------------------------------------------------*/
@@ -928,7 +939,7 @@ static int commit(struct pool *pool)
928{ 939{
929 int r; 940 int r;
930 941
931 if (get_pool_mode(pool) != PM_WRITE) 942 if (get_pool_mode(pool) >= PM_READ_ONLY)
932 return -EINVAL; 943 return -EINVAL;
933 944
934 r = dm_pool_commit_metadata(pool->pmd); 945 r = dm_pool_commit_metadata(pool->pmd);
@@ -1486,6 +1497,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
1486 blk_finish_plug(&plug); 1497 blk_finish_plug(&plug);
1487} 1498}
1488 1499
1500static void thin_get(struct thin_c *tc);
1501static void thin_put(struct thin_c *tc);
1502
1503/*
1504 * We can't hold rcu_read_lock() around code that can block. So we
1505 * find a thin with the rcu lock held; bump a refcount; then drop
1506 * the lock.
1507 */
1508static struct thin_c *get_first_thin(struct pool *pool)
1509{
1510 struct thin_c *tc = NULL;
1511
1512 rcu_read_lock();
1513 if (!list_empty(&pool->active_thins)) {
1514 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1515 thin_get(tc);
1516 }
1517 rcu_read_unlock();
1518
1519 return tc;
1520}
1521
1522static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1523{
1524 struct thin_c *old_tc = tc;
1525
1526 rcu_read_lock();
1527 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1528 thin_get(tc);
1529 thin_put(old_tc);
1530 rcu_read_unlock();
1531 return tc;
1532 }
1533 thin_put(old_tc);
1534 rcu_read_unlock();
1535
1536 return NULL;
1537}
1538
1489static void process_deferred_bios(struct pool *pool) 1539static void process_deferred_bios(struct pool *pool)
1490{ 1540{
1491 unsigned long flags; 1541 unsigned long flags;
@@ -1493,10 +1543,11 @@ static void process_deferred_bios(struct pool *pool)
1493 struct bio_list bios; 1543 struct bio_list bios;
1494 struct thin_c *tc; 1544 struct thin_c *tc;
1495 1545
1496 rcu_read_lock(); 1546 tc = get_first_thin(pool);
1497 list_for_each_entry_rcu(tc, &pool->active_thins, list) 1547 while (tc) {
1498 process_thin_deferred_bios(tc); 1548 process_thin_deferred_bios(tc);
1499 rcu_read_unlock(); 1549 tc = get_next_thin(pool, tc);
1550 }
1500 1551
1501 /* 1552 /*
1502 * If there are any deferred flush bios, we must commit 1553 * If there are any deferred flush bios, we must commit
@@ -1543,6 +1594,20 @@ static void do_waker(struct work_struct *ws)
1543 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1594 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1544} 1595}
1545 1596
1597/*
1598 * We're holding onto IO to allow userland time to react. After the
1599 * timeout either the pool will have been resized (and thus back in
1600 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1601 */
1602static void do_no_space_timeout(struct work_struct *ws)
1603{
1604 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1605 no_space_timeout);
1606
1607 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1608 set_pool_mode(pool, PM_READ_ONLY);
1609}
1610
1546/*----------------------------------------------------------------*/ 1611/*----------------------------------------------------------------*/
1547 1612
1548struct noflush_work { 1613struct noflush_work {
@@ -1578,7 +1643,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1578{ 1643{
1579 struct noflush_work w; 1644 struct noflush_work w;
1580 1645
1581 INIT_WORK(&w.worker, fn); 1646 INIT_WORK_ONSTACK(&w.worker, fn);
1582 w.tc = tc; 1647 w.tc = tc;
1583 atomic_set(&w.complete, 0); 1648 atomic_set(&w.complete, 0);
1584 init_waitqueue_head(&w.wait); 1649 init_waitqueue_head(&w.wait);
@@ -1607,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1607 struct pool_c *pt = pool->ti->private; 1672 struct pool_c *pt = pool->ti->private;
1608 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); 1673 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1609 enum pool_mode old_mode = get_pool_mode(pool); 1674 enum pool_mode old_mode = get_pool_mode(pool);
1675 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
1610 1676
1611 /* 1677 /*
1612 * Never allow the pool to transition to PM_WRITE mode if user 1678 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1668,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
		pool->process_discard = process_discard;
		pool->process_prepared_mapping = process_prepared_mapping;
		pool->process_prepared_discard = process_prepared_discard_passdown;
+
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
		break;
 
	case PM_WRITE:
@@ -2053,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
	INIT_WORK(&pool->worker, do_worker);
	INIT_DELAYED_WORK(&pool->waker, do_waker);
+	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
	spin_lock_init(&pool->lock);
	bio_list_init(&pool->deferred_flush_bios);
	INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2615,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti)
	struct pool *pool = pt->pool;
 
	cancel_delayed_work(&pool->waker);
+	cancel_delayed_work(&pool->no_space_timeout);
	flush_workqueue(pool->wq);
	(void) commit(pool);
 }
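
[Editor's note] Taken together, the no_space_timeout hunks follow the standard delayed-work lifecycle: INIT_DELAYED_WORK() once at pool creation, queue_delayed_work() when the pool runs out of data space, cancel_delayed_work() on suspend. A condensed sketch of that lifecycle (demo_* names are hypothetical):

	#include <linux/workqueue.h>

	struct demo_pool {
		struct workqueue_struct *wq;
		struct delayed_work no_space_timeout;
	};

	static void demo_no_space_timeout(struct work_struct *ws)
	{
		struct demo_pool *pool = container_of(to_delayed_work(ws),
					struct demo_pool, no_space_timeout);
		/* ... degrade the pool to read-only here ... */
	}

	static void demo_create(struct demo_pool *pool)
	{
		INIT_DELAYED_WORK(&pool->no_space_timeout, demo_no_space_timeout);
	}

	static void demo_out_of_space(struct demo_pool *pool, unsigned long secs)
	{
		/* arm the timeout; queueing while already pending is a no-op */
		queue_delayed_work(pool->wq, &pool->no_space_timeout, secs * HZ);
	}

	static void demo_postsuspend(struct demo_pool *pool)
	{
		cancel_delayed_work(&pool->no_space_timeout);
		flush_workqueue(pool->wq);	/* let a running instance finish */
	}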
@@ -3061,11 +3132,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+	atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+	if (atomic_dec_and_test(&tc->refcount))
+		complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
	struct thin_c *tc = ti->private;
	unsigned long flags;
 
+	thin_put(tc);
+	wait_for_completion(&tc->can_destroy);
+
	spin_lock_irqsave(&tc->pool->lock, flags);
	list_del_rcu(&tc->list);
	spin_unlock_irqrestore(&tc->pool->lock, flags);
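
[Editor's note] thin_get()/thin_put() above implement the common refcount-plus-completion teardown idiom: the destructor drops the initial reference, then sleeps until every transient holder is gone. A generic sketch (demo_* names are hypothetical):

	#include <linux/atomic.h>
	#include <linux/completion.h>

	struct demo_obj {
		atomic_t refcount;
		struct completion can_destroy;
	};

	static void demo_obj_init(struct demo_obj *o)
	{
		atomic_set(&o->refcount, 1);	/* the owner's reference */
		init_completion(&o->can_destroy);
	}

	static void demo_obj_get(struct demo_obj *o)
	{
		atomic_inc(&o->refcount);
	}

	static void demo_obj_put(struct demo_obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->can_destroy);
	}

	static void demo_obj_destroy(struct demo_obj *o)
	{
		demo_obj_put(o);			/* drop the owner's reference */
		wait_for_completion(&o->can_destroy);	/* wait out transient holders */
		/* no one can hold a reference now; safe to free */
	}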
@@ -3101,6 +3186,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;
+	unsigned long flags;
 
	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3277,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	spin_lock(&tc->pool->lock);
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
+
+	spin_lock_irqsave(&tc->pool->lock, flags);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-	spin_unlock(&tc->pool->lock);
+	spin_unlock_irqrestore(&tc->pool->lock, flags);
	/*
	 * This synchronize_rcu() call is needed here otherwise we risk a
	 * wake_worker() call finding no bios to process (because the newly
@@ -3422,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 796007a5e0e1..7a7bab8947ae 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -330,15 +330,17 @@ test_block_hash:
			return r;
		}
	}
-
	todo = 1 << v->data_dev_block_bits;
-	while (io->iter.bi_size) {
+	do {
		u8 *page;
+		unsigned len;
		struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
		page = kmap_atomic(bv.bv_page);
-		r = crypto_shash_update(desc, page + bv.bv_offset,
-				bv.bv_len);
+		len = bv.bv_len;
+		if (likely(len >= todo))
+			len = todo;
+		r = crypto_shash_update(desc, page + bv.bv_offset, len);
		kunmap_atomic(page);
 
		if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
			return r;
		}
 
-		bio_advance_iter(bio, &io->iter, bv.bv_len);
-	}
+		bio_advance_iter(bio, &io->iter, len);
+		todo -= len;
+	} while (todo);
 
	if (!v->version) {
		r = crypto_shash_update(desc, v->salt, v->salt_size);
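
[Editor's note] The dm-verity hunk above fixes the hash loop so it consumes exactly one data block even when a bio_vec segment is larger than the block: each update is clamped to the bytes still owed ('todo') and the iterator advances by the clamped length. A self-contained sketch of that clamp-and-advance logic (the struct and callback are hypothetical stand-ins for the bio_vec iterator and crypto_shash_update()):

	#include <stddef.h>

	struct seg { const unsigned char *p; size_t len; };

	static void hash_one_block(const struct seg *segs, size_t block_size,
				   void (*update)(const unsigned char *, size_t))
	{
		size_t todo = block_size;

		do {
			size_t len = segs->len;

			if (len >= todo)
				len = todo;	/* clamp to the block boundary */
			update(segs->p, len);
			todo -= len;
			if (len == segs->len)
				segs++;		/* segment fully consumed */
		} while (todo);
	}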
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8fda38d23e38..2382cfc9bb3f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7381,8 +7381,10 @@ void md_do_sync(struct md_thread *thread)
	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
-	if (mddev->ro) /* never try to sync a read-only array */
+	if (mddev->ro) {/* never try to sync a read-only array */
+		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
+	}
 
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
@@ -7824,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev)
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
@@ -8516,7 +8519,8 @@ static int md_notify_reboot(struct notifier_block *this,
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
-			mddev->safemode = 2;
+			if (mddev->persistent)
+				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 33fc408e5eac..cb882aae9e20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
	int max_sectors;
	int sectors;
 
+	/*
+	 * Register the new request and wait if the reconstruction
+	 * thread has put up a bar for new requests.
+	 * Continue immediately if no resync is active currently.
+	 */
+	wait_barrier(conf);
+
	sectors = bio_sectors(bio);
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 
	md_write_start(mddev, bio);
 
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
 
	do {
 
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e8a1ce204036..cdd7c1b7259b 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
	 * windows that fall outside that.
	 */
	for (i = 0; i < n_win_sizes; i++) {
-		struct ov7670_win_size *win = &info->devtype->win_sizes[index];
+		struct ov7670_win_size *win = &info->devtype->win_sizes[i];
		if (info->min_width && win->width < info->min_width)
			continue;
		if (info->min_height && win->height < info->min_height)
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index a4459301b5f8..ee0f57e01b56 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
	if (ret < 0)
		return -EINVAL;
 
-	node_ep = v4l2_of_get_next_endpoint(node, NULL);
+	node_ep = of_graph_get_next_endpoint(node, NULL);
	if (!node_ep) {
		dev_warn(dev, "no endpoint defined for node: %s\n",
						node->full_name);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index d5a7a135f75d..703560fa5e73 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
	struct media_entity *ent;
	struct media_entity_desc u_ent;
 
+	memset(&u_ent, 0, sizeof(u_ent));
	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
		return -EFAULT;
 
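
[Editor's note] The one-line media-device fix above is an information-leak repair: the descriptor is built up field by field, so any bytes the handler never writes (padding included) would otherwise carry uninitialized kernel stack back to userspace. A sketch of the safe shape (demo_* names are hypothetical):

	#include <linux/uaccess.h>
	#include <linux/string.h>

	struct demo_desc { u32 id; char name[32]; u32 reserved[4]; };

	static long demo_enum(struct demo_desc __user *uptr)
	{
		struct demo_desc d;

		memset(&d, 0, sizeof(d));	/* zero before partial fills */
		if (copy_from_user(&d.id, &uptr->id, sizeof(d.id)))
			return -EFAULT;

		/* ... fill in only the fields we know; the rest stays zero ... */

		if (copy_to_user(uptr, &d, sizeof(d)))
			return -EFAULT;
		return 0;
	}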
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index b4f12d00be05..656708252962 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
 {
	struct vpbe_fh *fh = vb2_get_drv_priv(vq);
	struct vpbe_layer *layer = fh->layer;
+	struct vpbe_display *disp = fh->disp_dev;
+	unsigned long flags;
 
	if (!vb2_is_streaming(vq))
		return 0;
 
	/* release all active buffers */
+	spin_lock_irqsave(&disp->dma_queue_lock, flags);
+	if (layer->cur_frm == layer->next_frm) {
+		vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+	} else {
+		if (layer->cur_frm != NULL)
+			vb2_buffer_done(&layer->cur_frm->vb,
+					VB2_BUF_STATE_ERROR);
+		if (layer->next_frm != NULL)
+			vb2_buffer_done(&layer->next_frm->vb,
+					VB2_BUF_STATE_ERROR);
+	}
+
	while (!list_empty(&layer->dma_queue)) {
		layer->next_frm = list_entry(layer->dma_queue.next,
				struct vpbe_disp_buffer, list);
		list_del(&layer->next_frm->list);
		vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
	}
-
+	spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
	return 0;
 }
 
389 403
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index d762246eabf5..0379cb9f9a9c 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
		}
		vpfe_dev->io_usrs = 0;
		vpfe_dev->numbuffers = config_params.numbuffers;
+		videobuf_stop(&vpfe_dev->buffer_queue);
+		videobuf_mmap_free(&vpfe_dev->buffer_queue);
	}
 
	/* Decrement device usrs counter */
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 756da78bac23..8dea0b84a3ad 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
	common = &ch->common[VPIF_VIDEO_INDEX];
 
+	/* Disable channel as per its device type and channel id */
+	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+		enable_channel0(0);
+		channel0_intr_enable(0);
+	}
+	if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+		(2 == common->started)) {
+		enable_channel1(0);
+		channel1_intr_enable(0);
+	}
+	common->started = 0;
+
	/* release all active buffers */
	spin_lock_irqsave(&common->irqlock, flags);
+	if (common->cur_frm == common->next_frm) {
+		vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+	} else {
+		if (common->cur_frm != NULL)
+			vb2_buffer_done(&common->cur_frm->vb,
+					VB2_BUF_STATE_ERROR);
+		if (common->next_frm != NULL)
+			vb2_buffer_done(&common->next_frm->vb,
+					VB2_BUF_STATE_ERROR);
+	}
+
	while (!list_empty(&common->dma_queue)) {
		common->next_frm = list_entry(common->dma_queue.next,
				struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
		/* Reset io_usrs member of channel object */
		common->io_usrs = 0;
-		/* Disable channel as per its device type and channel id */
-		if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
-			enable_channel0(0);
-			channel0_intr_enable(0);
-		}
-		if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
-			(2 == common->started)) {
-			enable_channel1(0);
-			channel1_intr_enable(0);
-		}
-		common->started = 0;
		/* Free buffers allocated */
		vb2_queue_release(&common->buffer_queue);
		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 0ac841e35aa4..aed41edd0501 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
 
	common = &ch->common[VPIF_VIDEO_INDEX];
 
+	/* Disable channel */
+	if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+		enable_channel2(0);
+		channel2_intr_enable(0);
+	}
+	if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+		(2 == common->started)) {
+		enable_channel3(0);
+		channel3_intr_enable(0);
+	}
+	common->started = 0;
+
	/* release all active buffers */
	spin_lock_irqsave(&common->irqlock, flags);
+	if (common->cur_frm == common->next_frm) {
+		vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+	} else {
+		if (common->cur_frm != NULL)
+			vb2_buffer_done(&common->cur_frm->vb,
+					VB2_BUF_STATE_ERROR);
+		if (common->next_frm != NULL)
+			vb2_buffer_done(&common->next_frm->vb,
+					VB2_BUF_STATE_ERROR);
+	}
+
	while (!list_empty(&common->dma_queue)) {
		common->next_frm = list_entry(common->dma_queue.next,
				struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
		/* Reset io_usrs member of channel object */
		common->io_usrs = 0;
-		/* Disable channel */
-		if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
-			enable_channel2(0);
-			channel2_intr_enable(0);
-		}
-		if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
-			(2 == common->started)) {
-			enable_channel3(0);
-			channel3_intr_enable(0);
-		}
-		common->started = 0;
-
		/* Free buffers allocated */
		vb2_queue_release(&common->buffer_queue);
		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index da2fc86cc524..25dbf5b05a96 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
	}, {
		.name		= "YUV 4:2:2 planar, Y/Cb/Cr",
		.fourcc		= V4L2_PIX_FMT_YUV422P,
-		.depth		= { 12 },
+		.depth		= { 16 },
		.color		= FIMC_FMT_YCBYCR422,
		.memplanes	= 1,
		.colplanes	= 3,
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 3aecaf465094..f0c9c42867de 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
 
	f_ref = 2UL * priv->cfg->clock / r_val;
	n_val = div_u64_rem(f_vco, f_ref, &k_val);
-	k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+	k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
 
	ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
	if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
	if (ret < 0)
		goto err;
 
-	ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
-			fc2580_if_filter_lut[i].mul / 1000000000);
+	ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+			fc2580_if_filter_lut[i].mul, 1000000000));
	if (ret < 0)
		goto err;
 
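
[Editor's note] Both fc2580 hunks swap a plain 64-bit division for div_u64(). On 32-bit kernels a 64-by-32 divide compiles to a call to __udivdi3, which the kernel does not provide, so the helper from <linux/math64.h> is required. A sketch of the pattern:

	#include <linux/math64.h>

	/* Widen before multiplying, then divide via the kernel helper. */
	static u32 demo_scale(u32 val, u32 mul, u32 div)
	{
		u64 product = (u64)val * mul;	/* same effect as the 1ULL * ... above */

		return (u32)div_u64(product, div);
	}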
diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
index be38a9e637e0..646c99452136 100644
--- a/drivers/media/tuners/fc2580_priv.h
+++ b/drivers/media/tuners/fc2580_priv.h
@@ -22,6 +22,7 @@
 #define FC2580_PRIV_H
 
 #include "fc2580.h"
+#include <linux/math64.h>
 
 struct fc2580_reg_val {
	u8 reg;
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 7407b8338ccf..bc38f03394cd 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
 ccflags-y += -I$(srctree)/drivers/media/common
-ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 61d196e8b3ab..dcbd392e6efc 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,7 +24,6 @@
 
 #include "rtl2830.h"
 #include "rtl2832.h"
-#include "rtl2832_sdr.h"
 
 #include "qt1010.h"
 #include "mt2060.h"
@@ -36,6 +35,45 @@
 #include "tua9001.h"
 #include "r820t.h"
 
+/*
+ * The RTL2832_SDR module lives in staging. This logic avoids any hard
+ * dependency on the drivers/staging/ directory, so the mainline driver
+ * can still be compiled even when the whole staging directory is missing.
+ */
+#include <media/v4l2-subdev.h>
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+	struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+	struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+	struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+	struct v4l2_subdev *sd)
+{
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_MEDIA_ATTACH
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+	void *__r = NULL; \
+	typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+	if (__a) { \
+		__r = (void *) __a(ARGS); \
+		if (__r == NULL) \
+			symbol_put(FUNCTION); \
+	} \
+	__r; \
+})
+
+#else
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+	FUNCTION(ARGS); \
+})
+
+#endif
+
 static int rtl28xxu_disable_rc;
 module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
 MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
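
[Editor's note] dvb_attach_sdr() above mirrors dvb-core's dvb_attach(): under CONFIG_MEDIA_ATTACH the callee is resolved at runtime with symbol_request(), which pins the providing module only while it is actually in use, keeping the staging SDR module an optional dependency. A simplified sketch of the optional-symbol pattern (the hook name is hypothetical, and unlike the macro above this sketch drops the module reference right after the call):

	#include <linux/module.h>

	extern int optional_hook(int arg);	/* may live in an absent module */

	static int call_optional_hook(int arg)
	{
		typeof(&optional_hook) fn = symbol_request(optional_hook);
		int ret = -ENODEV;

		if (fn) {
			ret = fn(arg);
			symbol_put(optional_hook);	/* release the module ref */
		}
		return ret;
	}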
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
			adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
		/* attach SDR */
-		dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+		dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
				&rtl28xxu_rtl2832_fc0012_config, NULL);
		break;
	case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
			adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
		/* attach SDR */
-		dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+		dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
				&rtl28xxu_rtl2832_fc0013_config, NULL);
		break;
	case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
			i2c_set_adapdata(i2c_adap_internal, d);
 
			/* attach SDR */
-			dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+			dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
					i2c_adap_internal,
					&rtl28xxu_rtl2832_e4000_config, sd);
		}
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
			adap->fe[0]->ops.tuner_ops.get_rf_strength;
 
		/* attach SDR */
-		dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+		dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
				&rtl28xxu_rtl2832_r820t_config, NULL);
		break;
	case TUNER_RTL2832_R828D:
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 7277dbd2afcd..ecbcb39feb71 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
	{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
	{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !IS_ENABLED(CONFIG_USB_SN9C102)
	{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
	{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
-#endif
	{USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
	{USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
	{USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 04b2daf567be..7e2411c36419 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
 
 static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
+	if (get_user(kp->type, &up->type))
+		return -EFAULT;
+
	switch (kp->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
 
 static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
 {
-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
-			get_user(kp->type, &up->type))
-		return -EFAULT;
+	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+		return -EFAULT;
	return __get_v4l2_format32(kp, up);
 }
 
 static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
+	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
		return -EFAULT;
	return __get_v4l2_format32(&kp->format, &up->format);
 }
 
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 110c03627051..b59a17fb7c3e 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
			node->full_name);
		return err;
	}
-	/* Convert bit width to byte width */
-	r.bus_width /= 8;
+
+	/*
+	 * The bus width is encoded into the register as 0 for 8 bits,
+	 * and 1 for 16 bits, so we do the necessary conversion here.
+	 */
+	if (r.bus_width == 8)
+		r.bus_width = 0;
+	else if (r.bus_width == 16)
+		r.bus_width = 1;
+	else {
+		dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+		return -EINVAL;
+	}
 
	err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
				  &r.badr_skew);
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index c9de3d598ea5..1d15735f9ef9 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
 {
	struct completion trans_done;
-	int err = 0, count;
+	u8 dir;
+	int err = 0, i, count;
	long timeleft;
	unsigned long flags;
+	struct scatterlist *sg;
+	enum dma_data_direction dma_dir;
+	u32 val;
+	dma_addr_t addr;
+	unsigned int len;
+
+	dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+
+	/* don't transfer data during abort processing */
+	if (pcr->remove_pci)
+		return -EINVAL;
+
+	if ((sglist == NULL) || (num_sg <= 0))
+		return -EINVAL;
 
-	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+	if (read) {
+		dir = DEVICE_TO_HOST;
+		dma_dir = DMA_FROM_DEVICE;
+	} else {
+		dir = HOST_TO_DEVICE;
+		dma_dir = DMA_TO_DEVICE;
+	}
+
+	count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
	if (count < 1) {
		dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
		return -EINVAL;
	}
	dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
 
+	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+	pcr->sgi = 0;
+	for_each_sg(sglist, sg, count, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+	}
 
	spin_lock_irqsave(&pcr->lock, flags);
 
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
+	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
 
	spin_unlock_irqrestore(&pcr->lock, flags);
 
-	rtsx_pci_dma_transfer(pcr, sglist, count, read);
-
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
@@ -383,7 +413,7 @@ out:
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);
 
-	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
 
	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);
@@ -395,73 +425,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
 
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-		int num_sg, bool read)
-{
-	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-	if (pcr->remove_pci)
-		return -EINVAL;
-
-	if ((sglist == NULL) || num_sg < 1)
-		return -EINVAL;
-
-	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
-
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-		int num_sg, bool read)
-{
-	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-	if (pcr->remove_pci)
-		return -EINVAL;
-
-	if (sglist == NULL || num_sg < 1)
-		return -EINVAL;
-
-	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-	return num_sg;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
-
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
-		int sg_count, bool read)
-{
-	struct scatterlist *sg;
-	dma_addr_t addr;
-	unsigned int len;
-	int i;
-	u32 val;
-	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
-	unsigned long flags;
-
-	if (pcr->remove_pci)
-		return -EINVAL;
-
-	if ((sglist == NULL) || (sg_count < 1))
-		return -EINVAL;
-
-	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
-	pcr->sgi = 0;
-	for_each_sg(sglist, sg, sg_count, i) {
-		addr = sg_dma_address(sg);
-		len = sg_dma_len(sg);
-		rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
-	}
-
-	spin_lock_irqsave(&pcr->lock, flags);
-
-	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
-	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
-
-	spin_unlock_irqrestore(&pcr->lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
-
 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
 {
	int err;
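
[Editor's note] The rtsx_pcr hunks above fold the split map/transfer/unmap helpers back into rtsx_pci_transfer_data(), restoring the usual scatter-gather DMA sequence: map the list, program one descriptor per mapped entry, wait, unmap. A condensed sketch (program_desc() is a hypothetical stand-in for rtsx_pci_add_sg_tbl()):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int demo_sg_transfer(struct device *dev, struct scatterlist *sgl,
				    int num_sg, bool read,
				    void (*program_desc)(dma_addr_t addr,
							 unsigned int len,
							 bool last))
	{
		enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		struct scatterlist *sg;
		int i, count;

		count = dma_map_sg(dev, sgl, num_sg, dir);
		if (count < 1)
			return -EINVAL;

		for_each_sg(sgl, sg, count, i)	/* one descriptor per mapped entry */
			program_desc(sg_dma_address(sg), sg_dma_len(sg),
				     i == count - 1);

		/* ... start the hardware and wait for its interrupt here ... */

		dma_unmap_sg(dev, sgl, num_sg, dir);	/* unmap with the original num_sg */
		return 0;
	}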
@@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
-	dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
-
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
@@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
	}
 
	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
-		if (int_reg & (TRANS_FAIL_INT | DELINK_INT))
+		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
-		else if (int_reg & TRANS_OK_INT)
+			if (pcr->done)
+				complete(pcr->done);
+		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
-
-		if (pcr->done)
-			complete(pcr->done);
-
-		if (int_reg & SD_EXIST) {
-			struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-			if (slot && slot->done_transfer)
-				slot->done_transfer(slot->p_dev);
-		}
-
-		if (int_reg & MS_EXIST) {
-			struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
-			if (slot && slot->done_transfer)
-				slot->done_transfer(slot->p_dev);
+			if (pcr->done)
+				complete(pcr->done);
		}
	}
 
-
	if (pcr->card_inserted || pcr->card_removed)
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));
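
[Editor's note] The ISR rework above signals the waiter from each terminal branch instead of once at the end. The underlying handshake is a completion armed under a spinlock by the submitting thread and completed from interrupt context. A sketch (demo_* names are hypothetical):

	#include <linux/completion.h>
	#include <linux/interrupt.h>
	#include <linux/spinlock.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct demo_dev {
		spinlock_t lock;
		struct completion *done;	/* NULL when nothing is waiting */
		int trans_result;
	};

	static irqreturn_t demo_isr(int irq, void *data)
	{
		struct demo_dev *d = data;

		spin_lock(&d->lock);
		d->trans_result = 0;		/* e.g. TRANS_RESULT_OK */
		if (d->done)
			complete(d->done);	/* wake the sleeping submitter */
		spin_unlock(&d->lock);
		return IRQ_HANDLED;
	}

	static int demo_wait(struct demo_dev *d, unsigned int timeout_ms)
	{
		DECLARE_COMPLETION_ONSTACK(trans_done);
		unsigned long flags;
		long left;

		spin_lock_irqsave(&d->lock, flags);
		d->done = &trans_done;		/* arm before kicking the hardware */
		spin_unlock_irqrestore(&d->lock, flags);

		left = wait_for_completion_interruptible_timeout(&trans_done,
					msecs_to_jiffies(timeout_ms));

		spin_lock_irqsave(&d->lock, flags);
		d->done = NULL;			/* disarm before returning */
		spin_unlock_irqrestore(&d->lock, flags);

		return left <= 0 ? -ETIMEDOUT : 0;
	}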
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 5fb994f9a653..0b9ded13a3ae 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -31,28 +31,14 @@
 #include <linux/mfd/rtsx_pci.h>
 #include <asm/unaligned.h>
 
-struct realtek_next {
-	unsigned int	sg_count;
-	s32		cookie;
-};
-
 struct realtek_pci_sdmmc {
	struct platform_device	*pdev;
	struct rtsx_pcr		*pcr;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
-	struct mmc_command	*cmd;
-	struct mmc_data		*data;
+
+	struct mutex		host_mutex;
 
-	spinlock_t		lock;
-	struct timer_list	timer;
-	struct tasklet_struct	cmd_tasklet;
-	struct tasklet_struct	data_tasklet;
-	struct tasklet_struct	finish_tasklet;
-
-	u8			rsp_type;
-	u8			rsp_len;
-	int			sg_count;
	u8			ssc_depth;
	unsigned int		clock;
	bool			vpclk;
@@ -62,13 +48,8 @@ struct realtek_pci_sdmmc {
	int			power_state;
 #define SDMMC_POWER_ON		1
 #define SDMMC_POWER_OFF		0
-
-	struct realtek_next	next_data;
 };
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-		struct mmc_request *mrq);
-
 static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
 {
	return &(host->pdev->dev);
@@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
 #define sd_print_debug_regs(host)
 #endif /* DEBUG */
 
-static void sd_isr_done_transfer(struct platform_device *pdev)
-{
-	struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
-
-	spin_lock(&host->lock);
-	if (host->cmd)
-		tasklet_schedule(&host->cmd_tasklet);
-	if (host->data)
-		tasklet_schedule(&host->data_tasklet);
-	spin_unlock(&host->lock);
-}
-
-static void sd_request_timeout(unsigned long host_addr)
-{
-	struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (!host->mrq) {
-		dev_err(sdmmc_dev(host), "error: no request exist\n");
-		goto out;
-	}
-
-	if (host->cmd)
-		host->cmd->error = -ETIMEDOUT;
-	if (host->data)
-		host->data->error = -ETIMEDOUT;
-
-	dev_dbg(sdmmc_dev(host), "timeout for request\n");
-
-out:
-	tasklet_schedule(&host->finish_tasklet);
-	spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sd_finish_request(unsigned long host_addr)
-{
-	struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-	struct rtsx_pcr *pcr = host->pcr;
-	struct mmc_request *mrq;
-	struct mmc_command *cmd;
-	struct mmc_data *data;
-	unsigned long flags;
-	bool any_error;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	del_timer(&host->timer);
-	mrq = host->mrq;
-	if (!mrq) {
-		dev_err(sdmmc_dev(host), "error: no request need finish\n");
-		goto out;
-	}
-
-	cmd = mrq->cmd;
-	data = mrq->data;
-
-	any_error = (mrq->sbc && mrq->sbc->error) ||
-		(mrq->stop && mrq->stop->error) ||
-		(cmd && cmd->error) || (data && data->error);
-
-	if (any_error) {
-		rtsx_pci_stop_cmd(pcr);
-		sd_clear_error(host);
-	}
-
-	if (data) {
-		if (any_error)
-			data->bytes_xfered = 0;
-		else
-			data->bytes_xfered = data->blocks * data->blksz;
-
-		if (!data->host_cookie)
-			rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
-					data->flags & MMC_DATA_READ);
-
-	}
-
-	host->mrq = NULL;
-	host->cmd = NULL;
-	host->data = NULL;
-
-out:
-	spin_unlock_irqrestore(&host->lock, flags);
-	mutex_unlock(&pcr->pcr_mutex);
-	mmc_request_done(host->mmc, mrq);
-}
-
 static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
	u8 *buf, int buf_len, int timeout)
 {
@@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
	return 0;
 }
 
-static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+		struct mmc_command *cmd)
 {
	struct rtsx_pcr *pcr = host->pcr;
	u8 cmd_idx = (u8)cmd->opcode;
@@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
	int err = 0;
	int timeout = 100;
	int i;
+	u8 *ptr;
+	int stat_idx = 0;
	u8 rsp_type;
	int rsp_len = 5;
-	unsigned long flags;
-
-	if (host->cmd)
-		dev_err(sdmmc_dev(host), "error: cmd already exist\n");
-
-	host->cmd = cmd;
+	bool clock_toggled = false;
 
	dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
			__func__, cmd_idx, arg);
@@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
		err = -EINVAL;
		goto out;
	}
-	host->rsp_type = rsp_type;
-	host->rsp_len = rsp_len;
 
	if (rsp_type == SD_RSP_TYPE_R1b)
		timeout = 3000;
@@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
				0xFF, SD_CLK_TOGGLE_EN);
		if (err < 0)
			goto out;
+
+		clock_toggled = true;
	}
 
	rtsx_pci_init_cmd(pcr);
@@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
		/* Read data from ping-pong buffer */
		for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+		stat_idx = 16;
	} else if (rsp_type != SD_RSP_TYPE_R0) {
		/* Read data from SD_CMDx registers */
		for (i = SD_CMD0; i <= SD_CMD4; i++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+		stat_idx = 5;
	}
 
	rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
 
-	mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
-
-	spin_lock_irqsave(&pcr->lock, flags);
-	pcr->trans_result = TRANS_NOT_READY;
-	rtsx_pci_send_cmd_no_wait(pcr);
-	spin_unlock_irqrestore(&pcr->lock, flags);
-
-	return;
-
-out:
-	cmd->error = err;
-	tasklet_schedule(&host->finish_tasklet);
-}
-
-static void sd_get_rsp(unsigned long host_addr)
-{
-	struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-	struct rtsx_pcr *pcr = host->pcr;
-	struct mmc_command *cmd;
-	int i, err = 0, stat_idx;
-	u8 *ptr, rsp_type;
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	cmd = host->cmd;
-	host->cmd = NULL;
-
-	if (!cmd) {
-		dev_err(sdmmc_dev(host), "error: cmd not exist\n");
+	err = rtsx_pci_send_cmd(pcr, timeout);
+	if (err < 0) {
+		sd_print_debug_regs(host);
+		sd_clear_error(host);
+		dev_dbg(sdmmc_dev(host),
+			"rtsx_pci_send_cmd error (err = %d)\n", err);
		goto out;
	}
 
-	spin_lock(&pcr->lock);
-	if (pcr->trans_result == TRANS_NO_DEVICE)
-		err = -ENODEV;
-	else if (pcr->trans_result != TRANS_RESULT_OK)
-		err = -EINVAL;
-	spin_unlock(&pcr->lock);
-
-	if (err < 0)
-		goto out;
-
-	rsp_type = host->rsp_type;
-	stat_idx = host->rsp_len;
-
	if (rsp_type == SD_RSP_TYPE_R0) {
		err = 0;
		goto out;
@@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr)
				cmd->resp[0]);
	}
 
-	if (cmd == host->mrq->sbc) {
-		sd_send_cmd(host, host->mrq->cmd);
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
-	}
-
-	if (cmd == host->mrq->stop)
-		goto out;
-
-	if (cmd->data) {
-		sd_start_multi_rw(host, host->mrq);
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
-	}
-
 out:
	cmd->error = err;
 
-	tasklet_schedule(&host->finish_tasklet);
-	spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
-		struct mmc_data *data, struct realtek_next *next)
-{
-	struct rtsx_pcr *pcr = host->pcr;
-	int read = data->flags & MMC_DATA_READ;
-	int sg_count = 0;
-
-	if (!next && data->host_cookie &&
-		data->host_cookie != host->next_data.cookie) {
-		dev_err(sdmmc_dev(host),
-			"error: invalid cookie data[%d] host[%d]\n",
-			data->host_cookie, host->next_data.cookie);
-		data->host_cookie = 0;
-	}
-
-	if (next || (!next && data->host_cookie != host->next_data.cookie))
-		sg_count = rtsx_pci_dma_map_sg(pcr,
-				data->sg, data->sg_len, read);
-	else
-		sg_count = host->next_data.sg_count;
-
-	if (next) {
-		next->sg_count = sg_count;
-		if (++next->cookie < 0)
-			next->cookie = 1;
-		data->host_cookie = next->cookie;
-	}
-
-	return sg_count;
-}
-
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
-		bool is_first_req)
-{
-	struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-	struct mmc_data *data = mrq->data;
-
-	if (data->host_cookie) {
-		dev_err(sdmmc_dev(host),
-			"error: descard already cookie data[%d]\n",
-			data->host_cookie);
-		data->host_cookie = 0;
-	}
-
-	dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
-		sd_pre_dma_transfer(host, data, &host->next_data));
-}
-
-static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
-		int err)
-{
-	struct realtek_pci_sdmmc *host = mmc_priv(mmc);
-	struct rtsx_pcr *pcr = host->pcr;
-	struct mmc_data *data = mrq->data;
-	int read = data->flags & MMC_DATA_READ;
-
-	rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
-	data->host_cookie = 0;
+	if (err && clock_toggled)
+		rtsx_pci_write_register(pcr, SD_BUS_STAT,
+				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
 }
 
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
-		struct mmc_request *mrq)
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
 {
	struct rtsx_pcr *pcr = host->pcr;
	struct mmc_host *mmc = host->mmc;
	struct mmc_card *card = mmc->card;
	struct mmc_data *data = mrq->data;
	int uhs = mmc_card_uhs(card);
-	int read = data->flags & MMC_DATA_READ;
+	int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
	u8 cfg2, trans_mode;
	int err;
	size_t data_len = data->blksz * data->blocks;
 
-	if (host->data)
-		dev_err(sdmmc_dev(host), "error: data already exist\n");
-
-	host->data = data;
-
	if (read) {
		cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
			SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
	rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
			SD_TRANSFER_END, SD_TRANSFER_END);
 
-	mod_timer(&host->timer, jiffies + 10 * HZ);
	rtsx_pci_send_cmd_no_wait(pcr);
 
-	err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read);
-	if (err < 0) {
-		data->error = err;
-		tasklet_schedule(&host->finish_tasklet);
-	}
-	return 0;
-}
-
-static void sd_finish_multi_rw(unsigned long host_addr)
-{
-	struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
-	struct rtsx_pcr *pcr = host->pcr;
-	struct mmc_data *data;
-	int err = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (!host->data) {
-		dev_err(sdmmc_dev(host), "error: no data exist\n");
-		goto out;
-	}
-
-	data = host->data;
-	host->data = NULL;
-
-	if (pcr->trans_result == TRANS_NO_DEVICE)
-		err = -ENODEV;
-	else if (pcr->trans_result != TRANS_RESULT_OK)
-		err = -EINVAL;
-
+	err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
	if (err < 0) {
-		data->error = err;
-		goto out;
-	}
-
-	if (!host->mrq->sbc && data->stop) {
-		sd_send_cmd(host, data->stop);
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
+		sd_clear_error(host);
+		return err;
	}
 
-out:
-	tasklet_schedule(&host->finish_tasklet);
-	spin_unlock_irqrestore(&host->lock, flags);
+	return 0;
 }
 
 static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
@@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
	return 0;
 }
 
-static inline bool sd_use_muti_rw(struct mmc_command *cmd)
-{
-	return mmc_op_multi(cmd->opcode) ||
-		(cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
-		(cmd->opcode == MMC_WRITE_BLOCK);
-}
-
 static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
	struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct mmc_data *data = mrq->data;
	unsigned int data_size = 0;
	int err;
-	unsigned long flags;
-
-	mutex_lock(&pcr->pcr_mutex);
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (host->mrq)
-		dev_err(sdmmc_dev(host), "error: request already exist\n");
-	host->mrq = mrq;
 
	if (host->eject) {
		cmd->error = -ENOMEDIUM;
@@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
		goto finish;
	}
 
+	mutex_lock(&pcr->pcr_mutex);
+
	rtsx_pci_start_run(pcr);
 
	rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
	rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
			CARD_SHARE_MASK, CARD_SHARE_48_SD);
 
+	mutex_lock(&host->host_mutex);
+	host->mrq = mrq;
+	mutex_unlock(&host->host_mutex);
+
	if (mrq->data)
		data_size = data->blocks * data->blksz;
 
-	if (sd_use_muti_rw(cmd))
-		host->sg_count = sd_pre_dma_transfer(host, data, NULL);
+	if (!data_size || mmc_op_multi(cmd->opcode) ||
+	    (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+	    (cmd->opcode == MMC_WRITE_BLOCK)) {
+		sd_send_cmd_get_rsp(host, cmd);
 
-	if (!data_size || sd_use_muti_rw(cmd)) {
-		if (mrq->sbc)
-			sd_send_cmd(host, mrq->sbc);
-		else
-			sd_send_cmd(host, cmd);
-		spin_unlock_irqrestore(&host->lock, flags);
+		if (!cmd->error && data_size) {
+			sd_rw_multi(host, mrq);
+
+			if (mmc_op_multi(cmd->opcode) && mrq->stop)
+				sd_send_cmd_get_rsp(host, mrq->stop);
+		}
	} else {
-		spin_unlock_irqrestore(&host->lock, flags);
		sd_normal_rw(host, mrq);
-		tasklet_schedule(&host->finish_tasklet);
	}
-	return;
+
+	if (mrq->data) {
+		if (cmd->error || data->error)
+			data->bytes_xfered = 0;
+		else
+			data->bytes_xfered = data->blocks * data->blksz;
+	}
+
+	mutex_unlock(&pcr->pcr_mutex);
 
 finish:
-	tasklet_schedule(&host->finish_tasklet);
-	spin_unlock_irqrestore(&host->lock, flags);
+	if (cmd->error)
+		dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+
+	mutex_lock(&host->host_mutex);
+	host->mrq = NULL;
+	mutex_unlock(&host->host_mutex);
+
+	mmc_request_done(mmc, mrq);
 }
 
 static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
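
[Editor's note] The sdmmc_request() rewrite above replaces the tasklet-driven state machine with a straight-line synchronous path; the only state still shared with the remove path is host->mrq, now guarded by a mutex rather than a spinlock since the request path sleeps. A minimal sketch of that shape (demo_* names are hypothetical):

	#include <linux/mutex.h>

	struct demo_host {
		struct mutex host_mutex;
		void *mrq;			/* in-flight request, if any */
	};

	static void demo_request(struct demo_host *host, void *mrq)
	{
		mutex_lock(&host->host_mutex);
		host->mrq = mrq;		/* publish for e.g. the remove path */
		mutex_unlock(&host->host_mutex);

		/* ... issue the command and block until it finishes ... */

		mutex_lock(&host->host_mutex);
		host->mrq = NULL;		/* request no longer in flight */
		mutex_unlock(&host->host_mutex);
		/* report completion to the upper layer here */
	}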
@@ -1400,8 +1141,6 @@ out:
 }
 
 static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
-	.pre_req = sdmmc_pre_req,
-	.post_req = sdmmc_post_req,
	.request = sdmmc_request,
	.set_ios = sdmmc_set_ios,
	.get_ro = sdmmc_get_ro,
@@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
	struct realtek_pci_sdmmc *host;
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle = pdev->dev.platform_data;
-	unsigned long host_addr;
 
	if (!handle)
		return -ENXIO;
@@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
	pcr->slots[RTSX_SD_CARD].p_dev = pdev;
	pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
 
-	host_addr = (unsigned long)host;
-	host->next_data.cookie = 1;
-	setup_timer(&host->timer, sd_request_timeout, host_addr);
-	tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
-	tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
-	tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
-	spin_lock_init(&host->lock);
+	mutex_init(&host->host_mutex);
 
-	pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
	realtek_init_host(host);
 
	mmc_add_host(mmc);
@@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
	struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
	struct rtsx_pcr *pcr;
	struct mmc_host *mmc;
-	struct mmc_request *mrq;
-	unsigned long flags;
 
	if (!host)
		return 0;
@@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
1519 pcr = host->pcr; 1248 pcr = host->pcr;
1520 pcr->slots[RTSX_SD_CARD].p_dev = NULL; 1249 pcr->slots[RTSX_SD_CARD].p_dev = NULL;
1521 pcr->slots[RTSX_SD_CARD].card_event = NULL; 1250 pcr->slots[RTSX_SD_CARD].card_event = NULL;
1522 pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
1523 mmc = host->mmc; 1251 mmc = host->mmc;
1524 mrq = host->mrq;
1525 1252
1526 spin_lock_irqsave(&host->lock, flags); 1253 mutex_lock(&host->host_mutex);
1527 if (host->mrq) { 1254 if (host->mrq) {
1528 dev_dbg(&(pdev->dev), 1255 dev_dbg(&(pdev->dev),
1529 "%s: Controller removed during transfer\n", 1256 "%s: Controller removed during transfer\n",
1530 mmc_hostname(mmc)); 1257 mmc_hostname(mmc));
1531 1258
1532 if (mrq->sbc) 1259 rtsx_pci_complete_unfinished_transfer(pcr);
1533 mrq->sbc->error = -ENOMEDIUM;
1534 if (mrq->cmd)
1535 mrq->cmd->error = -ENOMEDIUM;
1536 if (mrq->stop)
1537 mrq->stop->error = -ENOMEDIUM;
1538 if (mrq->data)
1539 mrq->data->error = -ENOMEDIUM;
1540 1260
1541 tasklet_schedule(&host->finish_tasklet); 1261 host->mrq->cmd->error = -ENOMEDIUM;
1262 if (host->mrq->stop)
1263 host->mrq->stop->error = -ENOMEDIUM;
1264 mmc_request_done(mmc, host->mrq);
1542 } 1265 }
1543 spin_unlock_irqrestore(&host->lock, flags); 1266 mutex_unlock(&host->host_mutex);
1544
1545 del_timer_sync(&host->timer);
1546 tasklet_kill(&host->cmd_tasklet);
1547 tasklet_kill(&host->data_tasklet);
1548 tasklet_kill(&host->finish_tasklet);
1549 1267
1550 mmc_remove_host(mmc); 1268 mmc_remove_host(mmc);
1551 host->eject = true; 1269 host->eject = true;
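Editor's note on the rtsx_pci_sdmmc hunks above: the driver drops its timer/tasklet completion machinery in favour of a mutex-protected host->mrq pointer and a direct mmc_request_done() call. A minimal userspace sketch of that ownership pattern, with pthreads standing in for the kernel primitives (the struct names and the -19 error value here are illustrative, not the driver's API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stddef.h>

	struct request { int error; };

	struct host {
		pthread_mutex_t lock;   /* plays the role of host_mutex */
		struct request *mrq;    /* in-flight request, NULL when idle */
	};

	/* Completion helper standing in for mmc_request_done(). */
	static void request_done(struct request *rq)
	{
		printf("request completed, error=%d\n", rq->error);
	}

	/* Normal path: publish "no request in flight" under the mutex,
	 * then complete the request. */
	static void finish_request(struct host *h, struct request *rq)
	{
		pthread_mutex_lock(&h->lock);
		h->mrq = NULL;
		pthread_mutex_unlock(&h->lock);
		request_done(rq);
	}

	/* Removal path: if a request is still in flight, fail it with a
	 * "no medium" style error before tearing the host down. */
	static void remove_host(struct host *h)
	{
		pthread_mutex_lock(&h->lock);
		if (h->mrq) {
			h->mrq->error = -19; /* stand-in error code */
			request_done(h->mrq);
			h->mrq = NULL;
		}
		pthread_mutex_unlock(&h->lock);
	}

	int main(void)
	{
		struct host h = { PTHREAD_MUTEX_INITIALIZER, NULL };
		struct request rq = { 0 };

		h.mrq = &rq;       /* request submitted */
		finish_request(&h, &rq);
		remove_host(&h);   /* idle now, nothing to fail */
		return 0;
	}

Because the request path no longer runs in tasklet (softirq) context, a sleeping mutex suffices, and the del_timer_sync()/tasklet_kill() teardown disappears from the remove path.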
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 4615d79fc93f..b922c8efcf40 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = {
 #if defined(CONFIG_OF)
 static const struct of_device_id davinci_nand_of_match[] = {
 	{.compatible = "ti,davinci-nand", },
+	{.compatible = "ti,keystone-nand", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
@@ -581,6 +582,11 @@ static struct davinci_nand_pdata
 		    of_property_read_bool(pdev->dev.of_node,
 			"ti,davinci-nand-use-bbt"))
 			pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+		if (of_device_is_compatible(pdev->dev.of_node,
+					    "ti,keystone-nand")) {
+			pdata->options |= NAND_NO_SUBPAGE_WRITE;
+		}
 	}
 
 	return dev_get_platdata(&pdev->dev);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 7ff473c871a9..8d659e6a1b4c 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	 * Create one workqueue per volume (per registered block device).
 	 * Rembember workqueues are cheap, they're not threads.
 	 */
-	dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
 	if (!dev->wq)
 		goto out_free_queue;
 	INIT_WORK(&dev->work, ubiblock_do_work);
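The one-line ubiblock change above is a classic format-string fix: gd->disk_name is data, so it must be passed as an argument to a "%s" format rather than used as the format itself (alloc_workqueue() takes a printf-style format). The same hazard is easy to show with plain printf (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		const char *name = "vol_100%s"; /* user-influenced data */

		/* Wrong: 'name' is interpreted as a format string; the
		 * stray %s makes printf read a bogus variadic argument. */
		/* printf(name); */

		/* Right: the format is a literal, the data is an argument. */
		printf("%s\n", name);
		return 0;
	}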
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 02317c1c0238..0f3425dac910 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 
 	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
 	self_check_in_wl_tree(ubi, e, &ubi->free);
+	ubi->free_count--;
+	ubi_assert(ubi->free_count >= 0);
 	rb_erase(&e->u.rb, &ubi->free);
 
 	return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
 	peb = __wl_get_peb(ubi);
 	spin_unlock(&ubi->wl_lock);
 
+	if (peb < 0)
+		return peb;
+
 	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
 				    ubi->peb_size - ubi->vid_hdr_aloffset);
 	if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 		/* Give the unused PEB back */
 		wl_tree_add(e2, &ubi->free);
+		ubi->free_count++;
 		goto out_cancel;
 	}
 	self_check_in_wl_tree(ubi, e1, &ubi->used);
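The wl.c hunks keep ubi->free_count in lockstep with the ubi->free tree: decrement where a PEB is taken out in get_peb_for_wl(), increment where one is handed back in wear_leveling_worker(). The bug class being fixed is a container and its cached count drifting apart; reduced to a self-checking sketch (the array pool and names are illustrative):

	#include <assert.h>
	#include <stdio.h>

	#define POOL_SIZE 8

	static int free_pool[POOL_SIZE];
	static int free_count;

	static void pool_put(int peb)
	{
		assert(free_count < POOL_SIZE);
		free_pool[free_count++] = peb;  /* add to the container... */
		/* ...and the counter moves with it, never separately */
	}

	static int pool_get(void)
	{
		assert(free_count > 0);         /* mirrors ubi_assert() */
		return free_pool[--free_count]; /* remove + decrement together */
	}

	int main(void)
	{
		pool_put(3);
		pool_put(7);
		printf("got %d, %d left\n", pool_get(), free_count);
		return 0;
	}

Asserting right at the decrement catches any drift where it happens rather than much later.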
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
 }
 
 /* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match);
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
 static void rlb_src_unlink(struct bonding *bond, u32 index);
 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
 	bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-	alb_send_learning_packets(bond->curr_active_slave, addr);
+	alb_send_learning_packets(bond->curr_active_slave, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 /*********************** tlb/rlb shared functions *********************/
 
 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
-			    u16 vid)
+			    __be16 vlan_proto, u16 vid)
 {
 	struct learning_pkt pkt;
 	struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	skb->dev = slave->dev;
 
 	if (vid) {
-		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+		skb = vlan_put_tag(skb, vlan_proto, vid);
 		if (!skb) {
 			pr_err("%s: Error: failed to insert VLAN tag\n",
 			       slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
 	dev_queue_xmit(skb);
 }
 
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+				      bool strict_match)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 	struct net_device *upper;
 	struct list_head *iter;
 
 	/* send untagged */
-	alb_send_lp_vid(slave, mac_addr, 0);
+	alb_send_lp_vid(slave, mac_addr, 0, 0);
 
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
-			alb_send_lp_vid(slave, mac_addr,
-					vlan_dev_vlan_id(upper));
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+			if (strict_match &&
+			    ether_addr_equal_64bits(mac_addr,
+						    upper->dev_addr)) {
+				alb_send_lp_vid(slave, mac_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			} else if (!strict_match) {
+				alb_send_lp_vid(slave, upper->dev_addr,
+						vlan_dev_vlan_proto(upper),
+						vlan_dev_vlan_id(upper));
+			}
+		}
 	}
 	rcu_read_unlock();
 }
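The reworked alb_send_learning_packets() above only considers uppers with vlan_get_encap_level(upper) == 0, that is, VLAN devices with no further VLAN stacked beneath them. The encapsulation level is in essence a count of VLAN devices under a device; a toy model of that counting (not the kernel's implementation):

	#include <stdio.h>

	struct net_dev {
		const char *name;
		int is_vlan;
		struct net_dev *lower; /* device this one is stacked on */
	};

	/* Count how many VLAN devices sit underneath 'dev'. */
	static int encap_level(const struct net_dev *dev)
	{
		int level = 0;

		for (dev = dev->lower; dev; dev = dev->lower)
			if (dev->is_vlan)
				level++;
		return level;
	}

	int main(void)
	{
		struct net_dev bond  = { "bond0", 0, NULL };
		struct net_dev vlan1 = { "bond0.10", 1, &bond };
		struct net_dev vlan2 = { "bond0.10.20", 1, &vlan1 };

		printf("%s: %d\n", vlan1.name, encap_level(&vlan1)); /* 0 */
		printf("%s: %d\n", vlan2.name, encap_level(&vlan2)); /* 1 */
		return 0;
	}

With this definition only the single-tagged bond0.10 (level 0) gets a learning packet directly; the double-tagged bond0.10.20 is reached through its tag stack instead.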
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 
 	/* fasten the change in the switch */
 	if (SLAVE_IS_OK(slave1)) {
-		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 	}
 
 	if (SLAVE_IS_OK(slave2)) {
-		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
 			 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
 
 	/* send learning packets */
 	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+		bool strict_match;
+
 		/* change of curr_active_slave involves swapping of mac addresses.
 		 * in order to avoid this swapping from happening while
 		 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
 		 */
 		read_lock(&bond->curr_slave_lock);
 
-		bond_for_each_slave_rcu(bond, slave, iter)
-			alb_send_learning_packets(slave, slave->dev->dev_addr);
+		bond_for_each_slave_rcu(bond, slave, iter) {
+			/* If updating current_active, use all currently
+			 * user mac addreses (!strict_match).  Otherwise, only
+			 * use mac of the slave device.
+			 */
+			strict_match = (slave != bond->curr_active_slave);
+			alb_send_learning_packets(slave, slave->dev->dev_addr,
+						  strict_match);
+		}
 
 		read_unlock(&bond->curr_slave_lock);
 
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 	} else {
 		/* set the new_slave to the bond mac address */
 		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
-		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+					  false);
 	}
 
 	write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 	alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
 	read_lock(&bond->lock);
-	alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+	alb_send_learning_packets(bond->curr_active_slave,
+				  bond_dev->dev_addr, false);
 	if (bond->alb_info.rlb_enabled) {
 		/* inform clients mac address has changed */
 		rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  */
 static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 			  __be32 dest_ip, __be32 src_ip,
-			  struct bond_vlan_tag *inner,
-			  struct bond_vlan_tag *outer)
+			  struct bond_vlan_tag *tags)
 {
 	struct sk_buff *skb;
+	int i;
 
 	pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
 		 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 		net_err_ratelimited("ARP packet allocation failed\n");
 		return;
 	}
-	if (outer->vlan_id) {
-		if (inner->vlan_id) {
-			pr_debug("inner tag: proto %X vid %X\n",
-				 ntohs(inner->vlan_proto), inner->vlan_id);
-			skb = __vlan_put_tag(skb, inner->vlan_proto,
-					     inner->vlan_id);
-			if (!skb) {
-				net_err_ratelimited("failed to insert inner VLAN tag\n");
-				return;
-			}
-		}
 
-		pr_debug("outer reg: proto %X vid %X\n",
-			 ntohs(outer->vlan_proto), outer->vlan_id);
-		skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+	/* Go through all the tags backwards and add them to the packet */
+	for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+		if (!tags[i].vlan_id)
+			continue;
+
+		pr_debug("inner tag: proto %X vid %X\n",
+			 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+		skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+				     tags[i].vlan_id);
+		if (!skb) {
+			net_err_ratelimited("failed to insert inner VLAN tag\n");
+			return;
+		}
+	}
+	/* Set the outer tag */
+	if (tags[0].vlan_id) {
+		pr_debug("outer tag: proto %X vid %X\n",
+			 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+		skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
 		if (!skb) {
 			net_err_ratelimited("failed to insert outer VLAN tag\n");
 			return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
 	arp_xmit(skb);
 }
 
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+				    struct net_device *end_dev,
+				    struct bond_vlan_tag *tags)
+{
+	struct net_device *upper;
+	struct list_head *iter;
+	int idx;
+
+	if (start_dev == end_dev)
+		return true;
+
+	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+		if (bond_verify_device_path(upper, end_dev, tags)) {
+			if (is_vlan_dev(upper)) {
+				idx = vlan_get_encap_level(upper);
+				if (idx >= BOND_MAX_VLAN_ENCAP)
+					return false;
+
+				tags[idx].vlan_proto =
+				    vlan_dev_vlan_proto(upper);
+				tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
-	struct net_device *upper, *vlan_upper;
-	struct list_head *iter, *vlan_iter;
 	struct rtable *rt;
-	struct bond_vlan_tag inner, outer;
+	struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
 	__be32 *targets = bond->params.arp_targets, addr;
 	int i;
+	bool ret;
 
 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
 		pr_debug("basa: target %pI4\n", &targets[i]);
-		inner.vlan_proto = 0;
-		inner.vlan_id = 0;
-		outer.vlan_proto = 0;
-		outer.vlan_id = 0;
+		memset(tags, 0, sizeof(tags));
 
 		/* Find out through which dev should the packet go */
 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
 					     bond->dev->name,
 					     &targets[i]);
-			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+			bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+				      0, tags);
 			continue;
 		}
 
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 			goto found;
 
 		rcu_read_lock();
-		/* first we search only for vlan devices. for every vlan
-		 * found we verify its upper dev list, searching for the
-		 * rt->dst.dev. If found we save the tag of the vlan and
-		 * proceed to send the packet.
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
-						  vlan_iter) {
-			if (!is_vlan_dev(vlan_upper))
-				continue;
-
-			if (vlan_upper == rt->dst.dev) {
-				outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-				outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-				rcu_read_unlock();
-				goto found;
-			}
-			netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
-							  iter) {
-				if (upper == rt->dst.dev) {
-					/* If the upper dev is a vlan dev too,
-					 *  set the vlan tag to inner tag.
-					 */
-					if (is_vlan_dev(upper)) {
-						inner.vlan_proto = vlan_dev_vlan_proto(upper);
-						inner.vlan_id = vlan_dev_vlan_id(upper);
-					}
-					outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
-					outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
-					rcu_read_unlock();
-					goto found;
-				}
-			}
-		}
-
-		/* if the device we're looking for is not on top of any of
-		 * our upper vlans, then just search for any dev that
-		 * matches, and in case it's a vlan - save the id
-		 */
-		netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-			if (upper == rt->dst.dev) {
-				rcu_read_unlock();
-				goto found;
-			}
-		}
+		ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
 		rcu_read_unlock();
 
+		if (ret)
+			goto found;
+
 		/* Not our device - skip */
 		pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
 			 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
 		ip_rt_put(rt);
 		bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
 			      addr, tags);
 	}
 }
 
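bond_verify_device_path(), added above, is a depth-first walk of the upper-device graph: it recurses until it reaches the route's egress device and, while unwinding, records each VLAN device's tag at the index given by its encapsulation level, so tags[0] ends up holding the outermost tag and higher indices the inner ones; that is why bond_arp_send() applies the array backwards. A self-contained sketch of the same shape (hypothetical types, fixed 0x8100 protocol for brevity):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_ENCAP  2
	#define MAX_UPPERS 2

	struct vlan_tag { int proto, vid; };

	struct dev {
		const char *name;
		bool is_vlan;
		int encap_level;           /* 0 = outermost tag */
		int vid;
		struct dev *upper[MAX_UPPERS];
	};

	static bool verify_path(struct dev *start, struct dev *end,
				struct vlan_tag *tags)
	{
		if (start == end)
			return true;

		for (int i = 0; i < MAX_UPPERS; i++) {
			struct dev *up = start->upper[i];

			if (up && verify_path(up, end, tags)) {
				if (up->is_vlan) {
					if (up->encap_level >= MAX_ENCAP)
						return false;
					tags[up->encap_level].proto = 0x8100;
					tags[up->encap_level].vid = up->vid;
				}
				return true; /* collected while unwinding */
			}
		}
		return false;
	}

	int main(void)
	{
		struct dev inner = { "bond0.10.20", true, 1, 20, { NULL } };
		struct dev outer = { "bond0.10", true, 0, 10, { &inner } };
		struct dev bond  = { "bond0", false, 0, 0, { &outer } };
		struct vlan_tag tags[MAX_ENCAP] = { 0 };

		if (verify_path(&bond, &inner, tags))
			printf("outer vid %d, inner vid %d\n",
			       tags[0].vid, tags[1].vid); /* 10, 20 */
		return 0;
	}

Collecting tags on the way back out of the recursion is what orders them correctly without a second pass.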
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
 static const struct bond_opt_value bond_intmax_tbl[] = {
 	{ "off",     0,       BOND_VALFLAG_DEFAULT},
 	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
+	{ NULL,      -1,      0}
 };
 
 static const struct bond_opt_value bond_lacp_rate_tbl[] = {
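The added { NULL, -1, 0} entry gives bond_intmax_tbl[] the NULL-terminated shape that table walkers rely on; without a sentinel, a lookup loop reads past the end of the array. In miniature:

	#include <stdio.h>
	#include <string.h>

	struct opt_value { const char *string; int value; };

	static const struct opt_value intmax_tbl[] = {
		{ "off",    0 },
		{ "maxval", 2147483647 },
		{ NULL,    -1 }          /* sentinel: terminates the walk */
	};

	static int lookup(const struct opt_value *tbl, const char *s)
	{
		for (; tbl->string; tbl++)   /* stops at the NULL entry */
			if (!strcmp(tbl->string, s))
				return tbl->value;
		return -1;
	}

	int main(void)
	{
		printf("%d\n", lookup(intmax_tbl, "maxval"));
		return 0;
	}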
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0e8b268da0a0..5f6babcfc26e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -534,7 +534,7 @@ static ssize_t bonding_show_min_links(struct device *d,
 {
 	struct bonding *bond = to_bond(d);
 
-	return sprintf(buf, "%d\n", bond->params.min_links);
+	return sprintf(buf, "%u\n", bond->params.min_links);
 }
 
 static ssize_t bonding_store_min_links(struct device *d,
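min_links is unsigned, so %d would misprint values above INT_MAX; %u matches the type. A two-line illustration of the mismatch being fixed (the %d case is formally undefined, shown only for the typical two's-complement result):

	#include <stdio.h>

	int main(void)
	{
		unsigned int min_links = 3000000000u; /* > INT_MAX */

		printf("%d\n", min_links);  /* wrong: -1294967296 */
		printf("%u\n", min_links);  /* right: 3000000000 */
		return 0;
	}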
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
+#define BOND_MAX_VLAN_ENCAP	2
 #define BOND_MAX_ARP_TARGETS	16
 
 #define BOND_DEFAULT_MIIMON	100
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a5c8dcfa8357..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -60,6 +60,8 @@
 #define CONTROL_IE		BIT(1)
 #define CONTROL_INIT		BIT(0)
 
+#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
+
 /* test register */
 #define TEST_RX			BIT(7)
 #define TEST_TX1		BIT(6)
@@ -108,11 +110,14 @@
 #define IF_COMM_CONTROL		BIT(4)
 #define IF_COMM_CLR_INT_PND	BIT(3)
 #define IF_COMM_TXRQST		BIT(2)
+#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
 #define IF_COMM_DATAA		BIT(1)
 #define IF_COMM_DATAB		BIT(0)
-#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
-				IF_COMM_CONTROL | IF_COMM_TXRQST | \
-				IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* TX buffer setup */
+#define IF_COMM_TX		(IF_COMM_ARB | IF_COMM_CONTROL | \
+				 IF_COMM_TXRQST | \
+				 IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the low buffers we clear the interrupt bit, but keep newdat */
 #define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
@@ -120,12 +125,19 @@
 				 IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the high buffers we clear the interrupt bit and newdat */
-#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
+
+
+/* Receive setup of message objects */
+#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
+
+/* Invalidation of message objects */
+#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)
 
 /* IFx arbitration */
-#define IF_ARB_MSGVAL		BIT(15)
-#define IF_ARB_MSGXTD		BIT(14)
-#define IF_ARB_TRANSMIT		BIT(13)
+#define IF_ARB_MSGVAL		BIT(31)
+#define IF_ARB_MSGXTD		BIT(30)
+#define IF_ARB_TRANSMIT		BIT(29)
 
 /* IFx message control */
 #define IF_MCONT_NEWDAT		BIT(15)
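The arbitration flags above move from BIT(15)/BIT(14)/BIT(13) to BIT(31)/BIT(30)/BIT(29) because later hunks build one 32-bit arbitration word and write it to the two 16-bit ARB registers as arb and arb >> 16; bit 31 of the combined word is bit 15 of the high register, so both spellings name the same hardware bit. The split in isolation (a sketch, not the driver's I/O helpers):

	#include <stdint.h>
	#include <stdio.h>

	#define IF_ARB_MSGVAL   (1u << 31)
	#define IF_ARB_MSGXTD   (1u << 30)
	#define IF_ARB_TRANSMIT (1u << 29)

	/* Stand-in for a pair of 16-bit hardware registers. */
	static uint16_t arb1, arb2;

	static void write_arb(uint32_t arb)
	{
		arb1 = arb & 0xffff;  /* ARB1: low half */
		arb2 = arb >> 16;     /* ARB2: high half, carries the flags */
	}

	int main(void)
	{
		uint32_t arb = IF_ARB_MSGVAL | ((0x123u & 0x7ff) << 18);

		write_arb(arb);
		printf("ARB1=0x%04x ARB2=0x%04x\n", arb1, arb2);
		return 0;
	}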
@@ -139,19 +151,17 @@
 #define IF_MCONT_EOB		BIT(7)
 #define IF_MCONT_DLC_MASK	0xf
 
+#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
+#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)
+
+#define IF_MCONT_TX		(IF_MCONT_TXIE | IF_MCONT_EOB)
+
 /*
  * Use IF1 for RX and IF2 for TX
  */
 #define IF_RX			0
 #define IF_TX			1
 
-/* status interrupt */
-#define STATUS_INTERRUPT	0x8000
-
-/* global interrupt masks */
-#define ENABLE_ALL_INTERRUPTS	1
-#define DISABLE_ALL_INTERRUPTS	0
-
 /* minimum timeout for checking BUSY status */
 #define MIN_TIMEOUT_VALUE	6
 
@@ -171,6 +181,7 @@ enum c_can_lec_type {
 	LEC_BIT0_ERROR,
 	LEC_CRC_ERROR,
 	LEC_UNUSED,
+	LEC_MASK = LEC_UNUSED,
 };
 
 /*
@@ -226,143 +237,115 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
 		priv->raminit(priv, enable);
 }
 
-static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
-{
-	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
-			C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static inline int get_tx_echo_msg_obj(int txecho)
-{
-	return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
-{
-	u32 val = priv->read_reg(priv, index);
-	val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
-	return val;
-}
-
-static void c_can_enable_all_interrupts(struct c_can_priv *priv,
-					int enable)
+static void c_can_irq_control(struct c_can_priv *priv, bool enable)
 {
-	unsigned int cntrl_save = priv->read_reg(priv,
-						C_CAN_CTRL_REG);
+	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
 
 	if (enable)
-		cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
-	else
-		cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+		ctrl |= CONTROL_IRQMSK;
 
-	priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
+	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
 }
 
-static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
 {
-	int count = MIN_TIMEOUT_VALUE;
+	struct c_can_priv *priv = netdev_priv(dev);
+	int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
 
-	while (count && priv->read_reg(priv,
-				C_CAN_IFACE(COMREQ_REG, iface)) &
-				IF_COMR_BUSY) {
-		count--;
+	priv->write_reg(priv, reg + 1, cmd);
+	priv->write_reg(priv, reg, obj);
+
+	for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
+		if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
+			return;
 		udelay(1);
 	}
+	netdev_err(dev, "Updating object timed out\n");
 
-	if (!count)
-		return 1;
+}
 
-	return 0;
+static inline void c_can_object_get(struct net_device *dev, int iface,
+				    u32 obj, u32 cmd)
+{
+	c_can_obj_update(dev, iface, cmd, obj);
 }
 
-static inline void c_can_object_get(struct net_device *dev,
-					int iface, int objno, int mask)
+static inline void c_can_object_put(struct net_device *dev, int iface,
+				    u32 obj, u32 cmd)
 {
-	struct c_can_priv *priv = netdev_priv(dev);
+	c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
+}
 
-	/*
-	 * As per specs, after writting the message object number in the
-	 * IF command request register the transfer b/w interface
-	 * register and message RAM must be complete in 6 CAN-CLK
-	 * period.
-	 */
-	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-			IFX_WRITE_LOW_16BIT(mask));
-	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-			IFX_WRITE_LOW_16BIT(objno));
+/*
+ * Note: According to documentation clearing TXIE while MSGVAL is set
+ * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
+ * load significantly.
+ */
+static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
 
-	if (c_can_msg_obj_is_busy(priv, iface))
-		netdev_err(dev, "timed out in object get\n");
+	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
+	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
 }
 
-static inline void c_can_object_put(struct net_device *dev,
-					int iface, int objno, int mask)
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	/*
-	 * As per specs, after writting the message object number in the
-	 * IF command request register the transfer b/w interface
-	 * register and message RAM must be complete in 6 CAN-CLK
-	 * period.
-	 */
-	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-			(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
-	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-			IFX_WRITE_LOW_16BIT(objno));
-
-	if (c_can_msg_obj_is_busy(priv, iface))
-		netdev_err(dev, "timed out in object put\n");
+	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+	c_can_inval_tx_object(dev, iface, obj);
 }
 
-static void c_can_write_msg_object(struct net_device *dev,
-			int iface, struct can_frame *frame, int objno)
+static void c_can_setup_tx_object(struct net_device *dev, int iface,
+				  struct can_frame *frame, int idx)
 {
-	int i;
-	u16 flags = 0;
-	unsigned int id;
 	struct c_can_priv *priv = netdev_priv(dev);
-
-	if (!(frame->can_id & CAN_RTR_FLAG))
-		flags |= IF_ARB_TRANSMIT;
+	u16 ctrl = IF_MCONT_TX | frame->can_dlc;
+	bool rtr = frame->can_id & CAN_RTR_FLAG;
+	u32 arb = IF_ARB_MSGVAL;
+	int i;
 
 	if (frame->can_id & CAN_EFF_FLAG) {
-		id = frame->can_id & CAN_EFF_MASK;
-		flags |= IF_ARB_MSGXTD;
-	} else
-		id = ((frame->can_id & CAN_SFF_MASK) << 18);
+		arb |= frame->can_id & CAN_EFF_MASK;
+		arb |= IF_ARB_MSGXTD;
+	} else {
+		arb |= (frame->can_id & CAN_SFF_MASK) << 18;
+	}
+
+	if (!rtr)
+		arb |= IF_ARB_TRANSMIT;
+
+	/*
+	 * If we change the DIR bit, we need to invalidate the buffer
+	 * first, i.e. clear the MSGVAL flag in the arbiter.
+	 */
+	if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
+		u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+		c_can_inval_msg_object(dev, iface, obj);
+		change_bit(idx, &priv->tx_dir);
+	}
 
-	flags |= IF_ARB_MSGVAL;
+	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
+	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
 
-	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-				IFX_WRITE_LOW_16BIT(id));
-	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
-				IFX_WRITE_HIGH_16BIT(id));
+	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
 	for (i = 0; i < frame->can_dlc; i += 2) {
 		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
 				frame->data[i] | (frame->data[i + 1] << 8));
 	}
-
-	/* enable interrupt for this message object */
-	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
-			frame->can_dlc);
-	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
 }
 
 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
-							int iface,
-							int ctrl_mask)
+						       int iface)
 {
 	int i;
-	struct c_can_priv *priv = netdev_priv(dev);
 
-	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
-		priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-				ctrl_mask & ~IF_MCONT_NEWDAT);
-		c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
-	}
+	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
+		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
 }
 
 static int c_can_handle_lost_msg_obj(struct net_device *dev,
@@ -377,6 +360,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
 	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
 
+	stats->rx_errors++;
+	stats->rx_over_errors++;
+
 	/* create an error msg */
 	skb = alloc_can_err_skb(dev, &frame);
 	if (unlikely(!skb))
@@ -384,22 +370,18 @@
 
 	frame->can_id |= CAN_ERR_CRTL;
 	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-	stats->rx_errors++;
-	stats->rx_over_errors++;
 
 	netif_receive_skb(skb);
 	return 1;
 }
 
-static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
 {
-	u16 flags, data;
-	int i;
-	unsigned int val;
-	struct c_can_priv *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
-	struct sk_buff *skb;
+	struct c_can_priv *priv = netdev_priv(dev);
 	struct can_frame *frame;
+	struct sk_buff *skb;
+	u32 arb, data;
 
 	skb = alloc_can_skb(dev, &frame);
 	if (!skb) {
@@ -409,115 +391,82 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
 
 	frame->can_dlc = get_can_dlc(ctrl & 0x0F);
 
-	flags =	priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
-	val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
-		(flags << 16);
+	arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
+	arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
 
-	if (flags & IF_ARB_MSGXTD)
-		frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	if (arb & IF_ARB_MSGXTD)
+		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
 	else
-		frame->can_id = (val >> 18) & CAN_SFF_MASK;
+		frame->can_id = (arb >> 18) & CAN_SFF_MASK;
 
-	if (flags & IF_ARB_TRANSMIT)
+	if (arb & IF_ARB_TRANSMIT) {
 		frame->can_id |= CAN_RTR_FLAG;
-	else {
-		for (i = 0; i < frame->can_dlc; i += 2) {
-			data = priv->read_reg(priv,
-				C_CAN_IFACE(DATA1_REG, iface) + i / 2);
+	} else {
+		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+		for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+			data = priv->read_reg(priv, dreg);
 			frame->data[i] = data;
 			frame->data[i + 1] = data >> 8;
 		}
 	}
 
-	netif_receive_skb(skb);
-
 	stats->rx_packets++;
 	stats->rx_bytes += frame->can_dlc;
+
+	netif_receive_skb(skb);
 	return 0;
 }
 
 static void c_can_setup_receive_object(struct net_device *dev, int iface,
-					int objno, unsigned int mask,
-					unsigned int id, unsigned int mcont)
+				       u32 obj, u32 mask, u32 id, u32 mcont)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
-			IFX_WRITE_LOW_16BIT(mask));
-
-	/* According to C_CAN documentation, the reserved bit
-	 * in IFx_MASK2 register is fixed 1
-	 */
-	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+	mask |= BIT(29);
+	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
+	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
 
-	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-			IFX_WRITE_LOW_16BIT(id));
-	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
-			(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+	id |= IF_ARB_MSGVAL;
+	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
+	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
 
 	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
-	c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
-	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
-{
-	struct c_can_priv *priv = netdev_priv(dev);
-
-	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
-	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
-	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
-
-	c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
-
-	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
-{
-	int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
-	/*
-	 * as transmission request register's bit n-1 corresponds to
-	 * message object n, we need to handle the same properly.
-	 */
-	if (val & (1 << (objno - 1)))
-		return 1;
-
-	return 0;
+	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
 }
 
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 				    struct net_device *dev)
 {
-	u32 msg_obj_no;
-	struct c_can_priv *priv = netdev_priv(dev);
 	struct can_frame *frame = (struct can_frame *)skb->data;
+	struct c_can_priv *priv = netdev_priv(dev);
+	u32 idx, obj;
 
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
-
-	spin_lock_bh(&priv->xmit_lock);
-	msg_obj_no = get_tx_next_msg_obj(priv);
-
-	/* prepare message object for transmission */
-	c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
-	priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
-	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
-
 	/*
-	 * we have to stop the queue in case of a wrap around or
-	 * if the next TX message object is still in use
+	 * This is not a FIFO. C/D_CAN sends out the buffers
+	 * prioritized. The lowest buffer number wins.
 	 */
-	priv->tx_next++;
-	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
-	    (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+	idx = fls(atomic_read(&priv->tx_active));
+	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+	/* If this is the last buffer, stop the xmit queue */
+	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
 		netif_stop_queue(dev);
-	spin_unlock_bh(&priv->xmit_lock);
+	/*
+	 * Store the message in the interface so we can call
+	 * can_put_echo_skb(). We must do this before we enable
+	 * transmit as we might race against do_tx().
+	 */
+	c_can_setup_tx_object(dev, IF_TX, frame, idx);
+	priv->dlc[idx] = frame->can_dlc;
+	can_put_echo_skb(skb, dev, idx);
+
+	/* Update the active bits */
+	atomic_add((1 << idx), &priv->tx_active);
+	/* Start transmission */
+	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
 
 	return NETDEV_TX_OK;
 }
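The rewritten c_can_start_xmit() above replaces the spinlocked ring pointers with a single atomic bitmask: fls() on tx_active yields the next free buffer index (the hardware always sends the lowest-numbered pending buffer first, so indices must be handed out in ascending order), and the queue stops once the last buffer is claimed. A userspace model of that allocation scheme, using C11 atomics and __builtin_clz in place of the kernel's fls() (all names here are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	#define TX_NUM 16

	static atomic_uint tx_active;

	/* fls(): position of the highest set bit, 1-based; 0 if none. */
	static unsigned int fls32(unsigned int v)
	{
		return v ? 32 - __builtin_clz(v) : 0;
	}

	/* Returns the claimed buffer index, or -1 if all are busy. */
	static int claim_tx_buffer(void)
	{
		unsigned int idx = fls32(atomic_load(&tx_active));

		if (idx >= TX_NUM)
			return -1;
		atomic_fetch_add(&tx_active, 1u << idx); /* mark in flight */
		return (int)idx;
	}

	/* Completion: clear the finished buffers in one atomic step. */
	static void complete_tx_buffers(unsigned int done_mask)
	{
		atomic_fetch_sub(&tx_active, done_mask);
	}

	int main(void)
	{
		printf("first:  %d\n", claim_tx_buffer()); /* 0 */
		printf("second: %d\n", claim_tx_buffer()); /* 1 */
		complete_tx_buffers(0x3);                  /* both done */
		printf("reused: %d\n", claim_tx_buffer()); /* 0 again */
		return 0;
	}

This stays race-free without a lock because xmit is serialized, so a newly claimed bit is always at or above every bit a concurrent completion might clear, and the add/sub pairs never carry into each other.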
@@ -594,11 +543,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 
 	/* setup receive message objects */
 	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
-		c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
-			(IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
 
 	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
-			IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+				   IF_MCONT_RCV_EOB);
 }
 
 /*
@@ -612,30 +560,22 @@ static int c_can_chip_config(struct net_device *dev)
 	struct c_can_priv *priv = netdev_priv(dev);
 
 	/* enable automatic retransmission */
-	priv->write_reg(priv, C_CAN_CTRL_REG,
-			CONTROL_ENABLE_AR);
+	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
 
 	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
 	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
 		/* loopback + silent mode : useful for hot self-test */
-		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
-		priv->write_reg(priv, C_CAN_TEST_REG,
-				TEST_LBACK | TEST_SILENT);
+		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
+		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
 	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
 		/* loopback mode : useful for self-test function */
-		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
 		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
 	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
 		/* silent mode : bus-monitoring mode */
-		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
 		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
-	} else
-		/* normal mode*/
-		priv->write_reg(priv, C_CAN_CTRL_REG,
-				CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+	}
 
 	/* configure message objects */
 	c_can_configure_msg_objects(dev);
@@ -643,6 +583,11 @@
 	/* set a `lec` value so that we can check for updates later */
 	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
+	/* Clear all internal status */
+	atomic_set(&priv->tx_active, 0);
+	priv->rxmasked = 0;
+	priv->tx_dir = 0;
+
 	/* set bittiming params */
 	return c_can_set_bittiming(dev);
 }
@@ -657,13 +602,11 @@ static int c_can_start(struct net_device *dev)
 	if (err)
 		return err;
 
-	priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
-	/* reset tx helper pointers */
-	priv->tx_next = priv->tx_echo = 0;
+	/* Setup the command for new messages */
+	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
+		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
 
-	/* enable status change, error and module interrupts */
-	c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	return 0;
 }
@@ -672,15 +615,13 @@
 {
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	/* disable all interrupts */
-	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
-	/* set the state as STOPPED */
+	c_can_irq_control(priv, false);
 	priv->can.state = CAN_STATE_STOPPED;
 }
 
 static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
 {
+	struct c_can_priv *priv = netdev_priv(dev);
 	int err;
 
 	switch (mode) {
@@ -689,6 +630,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
 		if (err)
 			return err;
 		netif_wake_queue(dev);
+		c_can_irq_control(priv, true);
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -724,42 +666,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
 	return err;
 }
 
-/*
- * priv->tx_echo holds the number of the oldest can_frame put for
- * transmission into the hardware, but not yet ACKed by the CAN tx
- * complete IRQ.
- *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
- * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted packet, stop looking for more.
- */
 static void c_can_do_tx(struct net_device *dev)
 {
 	struct c_can_priv *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
-	u32 val, obj, pkts = 0, bytes = 0;
-
-	spin_lock_bh(&priv->xmit_lock);
-
-	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
-		obj = get_tx_echo_msg_obj(priv->tx_echo);
-		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
 
-		if (val & (1 << (obj - 1)))
-			break;
+	clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
 
-		can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
-		bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+	while ((idx = ffs(pend))) {
+		idx--;
+		pend &= ~(1 << idx);
+		obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+		c_can_inval_tx_object(dev, IF_RX, obj);
+		can_get_echo_skb(dev, idx);
+		bytes += priv->dlc[idx];
 		pkts++;
-		c_can_inval_msg_object(dev, IF_TX, obj);
 	}
 
-	/* restart queue if wrap-up or if queue stalled on last pkt */
-	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
-	    ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
-		netif_wake_queue(dev);
+	/* Clear the bits in the tx_active mask */
+	atomic_sub(clr, &priv->tx_active);
 
-	spin_unlock_bh(&priv->xmit_lock);
+	if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+		netif_wake_queue(dev);
 
 	if (pkts) {
 		stats->tx_bytes += bytes;
@@ -800,18 +729,28 @@ static u32 c_can_adjust_pending(u32 pend)
 	return pend & ~((1 << lasts) - 1);
 }
 
+static inline void c_can_rx_object_get(struct net_device *dev,
+				       struct c_can_priv *priv, u32 obj)
+{
+	c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+}
+
+static inline void c_can_rx_finalize(struct net_device *dev,
+				     struct c_can_priv *priv, u32 obj)
+{
+	if (priv->type != BOSCH_D_CAN)
+		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
+}
+
 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 			      u32 pend, int quota)
 {
-	u32 pkts = 0, ctrl, obj, mcmd;
+	u32 pkts = 0, ctrl, obj;
 
 	while ((obj = ffs(pend)) && quota > 0) {
 		pend &= ~BIT(obj - 1);
 
-		mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
-			IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
-
-		c_can_object_get(dev, IF_RX, obj, mcmd);
+		c_can_rx_object_get(dev, priv, obj);
 		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
 
 		if (ctrl & IF_MCONT_MSGLST) {
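c_can_read_objects() above, like the reworked c_can_do_tx(), drains a pending bitmask with ffs(): take the lowest set bit, clear it, handle that buffer, repeat. The loop shape on its own (ffs() is the POSIX one from <strings.h>):

	#include <stdio.h>
	#include <strings.h>

	int main(void)
	{
		unsigned int pend = 0x2c; /* buffers 2, 3 and 5 finished */
		int idx;

		while ((idx = ffs(pend))) {
			idx--;                /* ffs() is 1-based */
			pend &= ~(1u << idx); /* clear before the next probe */
			printf("handle buffer %d\n", idx);
		}
		return 0;
	}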
@@ -833,9 +772,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 		/* read the data from the message object */
 		c_can_read_msg_object(dev, IF_RX, ctrl);
 
-		if (obj == C_CAN_MSG_RX_LOW_LAST)
-			/* activate all lower message objects */
-			c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+		c_can_rx_finalize(dev, priv, obj);
 
 		pkts++;
 		quota--;
@@ -844,6 +781,13 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
 	return pkts;
 }
 
+static inline u32 c_can_get_pending(struct c_can_priv *priv)
+{
+	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+
+	return pend;
+}
+
 /*
  * theory of operation:
  *
@@ -853,18 +797,9 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- *   the INTPND bit.
- * - if the current message object number is equal to
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- *   receive message objects.
- * - if the current message object number is greater than
- *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- *   only this message object.
+ * We clear the newdat bit right away.
+ *
+ * This can result in packet reordering when the readout is slow.
  */
 static int c_can_do_rx_poll(struct net_device *dev, int quota)
 {
@@ -880,7 +815,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 
 	while (quota > 0) {
 		if (!pend) {
-			pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+			pend = c_can_get_pending(priv);
 			if (!pend)
 				break;
 			/*
@@ -905,12 +840,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 	return pkts;
 }
 
-static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
-{
-	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
-		(priv->current_status & LEC_UNUSED);
-}
-
 static int c_can_handle_state_change(struct net_device *dev,
 				     enum c_can_bus_error_types error_type)
 {
@@ -922,6 +851,26 @@ static int c_can_handle_state_change(struct net_device *dev,
922 struct sk_buff *skb; 851 struct sk_buff *skb;
923 struct can_berr_counter bec; 852 struct can_berr_counter bec;
924 853
854 switch (error_type) {
855 case C_CAN_ERROR_WARNING:
856 /* error warning state */
857 priv->can.can_stats.error_warning++;
858 priv->can.state = CAN_STATE_ERROR_WARNING;
859 break;
860 case C_CAN_ERROR_PASSIVE:
861 /* error passive state */
862 priv->can.can_stats.error_passive++;
863 priv->can.state = CAN_STATE_ERROR_PASSIVE;
864 break;
865 case C_CAN_BUS_OFF:
866 /* bus-off state */
867 priv->can.state = CAN_STATE_BUS_OFF;
868 can_bus_off(dev);
869 break;
870 default:
871 break;
872 }
873
925 /* propagate the error condition to the CAN stack */ 874 /* propagate the error condition to the CAN stack */
926 skb = alloc_can_err_skb(dev, &cf); 875 skb = alloc_can_err_skb(dev, &cf);
927 if (unlikely(!skb)) 876 if (unlikely(!skb))
@@ -935,8 +884,6 @@ static int c_can_handle_state_change(struct net_device *dev,
935 switch (error_type) { 884 switch (error_type) {
936 case C_CAN_ERROR_WARNING: 885 case C_CAN_ERROR_WARNING:
937 /* error warning state */ 886 /* error warning state */
938 priv->can.can_stats.error_warning++;
939 priv->can.state = CAN_STATE_ERROR_WARNING;
940 cf->can_id |= CAN_ERR_CRTL; 887 cf->can_id |= CAN_ERR_CRTL;
941 cf->data[1] = (bec.txerr > bec.rxerr) ? 888 cf->data[1] = (bec.txerr > bec.rxerr) ?
942 CAN_ERR_CRTL_TX_WARNING : 889 CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +894,6 @@ static int c_can_handle_state_change(struct net_device *dev,
947 break; 894 break;
948 case C_CAN_ERROR_PASSIVE: 895 case C_CAN_ERROR_PASSIVE:
949 /* error passive state */ 896 /* error passive state */
950 priv->can.can_stats.error_passive++;
951 priv->can.state = CAN_STATE_ERROR_PASSIVE;
952 cf->can_id |= CAN_ERR_CRTL; 897 cf->can_id |= CAN_ERR_CRTL;
953 if (rx_err_passive) 898 if (rx_err_passive)
954 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 899 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +905,16 @@ static int c_can_handle_state_change(struct net_device *dev,
960 break; 905 break;
961 case C_CAN_BUS_OFF: 906 case C_CAN_BUS_OFF:
962 /* bus-off state */ 907 /* bus-off state */
963 priv->can.state = CAN_STATE_BUS_OFF;
964 cf->can_id |= CAN_ERR_BUSOFF; 908 cf->can_id |= CAN_ERR_BUSOFF;
965 /*
966 * disable all interrupts in bus-off mode to ensure that
967 * the CPU is not hogged down
968 */
969 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
970 can_bus_off(dev); 909 can_bus_off(dev);
971 break; 910 break;
972 default: 911 default:
973 break; 912 break;
974 } 913 }
975 914
976 netif_receive_skb(skb);
977 stats->rx_packets++; 915 stats->rx_packets++;
978 stats->rx_bytes += cf->can_dlc; 916 stats->rx_bytes += cf->can_dlc;
917 netif_receive_skb(skb);
979 918
980 return 1; 919 return 1;
981} 920}
@@ -996,6 +935,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
996 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) 935 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
997 return 0; 936 return 0;
998 937
938 if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
939 return 0;
940
941 /* common for all type of bus errors */
942 priv->can.can_stats.bus_error++;
943 stats->rx_errors++;
944
999 /* propagate the error condition to the CAN stack */ 945 /* propagate the error condition to the CAN stack */
1000 skb = alloc_can_err_skb(dev, &cf); 946 skb = alloc_can_err_skb(dev, &cf);
1001 if (unlikely(!skb)) 947 if (unlikely(!skb))
@@ -1005,10 +951,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
1005 * check for 'last error code' which tells us the 951 * check for 'last error code' which tells us the
1006 * type of the last error to occur on the CAN bus 952 * type of the last error to occur on the CAN bus
1007 */ 953 */
1008
1009	/* common for all types of bus errors */
1010 priv->can.can_stats.bus_error++;
1011 stats->rx_errors++;
1012 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 954 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1013 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 955 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1014 956
@@ -1043,95 +985,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
1043 break; 985 break;
1044 } 986 }
1045 987
1046 /* set a `lec` value so that we can check for updates later */
1047 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1048
1049 netif_receive_skb(skb);
1050 stats->rx_packets++; 988 stats->rx_packets++;
1051 stats->rx_bytes += cf->can_dlc; 989 stats->rx_bytes += cf->can_dlc;
1052 990 netif_receive_skb(skb);
1053 return 1; 991 return 1;
1054} 992}
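
Two things are hoisted to the top of c_can_handle_bus_err() here: the early return when userspace has not enabled bus-error reporting, and the counters, which are now bumped before the allocation that may fail. The ctrlmode gate is the standard opt-in check; roughly:

    /* Sketch: bus-error frames are opt-in via netlink, e.g.
     *   ip link set can0 type can bitrate 500000 berr-reporting on
     * so bail out before doing any allocation work when it is off.
     */
    if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
            return 0;
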
1055 993
1056static int c_can_poll(struct napi_struct *napi, int quota) 994static int c_can_poll(struct napi_struct *napi, int quota)
1057{ 995{
1058 u16 irqstatus;
1059 int lec_type = 0;
1060 int work_done = 0;
1061 struct net_device *dev = napi->dev; 996 struct net_device *dev = napi->dev;
1062 struct c_can_priv *priv = netdev_priv(dev); 997 struct c_can_priv *priv = netdev_priv(dev);
998 u16 curr, last = priv->last_status;
999 int work_done = 0;
1063 1000
1064 irqstatus = priv->irqstatus; 1001 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1065 	if (!irqstatus) 1002 	/* Ack status on C_CAN. D_CAN is self-clearing */
1066 goto end; 1003 if (priv->type != BOSCH_D_CAN)
1004 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1067 1005
1068 /* status events have the highest priority */ 1006 /* handle state changes */
1069 if (irqstatus == STATUS_INTERRUPT) { 1007 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
1070 priv->current_status = priv->read_reg(priv, 1008 netdev_dbg(dev, "entered error warning state\n");
1071 C_CAN_STS_REG); 1009 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1072 1010 }
1073 /* handle Tx/Rx events */
1074 if (priv->current_status & STATUS_TXOK)
1075 priv->write_reg(priv, C_CAN_STS_REG,
1076 priv->current_status & ~STATUS_TXOK);
1077
1078 if (priv->current_status & STATUS_RXOK)
1079 priv->write_reg(priv, C_CAN_STS_REG,
1080 priv->current_status & ~STATUS_RXOK);
1081
1082 /* handle state changes */
1083 if ((priv->current_status & STATUS_EWARN) &&
1084 (!(priv->last_status & STATUS_EWARN))) {
1085 netdev_dbg(dev, "entered error warning state\n");
1086 work_done += c_can_handle_state_change(dev,
1087 C_CAN_ERROR_WARNING);
1088 }
1089 if ((priv->current_status & STATUS_EPASS) &&
1090 (!(priv->last_status & STATUS_EPASS))) {
1091 netdev_dbg(dev, "entered error passive state\n");
1092 work_done += c_can_handle_state_change(dev,
1093 C_CAN_ERROR_PASSIVE);
1094 }
1095 if ((priv->current_status & STATUS_BOFF) &&
1096 (!(priv->last_status & STATUS_BOFF))) {
1097 netdev_dbg(dev, "entered bus off state\n");
1098 work_done += c_can_handle_state_change(dev,
1099 C_CAN_BUS_OFF);
1100 }
1101 1011
1102 /* handle bus recovery events */ 1012 if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
1103 if ((!(priv->current_status & STATUS_BOFF)) && 1013 netdev_dbg(dev, "entered error passive state\n");
1104 (priv->last_status & STATUS_BOFF)) { 1014 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1105 netdev_dbg(dev, "left bus off state\n"); 1015 }
1106 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1107 }
1108 if ((!(priv->current_status & STATUS_EPASS)) &&
1109 (priv->last_status & STATUS_EPASS)) {
1110 netdev_dbg(dev, "left error passive state\n");
1111 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1112 }
1113 1016
1114 priv->last_status = priv->current_status; 1017 if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
1115 1018 netdev_dbg(dev, "entered bus off state\n");
1116 /* handle lec errors on the bus */ 1019 work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
1117 lec_type = c_can_has_and_handle_berr(priv); 1020 goto end;
1118 if (lec_type)
1119 work_done += c_can_handle_bus_err(dev, lec_type);
1120 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1121 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1122 /* handle events corresponding to receive message objects */
1123 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1124 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1125 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1126 /* handle events corresponding to transmit message objects */
1127 c_can_do_tx(dev);
1128 } 1021 }
1129 1022
1023 /* handle bus recovery events */
1024 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1025 netdev_dbg(dev, "left bus off state\n");
1026 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1027 }
1028 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1029 netdev_dbg(dev, "left error passive state\n");
1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1031 }
1032
1033 /* handle lec errors on the bus */
1034 work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
1035
1036 /* Handle Tx/Rx events. We do this unconditionally */
1037 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1038 c_can_do_tx(dev);
1039
1130end: 1040end:
1131 if (work_done < quota) { 1041 if (work_done < quota) {
1132 napi_complete(napi); 1042 napi_complete(napi);
1133 /* enable all IRQs */ 1043 /* enable all IRQs if we are not in bus off state */
1134 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 1044 if (priv->can.state != CAN_STATE_BUS_OFF)
1045 c_can_irq_control(priv, true);
1135 } 1046 }
1136 1047
1137 return work_done; 1048 return work_done;
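
The rewritten poll routine no longer decodes the interrupt register; it reads the status register once per poll and edge-detects each condition against the previously seen value, so an event is handled exactly once, on the cycle its bit rises. The pattern reduced to its core (helper name hypothetical):

    #include <linux/types.h>

    /* Sketch: act only on the 0 -> 1 edge of a status bit. */
    static bool rising_edge(u16 curr, u16 last, u16 bit)
    {
            return (curr & bit) && !(last & bit);
    }

    /* In the poll callback:
     *      curr = read_status(priv);
     *      if (rising_edge(curr, priv->last_status, STATUS_EWARN))
     *              work_done += handle_error_warning(dev);
     *      priv->last_status = curr;
     */
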
@@ -1142,12 +1053,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1142 struct net_device *dev = (struct net_device *)dev_id; 1053 struct net_device *dev = (struct net_device *)dev_id;
1143 struct c_can_priv *priv = netdev_priv(dev); 1054 struct c_can_priv *priv = netdev_priv(dev);
1144 1055
1145 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG); 1056 if (!priv->read_reg(priv, C_CAN_INT_REG))
1146 if (!priv->irqstatus)
1147 return IRQ_NONE; 1057 return IRQ_NONE;
1148 1058
1149 /* disable all interrupts and schedule the NAPI */ 1059 /* disable all interrupts and schedule the NAPI */
1150 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 1060 c_can_irq_control(priv, false);
1151 napi_schedule(&priv->napi); 1061 napi_schedule(&priv->napi);
1152 1062
1153 return IRQ_HANDLED; 1063 return IRQ_HANDLED;
@@ -1184,6 +1094,8 @@ static int c_can_open(struct net_device *dev)
1184 can_led_event(dev, CAN_LED_EVENT_OPEN); 1094 can_led_event(dev, CAN_LED_EVENT_OPEN);
1185 1095
1186 napi_enable(&priv->napi); 1096 napi_enable(&priv->napi);
1097 /* enable status change, error and module interrupts */
1098 c_can_irq_control(priv, true);
1187 netif_start_queue(dev); 1099 netif_start_queue(dev);
1188 1100
1189 return 0; 1101 return 0;
@@ -1226,7 +1138,6 @@ struct net_device *alloc_c_can_dev(void)
1226 return NULL; 1138 return NULL;
1227 1139
1228 priv = netdev_priv(dev); 1140 priv = netdev_priv(dev);
1229 spin_lock_init(&priv->xmit_lock);
1230 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1141 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1231 1142
1232 priv->dev = dev; 1143 priv->dev = dev;
@@ -1281,6 +1192,7 @@ int c_can_power_up(struct net_device *dev)
1281 u32 val; 1192 u32 val;
1282 unsigned long time_out; 1193 unsigned long time_out;
1283 struct c_can_priv *priv = netdev_priv(dev); 1194 struct c_can_priv *priv = netdev_priv(dev);
1195 int ret;
1284 1196
1285 if (!(dev->flags & IFF_UP)) 1197 if (!(dev->flags & IFF_UP))
1286 return 0; 1198 return 0;
@@ -1307,7 +1219,11 @@ int c_can_power_up(struct net_device *dev)
1307 if (time_after(jiffies, time_out)) 1219 if (time_after(jiffies, time_out))
1308 return -ETIMEDOUT; 1220 return -ETIMEDOUT;
1309 1221
1310 return c_can_start(dev); 1222 ret = c_can_start(dev);
1223 if (!ret)
1224 c_can_irq_control(priv, true);
1225
1226 return ret;
1311} 1227}
1312EXPORT_SYMBOL_GPL(c_can_power_up); 1228EXPORT_SYMBOL_GPL(c_can_power_up);
1313#endif 1229#endif
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index faa8404162b3..c56f1b1c11ca 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,14 +22,6 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/*
26 * IFx register masks:
27 * allow easy operation on 16-bit registers when the
28 * argument is 32-bit instead
29 */
30#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
31#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
32
33/* message object split */ 25/* message object split */
34#define C_CAN_NO_OF_OBJECTS 32 26#define C_CAN_NO_OF_OBJECTS 32
35#define C_CAN_MSG_OBJ_RX_NUM 16 27#define C_CAN_MSG_OBJ_RX_NUM 16
@@ -45,8 +37,6 @@
45 37
46#define C_CAN_MSG_OBJ_RX_SPLIT 9 38#define C_CAN_MSG_OBJ_RX_SPLIT 9
47#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) 39#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
48
49#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
50#define RECEIVE_OBJECT_BITS 0x0000ffff 40#define RECEIVE_OBJECT_BITS 0x0000ffff
51 41
52enum reg { 42enum reg {
@@ -183,23 +173,20 @@ struct c_can_priv {
183 struct napi_struct napi; 173 struct napi_struct napi;
184 struct net_device *dev; 174 struct net_device *dev;
185 struct device *device; 175 struct device *device;
186 spinlock_t xmit_lock; 176 atomic_t tx_active;
187 int tx_object; 177 unsigned long tx_dir;
188 int current_status;
189 int last_status; 178 int last_status;
190 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 179 u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
191 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 180 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
192 void __iomem *base; 181 void __iomem *base;
193 const u16 *regs; 182 const u16 *regs;
194 unsigned long irq_flags; /* for request_irq() */
195 unsigned int tx_next;
196 unsigned int tx_echo;
197 void *priv; /* for board-specific data */ 183 void *priv; /* for board-specific data */
198 u16 irqstatus;
199 enum c_can_dev_id type; 184 enum c_can_dev_id type;
200 u32 __iomem *raminit_ctrlreg; 185 u32 __iomem *raminit_ctrlreg;
201 unsigned int instance; 186 int instance;
202 void (*raminit) (const struct c_can_priv *priv, bool enable); 187 void (*raminit) (const struct c_can_priv *priv, bool enable);
188 u32 comm_rcv_high;
189 u32 rxmasked;
203 u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; 190 u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
204}; 191};
205 192
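
The header drops the xmit spinlock and the tx_next/tx_echo indices in favour of an atomic tx_active bitmask plus a tx_dir word, one bit per TX message object. The driver keeps the mask in an atomic_t; the same claim/release idea expressed with the generic bitops API (names and layout assumed for illustration only):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define TX_OBJECTS      16      /* one bit per hardware TX object */

    /* Atomically claim a free TX object, or -EBUSY if all are in flight. */
    static int claim_tx_object(unsigned long *tx_active)
    {
            int idx;

            for (idx = 0; idx < TX_OBJECTS; idx++)
                    if (!test_and_set_bit(idx, tx_active))
                            return idx;     /* the bit itself is the lock */
            return -EBUSY;                  /* caller stops the netif queue */
    }

    /* TX-complete path: releasing is a single atomic clear. */
    static void release_tx_object(unsigned long *tx_active, int idx)
    {
            clear_bit(idx, tx_active);
    }
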
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index bce0be54c2f5..fe5f6303b584 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -84,8 +84,11 @@ static int c_can_pci_probe(struct pci_dev *pdev,
84 goto out_disable_device; 84 goto out_disable_device;
85 } 85 }
86 86
87 pci_set_master(pdev); 87 ret = pci_enable_msi(pdev);
88 pci_enable_msi(pdev); 88 if (!ret) {
89 dev_info(&pdev->dev, "MSI enabled\n");
90 pci_set_master(pdev);
91 }
89 92
90 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
91 if (!addr) { 94 if (!addr) {
@@ -132,6 +135,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
132 goto out_free_c_can; 135 goto out_free_c_can;
133 } 136 }
134 137
138 priv->type = c_can_pci_data->type;
139
135 /* Configure access to registers */ 140 /* Configure access to registers */
136 switch (c_can_pci_data->reg_align) { 141 switch (c_can_pci_data->reg_align) {
137 case C_CAN_REG_ALIGN_32: 142 case C_CAN_REG_ALIGN_32:
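
The PCI probe now checks pci_enable_msi()'s return value instead of ignoring it, only reporting MSI when it actually succeeded, and it also stores the device type so the poll loop can tell C_CAN from D_CAN when acking the status register. The usual shape of the MSI fallback inside a probe function (my_isr and DRV_NAME are placeholders):

    /* Sketch: prefer MSI, fall back silently to line-based interrupts. */
    ret = pci_enable_msi(pdev);
    if (ret)
            dev_info(&pdev->dev, "MSI unavailable (%d), using INTx\n", ret);

    /* pci_enable_msi() rewrites pdev->irq on success, so the
     * request_irq() call is the same on both paths. */
    ret = request_irq(pdev->irq, my_isr, IRQF_SHARED, DRV_NAME, dev);
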
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 806d92753427..1df0b322d1e4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
222 222
223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) 225 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 226 dev_info(&pdev->dev, "control memory is not used for raminit\n");
227 else 227 else
228 priv->raminit = c_can_hw_raminit; 228 priv->raminit = c_can_hw_raminit;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c7a260478749..e318e87e2bfc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
256 256
257 /* Check if the CAN device has bit-timing parameters */ 257 /* Check if the CAN device has bit-timing parameters */
258 if (!btc) 258 if (!btc)
259 return -ENOTSUPP; 259 return -EOPNOTSUPP;
260 260
261 /* 261 /*
262 * Depending on the given can_bittiming parameter structure the CAN 262 * Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
index a3d99a8fd2d1..ab7f1b01be49 100644
--- a/drivers/net/can/led.c
+++ b/drivers/net/can/led.c
@@ -97,6 +97,9 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
97 if (!priv) 97 if (!priv)
98 return NOTIFY_DONE; 98 return NOTIFY_DONE;
99 99
100 if (!priv->tx_led_trig || !priv->rx_led_trig)
101 return NOTIFY_DONE;
102
100 if (msg == NETDEV_CHANGENAME) { 103 if (msg == NETDEV_CHANGENAME) {
101 snprintf(name, sizeof(name), "%s-tx", netdev->name); 104 snprintf(name, sizeof(name), "%s-tx", netdev->name);
102 led_trigger_rename_static(name, priv->tx_led_trig); 105 led_trigger_rename_static(name, priv->tx_led_trig);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
551{ 551{
552 struct sja1000_priv *priv; 552 struct sja1000_priv *priv;
553 struct peak_pci_chan *chan; 553 struct peak_pci_chan *chan;
554 struct net_device *dev; 554 struct net_device *dev, *prev_dev;
555 void __iomem *cfg_base, *reg_base; 555 void __iomem *cfg_base, *reg_base;
556 u16 sub_sys_id, icr; 556 u16 sub_sys_id, icr;
557 int i, err, channels; 557 int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
688 writew(0x0, cfg_base + PITA_ICR + 2); 688 writew(0x0, cfg_base + PITA_ICR + 2);
689 689
690 chan = NULL; 690 chan = NULL;
691 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { 691 for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
692 unregister_sja1000dev(dev);
693 free_sja1000dev(dev);
694 priv = netdev_priv(dev); 692 priv = netdev_priv(dev);
695 chan = priv->priv; 693 chan = priv->priv;
694 prev_dev = chan->prev_dev;
695
696 unregister_sja1000dev(dev);
697 free_sja1000dev(dev);
696 } 698 }
697 699
698 /* free any PCIeC resources too */ 700 /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
726 728
727 /* Loop over all registered devices */ 729 /* Loop over all registered devices */
728 while (1) { 730 while (1) {
731 struct net_device *prev_dev = chan->prev_dev;
732
729 dev_info(&pdev->dev, "removing device %s\n", dev->name); 733 dev_info(&pdev->dev, "removing device %s\n", dev->name);
730 unregister_sja1000dev(dev); 734 unregister_sja1000dev(dev);
731 free_sja1000dev(dev); 735 free_sja1000dev(dev);
732 dev = chan->prev_dev; 736 dev = prev_dev;
733 737
734 if (!dev) { 738 if (!dev) {
735 /* do that only for first channel */ 739 /* do that only for first channel */
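
Both the probe error path and peak_pci_remove() walked the channel list by reading chan->prev_dev after free_sja1000dev() had already released the memory holding it, a use-after-free. The fix is the classic one: copy the link out of the node before the node is destroyed.

    #include <linux/slab.h>

    struct node {
            struct node *prev;
            /* ... payload ... */
    };

    static void teardown(struct node *n)
    {
            while (n) {
                    struct node *prev = n->prev;    /* save the link first */

                    kfree(n);                       /* n is now invalid */
                    n = prev;                       /* safe: copied above */
            }
    }
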
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index df136a2516c4..014695d7e6a3 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
49static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */
49 50
50module_param_array(port, ulong, NULL, S_IRUGO); 51module_param_array(port, ulong, NULL, S_IRUGO);
51MODULE_PARM_DESC(port, "I/O port number"); 52MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
101static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, 102static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
102 int reg) 103 int reg)
103{ 104{
104 unsigned long base = (unsigned long)priv->reg_base; 105 unsigned long flags, base = (unsigned long)priv->reg_base;
106 u8 readval;
105 107
108 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
106 outb(reg, base); 109 outb(reg, base);
107 return inb(base + 1); 110 readval = inb(base + 1);
111 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
112
113 return readval;
108} 114}
109 115
110static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, 116static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
111 int reg, u8 val) 117 int reg, u8 val)
112{ 118{
113 unsigned long base = (unsigned long)priv->reg_base; 119 unsigned long flags, base = (unsigned long)priv->reg_base;
114 120
121 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
115 outb(reg, base); 122 outb(reg, base);
116 outb(val, base + 1); 123 outb(val, base + 1);
124 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
117} 125}
118 126
119static int sja1000_isa_probe(struct platform_device *pdev) 127static int sja1000_isa_probe(struct platform_device *pdev)
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
169 if (iosize == SJA1000_IOSIZE_INDIRECT) { 177 if (iosize == SJA1000_IOSIZE_INDIRECT) {
170 priv->read_reg = sja1000_isa_port_read_reg_indirect; 178 priv->read_reg = sja1000_isa_port_read_reg_indirect;
171 priv->write_reg = sja1000_isa_port_write_reg_indirect; 179 priv->write_reg = sja1000_isa_port_write_reg_indirect;
180 spin_lock_init(&indirect_lock[idx]);
172 } else { 181 } else {
173 priv->read_reg = sja1000_isa_port_read_reg; 182 priv->read_reg = sja1000_isa_port_read_reg;
174 priv->write_reg = sja1000_isa_port_write_reg; 183 priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
198 207
199 platform_set_drvdata(pdev, dev); 208 platform_set_drvdata(pdev, dev);
200 SET_NETDEV_DEV(dev, &pdev->dev); 209 SET_NETDEV_DEV(dev, &pdev->dev);
210 dev->dev_id = idx;
201 211
202 err = register_sja1000dev(dev); 212 err = register_sja1000dev(dev);
203 if (err) { 213 if (err) {
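
In indirect mode the SJA1000 sits behind an index/data port pair, so the outb(reg)/inb(base + 1) sequence must execute atomically: another CPU or an interrupt handler writing the index port between the two accesses would redirect the read. Hence the new per-device spinlock taken with the irqsave variant, and the dev->dev_id assignment that lets the lock array be indexed per device. The idiom in isolation:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(idx_lock);       /* one lock per index/data pair */

    static u8 indexed_read(unsigned long base, int reg)
    {
            unsigned long flags;
            u8 val;

            spin_lock_irqsave(&idx_lock, flags);  /* block other CPUs and IRQs */
            outb(reg, base);                      /* select the register */
            val = inb(base + 1);                  /* read before index changes */
            spin_unlock_irqrestore(&idx_lock, flags);

            return val;
    }
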
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f5b16e0e3a12..dcf9196f6316 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
323 return; 323 return;
324 324
325 spin_lock(&sl->lock); 325 spin_lock_bh(&sl->lock);
326 if (sl->xleft <= 0) { 326 if (sl->xleft <= 0) {
327 /* Now serial buffer is almost free & we can start 327 /* Now serial buffer is almost free & we can start
328 * transmission of another packet */ 328 * transmission of another packet */
329 sl->dev->stats.tx_packets++; 329 sl->dev->stats.tx_packets++;
330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
331 spin_unlock(&sl->lock); 331 spin_unlock_bh(&sl->lock);
332 netif_wake_queue(sl->dev); 332 netif_wake_queue(sl->dev);
333 return; 333 return;
334 } 334 }
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 336 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
337 sl->xleft -= actual; 337 sl->xleft -= actual;
338 sl->xhead += actual; 338 sl->xhead += actual;
339 spin_unlock(&sl->lock); 339 spin_unlock_bh(&sl->lock);
340} 340}
341 341
342/* Send a can_frame to a TTY queue. */ 342/* Send a can_frame to a TTY queue. */
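
sl->lock is also taken from the network xmit path, which runs with bottom halves disabled, while the tty write-wakeup callback can run in plain process context. A process-context holder using bare spin_lock() can be preempted on the same CPU by the softirq that wants the same lock and deadlock; the _bh variant keeps bottom halves off for the duration:

    spin_lock_bh(&sl->lock);        /* process context: BHs disabled */
    /* ... touch state shared with the xmit/softirq side ... */
    spin_unlock_bh(&sl->lock);
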
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..051349458462 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,19 @@ source "drivers/net/ethernet/calxeda/Kconfig"
35source "drivers/net/ethernet/chelsio/Kconfig" 35source "drivers/net/ethernet/chelsio/Kconfig"
36source "drivers/net/ethernet/cirrus/Kconfig" 36source "drivers/net/ethernet/cirrus/Kconfig"
37source "drivers/net/ethernet/cisco/Kconfig" 37source "drivers/net/ethernet/cisco/Kconfig"
38
39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI
42 depends on X86 || COMPILE_TEST
43 ---help---
 44	  Driver for the EtherCAT master module located on the CCAT FPGA
 45	  found on the Beckhoff CX5020 and possibly other Beckhoff CX
 46	  series industrial PCs.
47
48 To compile this driver as a module, choose M here. The module
49 will be called ec_bhf.
50
38source "drivers/net/ethernet/davicom/Kconfig" 51source "drivers/net/ethernet/davicom/Kconfig"
39 52
40config DNET 53config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
24obj-$(CONFIG_CX_ECAT) += ec_bhf.o
24obj-$(CONFIG_DM9000) += davicom/ 25obj-$(CONFIG_DM9000) += davicom/
25obj-$(CONFIG_DNET) += dnet.o 26obj-$(CONFIG_DNET) += dnet.o
26obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 27obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 80c1ab74a4b8..fdddba51473e 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -1,5 +1,6 @@
1config ALTERA_TSE 1config ALTERA_TSE
2 tristate "Altera Triple-Speed Ethernet MAC support" 2 tristate "Altera Triple-Speed Ethernet MAC support"
3 depends on HAS_DMA
3 select PHYLIB 4 select PHYLIB
4 ---help--- 5 ---help---
5 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. 6 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o 5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ 6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o 7altera_msgdma.o altera_sgdma.o altera_utils.o
8ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 3df18669ea30..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -18,6 +18,7 @@
18#include "altera_utils.h" 18#include "altera_utils.h"
19#include "altera_tse.h" 19#include "altera_tse.h"
20#include "altera_msgdmahw.h" 20#include "altera_msgdmahw.h"
21#include "altera_msgdma.h"
21 22
22/* No initialization work to do for MSGDMA */ 23/* No initialization work to do for MSGDMA */
23int msgdma_initialize(struct altera_tse_private *priv) 24int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
29{ 30{
30} 31}
31 32
33void msgdma_start_rxdma(struct altera_tse_private *priv)
34{
35}
36
32void msgdma_reset(struct altera_tse_private *priv) 37void msgdma_reset(struct altera_tse_private *priv)
33{ 38{
34 int counter; 39 int counter;
35 struct msgdma_csr *txcsr =
36 (struct msgdma_csr *)priv->tx_dma_csr;
37 struct msgdma_csr *rxcsr =
38 (struct msgdma_csr *)priv->rx_dma_csr;
39 40
40 /* Reset Rx mSGDMA */ 41 /* Reset Rx mSGDMA */
41 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 42 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
42 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); 43 msgdma_csroffs(status));
44 csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
45 msgdma_csroffs(control));
43 46
44 counter = 0; 47 counter = 0;
45 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 48 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
46 if (tse_bit_is_clear(&rxcsr->status, 49 if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
47 MSGDMA_CSR_STAT_RESETTING)) 50 MSGDMA_CSR_STAT_RESETTING))
48 break; 51 break;
49 udelay(1); 52 udelay(1);
@@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
54 "TSE Rx mSGDMA resetting bit never cleared!\n"); 57 "TSE Rx mSGDMA resetting bit never cleared!\n");
55 58
56 /* clear all status bits */ 59 /* clear all status bits */
57 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 60 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
58 61
59 /* Reset Tx mSGDMA */ 62 /* Reset Tx mSGDMA */
60 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 63 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
61 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); 64 msgdma_csroffs(status));
65
66 csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
67 msgdma_csroffs(control));
62 68
63 counter = 0; 69 counter = 0;
64 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 70 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
65 if (tse_bit_is_clear(&txcsr->status, 71 if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
66 MSGDMA_CSR_STAT_RESETTING)) 72 MSGDMA_CSR_STAT_RESETTING))
67 break; 73 break;
68 udelay(1); 74 udelay(1);
@@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
73 "TSE Tx mSGDMA resetting bit never cleared!\n"); 79 "TSE Tx mSGDMA resetting bit never cleared!\n");
74 80
75 /* clear all status bits */ 81 /* clear all status bits */
76 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 82 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
77} 83}
78 84
79void msgdma_disable_rxirq(struct altera_tse_private *priv) 85void msgdma_disable_rxirq(struct altera_tse_private *priv)
80{ 86{
81 struct msgdma_csr *csr = priv->rx_dma_csr; 87 tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
82 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 88 MSGDMA_CSR_CTL_GLOBAL_INTR);
83} 89}
84 90
85void msgdma_enable_rxirq(struct altera_tse_private *priv) 91void msgdma_enable_rxirq(struct altera_tse_private *priv)
86{ 92{
87 struct msgdma_csr *csr = priv->rx_dma_csr; 93 tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
88 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 94 MSGDMA_CSR_CTL_GLOBAL_INTR);
89} 95}
90 96
91void msgdma_disable_txirq(struct altera_tse_private *priv) 97void msgdma_disable_txirq(struct altera_tse_private *priv)
92{ 98{
93 struct msgdma_csr *csr = priv->tx_dma_csr; 99 tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
94 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 100 MSGDMA_CSR_CTL_GLOBAL_INTR);
95} 101}
96 102
97void msgdma_enable_txirq(struct altera_tse_private *priv) 103void msgdma_enable_txirq(struct altera_tse_private *priv)
98{ 104{
99 struct msgdma_csr *csr = priv->tx_dma_csr; 105 tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
100 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 106 MSGDMA_CSR_CTL_GLOBAL_INTR);
101} 107}
102 108
103void msgdma_clear_rxirq(struct altera_tse_private *priv) 109void msgdma_clear_rxirq(struct altera_tse_private *priv)
104{ 110{
105 struct msgdma_csr *csr = priv->rx_dma_csr; 111 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
106 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
107} 112}
108 113
109void msgdma_clear_txirq(struct altera_tse_private *priv) 114void msgdma_clear_txirq(struct altera_tse_private *priv)
110{ 115{
111 struct msgdma_csr *csr = priv->tx_dma_csr; 116 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
112 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
113} 117}
114 118
115/* return 0 to indicate transmit is pending */ 119/* return 0 to indicate transmit is pending */
116int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 120int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
117{ 121{
118 struct msgdma_extended_desc *desc = priv->tx_dma_desc; 122 csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
119 123 msgdma_descroffs(read_addr_lo));
120 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); 124 csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
121 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); 125 msgdma_descroffs(read_addr_hi));
122 iowrite32(0, &desc->write_addr_lo); 126 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
123 iowrite32(0, &desc->write_addr_hi); 127 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
124 iowrite32(buffer->len, &desc->len); 128 csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
125 iowrite32(0, &desc->burst_seq_num); 129 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
126 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); 130 csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
127 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); 131 msgdma_descroffs(stride));
132 csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
133 msgdma_descroffs(control));
128 return 0; 134 return 0;
129} 135}
130 136
@@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
133 u32 ready = 0; 139 u32 ready = 0;
134 u32 inuse; 140 u32 inuse;
135 u32 status; 141 u32 status;
136 struct msgdma_csr *txcsr =
137 (struct msgdma_csr *)priv->tx_dma_csr;
138 142
139 /* Get number of sent descriptors */ 143 /* Get number of sent descriptors */
140 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; 144 inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
145 & 0xffff;
141 146
142 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
143 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
144 } else { 149 } else {
145 /* Check for buffered last packet */ 150 /* Check for buffered last packet */
146 status = ioread32(&txcsr->status); 151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
147 if (status & MSGDMA_CSR_STAT_BUSY) 152 if (status & MSGDMA_CSR_STAT_BUSY)
148 ready = priv->tx_prod - priv->tx_cons - 1; 153 ready = priv->tx_prod - priv->tx_cons - 1;
149 else 154 else
@@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
154 159
155/* Put buffer to the mSGDMA RX FIFO 160/* Put buffer to the mSGDMA RX FIFO
156 */ 161 */
157int msgdma_add_rx_desc(struct altera_tse_private *priv, 162void msgdma_add_rx_desc(struct altera_tse_private *priv,
158 struct tse_buffer *rxbuffer) 163 struct tse_buffer *rxbuffer)
159{ 164{
160 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
161 u32 len = priv->rx_dma_buf_sz; 165 u32 len = priv->rx_dma_buf_sz;
162 dma_addr_t dma_addr = rxbuffer->dma_addr; 166 dma_addr_t dma_addr = rxbuffer->dma_addr;
163 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP 167 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
167 | MSGDMA_DESC_CTL_TR_ERR_IRQ 171 | MSGDMA_DESC_CTL_TR_ERR_IRQ
168 | MSGDMA_DESC_CTL_GO); 172 | MSGDMA_DESC_CTL_GO);
169 173
170 iowrite32(0, &desc->read_addr_lo); 174 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
171 iowrite32(0, &desc->read_addr_hi); 175 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
172 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); 176 csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
173 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); 177 msgdma_descroffs(write_addr_lo));
174 iowrite32(len, &desc->len); 178 csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
175 iowrite32(0, &desc->burst_seq_num); 179 msgdma_descroffs(write_addr_hi));
176 iowrite32(0x00010001, &desc->stride); 180 csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
177 iowrite32(control, &desc->control); 181 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
178 return 1; 182 csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
183 csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
179} 184}
180 185
181/* status is returned on upper 16 bits, 186/* status is returned on upper 16 bits,
@@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
186 u32 rxstatus = 0; 191 u32 rxstatus = 0;
187 u32 pktlength; 192 u32 pktlength;
188 u32 pktstatus; 193 u32 pktstatus;
189 struct msgdma_csr *rxcsr = 194
190 (struct msgdma_csr *)priv->rx_dma_csr; 195 if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
191 struct msgdma_response *rxresp = 196 & 0xffff) {
192 (struct msgdma_response *)priv->rx_dma_resp; 197 pktlength = csrrd32(priv->rx_dma_resp,
193 198 msgdma_respoffs(bytes_transferred));
194 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { 199 pktstatus = csrrd32(priv->rx_dma_resp,
195 pktlength = ioread32(&rxresp->bytes_transferred); 200 msgdma_respoffs(status));
196 pktstatus = ioread32(&rxresp->status);
197 rxstatus = pktstatus; 201 rxstatus = pktstatus;
198 rxstatus = rxstatus << 16; 202 rxstatus = rxstatus << 16;
199 rxstatus |= (pktlength & 0xffff); 203 rxstatus |= (pktlength & 0xffff);
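
Throughout the mSGDMA code, casting priv->*_dma_csr to a struct pointer and dereferencing its members is replaced by csrrd32()/csrwr32() accessors that take the __iomem base plus an offsetof()-derived byte offset (the msgdma_csroffs()/msgdma_descroffs()/msgdma_respoffs() macros added a few hunks below in altera_msgdmahw.h). That keeps the __iomem annotation intact for sparse instead of laundering it through a plain pointer. The accessors themselves are not part of this hunk; a plausible implementation consistent with the call sites (assumed, not quoted from the tree):

    #include <linux/io.h>

    /* 32-bit CSR accessors: base is __iomem, offs a byte offset. */
    static inline u32 csrrd32(void __iomem *mac, size_t offs)
    {
            return readl(mac + offs);
    }

    static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
    {
            writel(val, mac + offs);
    }

    /* e.g. csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
     *              msgdma_csroffs(status)); */
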
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
index 7f0f5bf2bba2..42cf61c81057 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.h
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
25void msgdma_clear_rxirq(struct altera_tse_private *); 25void msgdma_clear_rxirq(struct altera_tse_private *);
26void msgdma_clear_txirq(struct altera_tse_private *); 26void msgdma_clear_txirq(struct altera_tse_private *);
27u32 msgdma_tx_completions(struct altera_tse_private *); 27u32 msgdma_tx_completions(struct altera_tse_private *);
28int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); 28void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); 29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
30u32 msgdma_rx_status(struct altera_tse_private *); 30u32 msgdma_rx_status(struct altera_tse_private *);
31int msgdma_initialize(struct altera_tse_private *); 31int msgdma_initialize(struct altera_tse_private *);
32void msgdma_uninitialize(struct altera_tse_private *); 32void msgdma_uninitialize(struct altera_tse_private *);
33void msgdma_start_rxdma(struct altera_tse_private *);
33 34
34#endif /* __ALTERA_MSGDMA_H__ */ 35#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
17#ifndef __ALTERA_MSGDMAHW_H__ 17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__ 18#define __ALTERA_MSGDMAHW_H__
19 19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format 20/* mSGDMA extended descriptor format
30 */ 21 */
31struct msgdma_extended_desc { 22struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
159 u32 status; 150 u32 status;
160}; 151};
161 152
153#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
154#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
155#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
156
162/* mSGDMA response register bit definitions 157/* mSGDMA response register bit definitions
163 */ 158 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8) 159#define MSGDMA_RESP_EARLY_TERM BIT(8)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 0ee96639ae44..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,28 +20,28 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
28 u16 length, 28 u16 length,
29 int generate_eop, 29 int generate_eop,
30 int rfixed, 30 int rfixed,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv);
64 64
65int sgdma_initialize(struct altera_tse_private *priv) 65int sgdma_initialize(struct altera_tse_private *priv)
66{ 66{
67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD; 67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
68 SGDMA_CTRLREG_INTEN;
68 69
69 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | 70 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
71 SGDMA_CTRLREG_INTEN |
70 SGDMA_CTRLREG_ILASTD; 72 SGDMA_CTRLREG_ILASTD;
71 73
74 priv->sgdmadesclen = sizeof(struct sgdma_descrip);
75
72 INIT_LIST_HEAD(&priv->txlisthd); 76 INIT_LIST_HEAD(&priv->txlisthd);
73 INIT_LIST_HEAD(&priv->rxlisthd); 77 INIT_LIST_HEAD(&priv->rxlisthd);
74 78
75 priv->rxdescphys = (dma_addr_t) 0; 79 priv->rxdescphys = (dma_addr_t) 0;
76 priv->txdescphys = (dma_addr_t) 0; 80 priv->txdescphys = (dma_addr_t) 0;
77 81
78 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, 82 priv->rxdescphys = dma_map_single(priv->device,
83 (void __force *)priv->rx_dma_desc,
79 priv->rxdescmem, DMA_BIDIRECTIONAL); 84 priv->rxdescmem, DMA_BIDIRECTIONAL);
80 85
81 if (dma_mapping_error(priv->device, priv->rxdescphys)) { 86 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
84 return -EINVAL; 89 return -EINVAL;
85 } 90 }
86 91
87 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, 92 priv->txdescphys = dma_map_single(priv->device,
93 (void __force *)priv->tx_dma_desc,
88 priv->txdescmem, DMA_TO_DEVICE); 94 priv->txdescmem, DMA_TO_DEVICE);
89 95
90 if (dma_mapping_error(priv->device, priv->txdescphys)) { 96 if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
93 return -EINVAL; 99 return -EINVAL;
94 } 100 }
95 101
102 /* Initialize descriptor memory to all 0's, sync memory to cache */
103 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
104 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
105
106 dma_sync_single_for_device(priv->device, priv->txdescphys,
107 priv->txdescmem, DMA_TO_DEVICE);
108
109 dma_sync_single_for_device(priv->device, priv->rxdescphys,
110 priv->rxdescmem, DMA_TO_DEVICE);
111
96 return 0; 112 return 0;
97} 113}
98 114
@@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
112 */ 128 */
113void sgdma_reset(struct altera_tse_private *priv) 129void sgdma_reset(struct altera_tse_private *priv)
114{ 130{
115 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
116 u32 txdescriplen = priv->txdescmem;
117 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
118 u32 rxdescriplen = priv->rxdescmem;
119 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
120 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
121
122 /* Initialize descriptor memory to 0 */ 131 /* Initialize descriptor memory to 0 */
123 memset(ptxdescripmem, 0, txdescriplen); 132 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
124 memset(prxdescripmem, 0, rxdescriplen); 133 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
125 134
126 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); 135 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
127 iowrite32(0, &ptxsgdma->control); 136 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
128 137
129 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); 138 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
130 iowrite32(0, &prxsgdma->control); 139 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
131} 140}
132 141
142/* For SGDMA, interrupts remain enabled after initially enabling,
143 * so no need to provide implementations for abstract enable
144 * and disable
145 */
146
133void sgdma_enable_rxirq(struct altera_tse_private *priv) 147void sgdma_enable_rxirq(struct altera_tse_private *priv)
134{ 148{
135 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
136 priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
137 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
138} 149}
139 150
140void sgdma_enable_txirq(struct altera_tse_private *priv) 151void sgdma_enable_txirq(struct altera_tse_private *priv)
141{ 152{
142 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
143 priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
144 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
145} 153}
146 154
147/* for SGDMA, RX interrupts remain enabled after enabling */
148void sgdma_disable_rxirq(struct altera_tse_private *priv) 155void sgdma_disable_rxirq(struct altera_tse_private *priv)
149{ 156{
150} 157}
151 158
152/* for SGDMA, TX interrupts remain enabled after enabling */
153void sgdma_disable_txirq(struct altera_tse_private *priv) 159void sgdma_disable_txirq(struct altera_tse_private *priv)
154{ 160{
155} 161}
156 162
157void sgdma_clear_rxirq(struct altera_tse_private *priv) 163void sgdma_clear_rxirq(struct altera_tse_private *priv)
158{ 164{
159 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 165 tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
160 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 166 SGDMA_CTRLREG_CLRINT);
161} 167}
162 168
163void sgdma_clear_txirq(struct altera_tse_private *priv) 169void sgdma_clear_txirq(struct altera_tse_private *priv)
164{ 170{
165 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 171 tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
166 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 172 SGDMA_CTRLREG_CLRINT);
167} 173}
168 174
169/* transmits buffer through SGDMA. Returns number of buffers 175/* transmits buffer through SGDMA. Returns number of buffers
@@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
173 */ 179 */
174int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 180int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
175{ 181{
176 int pktstx = 0; 182 struct sgdma_descrip __iomem *descbase =
177 struct sgdma_descrip *descbase = 183 (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
178 (struct sgdma_descrip *)priv->tx_dma_desc;
179 184
180 struct sgdma_descrip *cdesc = &descbase[0]; 185 struct sgdma_descrip __iomem *cdesc = &descbase[0];
181 struct sgdma_descrip *ndesc = &descbase[1]; 186 struct sgdma_descrip __iomem *ndesc = &descbase[1];
182 187
183 /* wait 'til the tx sgdma is ready for the next transmit request */ 188 /* wait 'til the tx sgdma is ready for the next transmit request */
184 if (sgdma_txbusy(priv)) 189 if (sgdma_txbusy(priv))
185 return 0; 190 return 0;
186 191
187 sgdma_descrip(cdesc, /* current descriptor */ 192 sgdma_setup_descrip(cdesc, /* current descriptor */
188 ndesc, /* next descriptor */ 193 ndesc, /* next descriptor */
189 sgdma_txphysaddr(priv, ndesc), 194 sgdma_txphysaddr(priv, ndesc),
190 buffer->dma_addr, /* address of packet to xmit */ 195 buffer->dma_addr, /* address of packet to xmit */
191 0, /* write addr 0 for tx dma */ 196 0, /* write addr 0 for tx dma */
192 buffer->len, /* length of packet */ 197 buffer->len, /* length of packet */
193 SGDMA_CONTROL_EOP, /* Generate EOP */ 198 SGDMA_CONTROL_EOP, /* Generate EOP */
194 0, /* read fixed */ 199 0, /* read fixed */
195 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 200 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
196 201
197 pktstx = sgdma_async_write(priv, cdesc); 202 sgdma_async_write(priv, cdesc);
198 203
199 /* enqueue the request to the pending transmit queue */ 204 /* enqueue the request to the pending transmit queue */
200 queue_tx(priv, buffer); 205 queue_tx(priv, buffer);
@@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
208u32 sgdma_tx_completions(struct altera_tse_private *priv) 213u32 sgdma_tx_completions(struct altera_tse_private *priv)
209{ 214{
210 u32 ready = 0; 215 u32 ready = 0;
211 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
212 216
213 if (!sgdma_txbusy(priv) && 217 if (!sgdma_txbusy(priv) &&
214 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && 218 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
219 & SGDMA_CONTROL_HW_OWNED) == 0) &&
215 (dequeue_tx(priv))) { 220 (dequeue_tx(priv))) {
216 ready = 1; 221 ready = 1;
217 } 222 }
@@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
219 return ready; 224 return ready;
220} 225}
221 226
222int sgdma_add_rx_desc(struct altera_tse_private *priv, 227void sgdma_start_rxdma(struct altera_tse_private *priv)
223 struct tse_buffer *rxbuffer) 228{
229 sgdma_async_read(priv);
230}
231
232void sgdma_add_rx_desc(struct altera_tse_private *priv,
233 struct tse_buffer *rxbuffer)
224{ 234{
225 queue_rx(priv, rxbuffer); 235 queue_rx(priv, rxbuffer);
226 return sgdma_async_read(priv);
227} 236}
228 237
229/* status is returned on upper 16 bits, 238/* status is returned on upper 16 bits,
@@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
231 */ 240 */
232u32 sgdma_rx_status(struct altera_tse_private *priv) 241u32 sgdma_rx_status(struct altera_tse_private *priv)
233{ 242{
234 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 243 struct sgdma_descrip __iomem *base =
235 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; 244 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
236 struct sgdma_descrip *desc = NULL; 245 struct sgdma_descrip __iomem *desc = NULL;
237 int pktsrx;
238 unsigned int rxstatus = 0;
239 unsigned int pktlength = 0;
240 unsigned int pktstatus = 0;
241 struct tse_buffer *rxbuffer = NULL; 246 struct tse_buffer *rxbuffer = NULL;
247 unsigned int rxstatus = 0;
242 248
243 dma_sync_single_for_cpu(priv->device, 249 u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
244 priv->rxdescphys,
245 priv->rxdescmem,
246 DMA_BIDIRECTIONAL);
247 250
248 desc = &base[0]; 251 desc = &base[0];
249 if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) || 252 if (sts & SGDMA_STSREG_EOP) {
250 (desc->status & SGDMA_STATUS_EOP)) { 253 unsigned int pktlength = 0;
251 pktlength = desc->bytes_xferred; 254 unsigned int pktstatus = 0;
252 pktstatus = desc->status & 0x3f; 255 dma_sync_single_for_cpu(priv->device,
253 rxstatus = pktstatus; 256 priv->rxdescphys,
257 priv->sgdmadesclen,
258 DMA_FROM_DEVICE);
259
260 pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
261 pktstatus = csrrd8(desc, sgdma_descroffs(status));
262 rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
254 rxstatus = rxstatus << 16; 263 rxstatus = rxstatus << 16;
255 rxstatus |= (pktlength & 0xffff); 264 rxstatus |= (pktlength & 0xffff);
256 265
257 desc->status = 0; 266 if (rxstatus) {
258 267 csrwr8(0, desc, sgdma_descroffs(status));
259 rxbuffer = dequeue_rx(priv); 268
260 if (rxbuffer == NULL) 269 rxbuffer = dequeue_rx(priv);
270 if (rxbuffer == NULL)
271 netdev_info(priv->dev,
272 "sgdma rx and rx queue empty!\n");
273
274 /* Clear control */
275 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
276 /* clear status */
277 csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
278
279 /* kick the rx sgdma after reaping this descriptor */
280 sgdma_async_read(priv);
281
282 } else {
283 /* If the SGDMA indicated an end of packet on recv,
284 * then it's expected that the rxstatus from the
285 * descriptor is non-zero - meaning a valid packet
286 * with a nonzero length, or an error has been
 287 * indicated. If not, then all we can do is signal
288 * an error and return no packet received. Most likely
289 * there is a system design error, or an error in the
290 * underlying kernel (cache or cache management problem)
291 */
261 netdev_err(priv->dev, 292 netdev_err(priv->dev,
262 "sgdma rx and rx queue empty!\n"); 293 "SGDMA RX Error Info: %x, %x, %x\n",
263 294 sts, csrrd8(desc, sgdma_descroffs(status)),
264 /* kick the rx sgdma after reaping this descriptor */ 295 rxstatus);
265 pktsrx = sgdma_async_read(priv); 296 }
297 } else if (sts == 0) {
298 sgdma_async_read(priv);
266 } 299 }
267 300
268 return rxstatus; 301 return rxstatus;
@@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
270 303
271 304
272/* Private functions */ 305/* Private functions */
273static void sgdma_descrip(struct sgdma_descrip *desc, 306static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
274 struct sgdma_descrip *ndesc, 307 struct sgdma_descrip __iomem *ndesc,
275 dma_addr_t ndesc_phys, 308 dma_addr_t ndesc_phys,
276 dma_addr_t raddr, 309 dma_addr_t raddr,
277 dma_addr_t waddr, 310 dma_addr_t waddr,
278 u16 length, 311 u16 length,
279 int generate_eop, 312 int generate_eop,
280 int rfixed, 313 int rfixed,
281 int wfixed) 314 int wfixed)
282{ 315{
283 /* Clear the next descriptor as not owned by hardware */ 316 /* Clear the next descriptor as not owned by hardware */
284 u32 ctrl = ndesc->control; 317
318 u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
285 ctrl &= ~SGDMA_CONTROL_HW_OWNED; 319 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
286 ndesc->control = ctrl; 320 csrwr8(ctrl, ndesc, sgdma_descroffs(control));
287 321
288 ctrl = 0;
289 ctrl = SGDMA_CONTROL_HW_OWNED; 322 ctrl = SGDMA_CONTROL_HW_OWNED;
290 ctrl |= generate_eop; 323 ctrl |= generate_eop;
291 ctrl |= rfixed; 324 ctrl |= rfixed;
292 ctrl |= wfixed; 325 ctrl |= wfixed;
293 326
294 /* Channel is implicitly zero, initialized to 0 by default */ 327 /* Channel is implicitly zero, initialized to 0 by default */
295 328 csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
296 desc->raddr = raddr; 329 csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
297 desc->waddr = waddr; 330
298 desc->next = lower_32_bits(ndesc_phys); 331 csrwr32(0, desc, sgdma_descroffs(pad1));
299 desc->control = ctrl; 332 csrwr32(0, desc, sgdma_descroffs(pad2));
300 desc->status = 0; 333 csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
301 desc->rburst = 0; 334
302 desc->wburst = 0; 335 csrwr8(ctrl, desc, sgdma_descroffs(control));
303 desc->bytes = length; 336 csrwr8(0, desc, sgdma_descroffs(status));
304 desc->bytes_xferred = 0; 337 csrwr8(0, desc, sgdma_descroffs(wburst));
338 csrwr8(0, desc, sgdma_descroffs(rburst));
339 csrwr16(length, desc, sgdma_descroffs(bytes));
340 csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
305} 341}
306 342
307/* If hardware is busy, don't restart async read. 343/* If hardware is busy, don't restart async read.
@@ -312,48 +348,43 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
312 */ 348 */
313static int sgdma_async_read(struct altera_tse_private *priv) 349static int sgdma_async_read(struct altera_tse_private *priv)
314{ 350{
315 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 351 struct sgdma_descrip __iomem *descbase =
316 struct sgdma_descrip *descbase = 352 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
317 (struct sgdma_descrip *)priv->rx_dma_desc;
318 353
319 struct sgdma_descrip *cdesc = &descbase[0]; 354 struct sgdma_descrip __iomem *cdesc = &descbase[0];
320 struct sgdma_descrip *ndesc = &descbase[1]; 355 struct sgdma_descrip __iomem *ndesc = &descbase[1];
321 356
322 unsigned int sts = ioread32(&csr->status);
323 struct tse_buffer *rxbuffer = NULL; 357 struct tse_buffer *rxbuffer = NULL;
324 358
325 if (!sgdma_rxbusy(priv)) { 359 if (!sgdma_rxbusy(priv)) {
326 rxbuffer = queue_rx_peekhead(priv); 360 rxbuffer = queue_rx_peekhead(priv);
327 if (rxbuffer == NULL) 361 if (rxbuffer == NULL) {
362 netdev_err(priv->dev, "no rx buffers available\n");
328 return 0; 363 return 0;
329 364 }
330 sgdma_descrip(cdesc, /* current descriptor */ 365
331 ndesc, /* next descriptor */ 366 sgdma_setup_descrip(cdesc, /* current descriptor */
332 sgdma_rxphysaddr(priv, ndesc), 367 ndesc, /* next descriptor */
333 0, /* read addr 0 for rx dma */ 368 sgdma_rxphysaddr(priv, ndesc),
334 rxbuffer->dma_addr, /* write addr for rx dma */ 369 0, /* read addr 0 for rx dma */
335 0, /* read 'til EOP */ 370 rxbuffer->dma_addr, /* write addr for rx dma */
336 0, /* EOP: NA for rx dma */ 371 0, /* read 'til EOP */
337 0, /* read fixed: NA for rx dma */ 372 0, /* EOP: NA for rx dma */
338 0); /* SOP: NA for rx DMA */ 373 0, /* read fixed: NA for rx dma */
339 374 0); /* SOP: NA for rx DMA */
340 /* clear control and status */
341 iowrite32(0, &csr->control);
342
343 /* If status available, clear those bits */
344 if (sts & 0xf)
345 iowrite32(0xf, &csr->status);
346 375
347 dma_sync_single_for_device(priv->device, 376 dma_sync_single_for_device(priv->device,
348 priv->rxdescphys, 377 priv->rxdescphys,
349 priv->rxdescmem, 378 priv->sgdmadesclen,
350 DMA_BIDIRECTIONAL); 379 DMA_TO_DEVICE);
351 380
352 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), 381 csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
353 &csr->next_descrip); 382 priv->rx_dma_csr,
383 sgdma_csroffs(next_descrip));
354 384
355 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), 385 csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
356 &csr->control); 386 priv->rx_dma_csr,
387 sgdma_csroffs(control));
357 388
358 return 1; 389 return 1;
359 } 390 }
@@ -362,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
362} 393}
363 394
364static int sgdma_async_write(struct altera_tse_private *priv, 395static int sgdma_async_write(struct altera_tse_private *priv,
365 struct sgdma_descrip *desc) 396 struct sgdma_descrip __iomem *desc)
366{ 397{
367 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
368
369 if (sgdma_txbusy(priv)) 398 if (sgdma_txbusy(priv))
370 return 0; 399 return 0;
371 400
372 /* clear control and status */ 401 /* clear control and status */
373 iowrite32(0, &csr->control); 402 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
374 iowrite32(0x1f, &csr->status); 403 csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
375 404
376 dma_sync_single_for_device(priv->device, priv->txdescphys, 405 dma_sync_single_for_device(priv->device, priv->txdescphys,
377 priv->txdescmem, DMA_TO_DEVICE); 406 priv->sgdmadesclen, DMA_TO_DEVICE);
378 407
379 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), 408 csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
380 &csr->next_descrip); 409 priv->tx_dma_csr,
410 sgdma_csroffs(next_descrip));
381 411
382 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), 412 csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
383 &csr->control); 413 priv->tx_dma_csr,
414 sgdma_csroffs(control));
384 415
385 return 1; 416 return 1;
386} 417}
387 418
388static dma_addr_t 419static dma_addr_t
389sgdma_txphysaddr(struct altera_tse_private *priv, 420sgdma_txphysaddr(struct altera_tse_private *priv,
390 struct sgdma_descrip *desc) 421 struct sgdma_descrip __iomem *desc)
391{ 422{
392 dma_addr_t paddr = priv->txdescmem_busaddr; 423 dma_addr_t paddr = priv->txdescmem_busaddr;
393 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; 424 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -396,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
396 427
397static dma_addr_t 428static dma_addr_t
398sgdma_rxphysaddr(struct altera_tse_private *priv, 429sgdma_rxphysaddr(struct altera_tse_private *priv,
399 struct sgdma_descrip *desc) 430 struct sgdma_descrip __iomem *desc)
400{ 431{
401 dma_addr_t paddr = priv->rxdescmem_busaddr; 432 dma_addr_t paddr = priv->rxdescmem_busaddr;
402 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; 433 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -485,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
485 */ 516 */
486static int sgdma_rxbusy(struct altera_tse_private *priv) 517static int sgdma_rxbusy(struct altera_tse_private *priv)
487{ 518{
488 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 519 return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
489 return ioread32(&csr->status) & SGDMA_STSREG_BUSY; 520 & SGDMA_STSREG_BUSY;
490} 521}
491 522
492/* waits for the tx sgdma to finish its current operation, returns 0 523
@@ -495,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
495static int sgdma_txbusy(struct altera_tse_private *priv) 526static int sgdma_txbusy(struct altera_tse_private *priv)
496{ 527{
497 int delay = 0; 528 int delay = 0;
498 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
499 529
500	/* if DMA is busy, wait for current transaction to finish */ 530	/* if DMA is busy, wait for current transaction to finish */
501 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) 531 while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
532 & SGDMA_STSREG_BUSY) && (delay++ < 100))
502 udelay(1); 533 udelay(1);
503 534
504 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { 535 if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
536 & SGDMA_STSREG_BUSY) {
505 netdev_err(priv->dev, "timeout waiting for tx dma\n"); 537 netdev_err(priv->dev, "timeout waiting for tx dma\n");
506 return 1; 538 return 1;
507 } 539 }
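The hunks above drop the (struct sgdma_csr *) casts over the mapped CSR window in favor of the offset-based csrrd32()/csrwr32() helpers (defined in altera_tse.h further down), so every access goes through readl()/writel() and the __iomem annotation survives for sparse checking; the descriptor sync is also narrowed from the whole descriptor memory with DMA_BIDIRECTIONAL to just the active descriptor pair (sgdmadesclen) with DMA_TO_DEVICE. A minimal sketch of the accessor pattern, using hypothetical register names rather than the driver's real layout:

        #include <linux/io.h>
        #include <linux/stddef.h>
        #include <linux/types.h>

        /* Hypothetical CSR layout: only offsetof() is applied to it,
         * the struct is never dereferenced over the mapping.
         */
        struct example_csr {
                u32 status;
                u32 control;
        };
        #define example_csroffs(a)      (offsetof(struct example_csr, a))

        static void example_start(void __iomem *base, u32 start_bits)
        {
                /* read-modify-write through explicit MMIO accessors */
                u32 ctrl = readl(base + example_csroffs(control));

                writel(ctrl | start_bits, base + example_csroffs(control));
        }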
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
index 07d471729dc4..584977e29ef9 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.h
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
26void sgdma_clear_txirq(struct altera_tse_private *); 26void sgdma_clear_txirq(struct altera_tse_private *);
27int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); 27int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
28u32 sgdma_tx_completions(struct altera_tse_private *); 28u32 sgdma_tx_completions(struct altera_tse_private *);
29int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); 29void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
30void sgdma_status(struct altera_tse_private *); 30void sgdma_status(struct altera_tse_private *);
31u32 sgdma_rx_status(struct altera_tse_private *); 31u32 sgdma_rx_status(struct altera_tse_private *);
32int sgdma_initialize(struct altera_tse_private *); 32int sgdma_initialize(struct altera_tse_private *);
33void sgdma_uninitialize(struct altera_tse_private *); 33void sgdma_uninitialize(struct altera_tse_private *);
34void sgdma_start_rxdma(struct altera_tse_private *);
34 35
35#endif /* __ALTERA_SGDMA_H__ */ 36#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
19 19
20/* SGDMA descriptor structure */ 20/* SGDMA descriptor structure */
21struct sgdma_descrip { 21struct sgdma_descrip {
22 unsigned int raddr; /* address of data to be read */ 22 u32 raddr; /* address of data to be read */
23 unsigned int pad1; 23 u32 pad1;
24 unsigned int waddr; 24 u32 waddr;
25 unsigned int pad2; 25 u32 pad2;
26 unsigned int next; 26 u32 next;
27 unsigned int pad3; 27 u32 pad3;
28 unsigned short bytes; 28 u16 bytes;
29 unsigned char rburst; 29 u8 rburst;
30 unsigned char wburst; 30 u8 wburst;
31 unsigned short bytes_xferred; /* 16 bits, bytes xferred */ 31 u16 bytes_xferred; /* 16 bits, bytes xferred */
32 32
33 /* bit 0: error 33 /* bit 0: error
34 * bit 1: length error 34 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
39 * bit 6: reserved 39 * bit 6: reserved
40 * bit 7: status eop for recv case 40 * bit 7: status eop for recv case
41 */ 41 */
42 unsigned char status; 42 u8 status;
43 43
44 /* bit 0: eop 44 /* bit 0: eop
45 * bit 1: read_fixed 45 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
47 * bits 3,4,5,6: Channel (always 0) 47 * bits 3,4,5,6: Channel (always 0)
48 * bit 7: hardware owned 48 * bit 7: hardware owned
49 */ 49 */
50 unsigned char control; 50 u8 control;
51} __packed; 51} __packed;
52 52
53 53
@@ -101,6 +101,8 @@ struct sgdma_csr {
101 u32 pad3[3]; 101 u32 pad3[3];
102}; 102};
103 103
104#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
105#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
104 106
105#define SGDMA_STSREG_ERR BIT(0) /* Error */ 107#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */ 108#define SGDMA_STSREG_EOP BIT(1) /* EOP */
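With the descriptor narrowed to fixed-width u32/u16/u8 fields, its packed layout no longer depends on the compiler's idea of int/short/char, and the new sgdma_csroffs()/sgdma_descroffs() macros turn field names into byte offsets for the MMIO helpers. A hedged sketch of a descriptor field write under this scheme (csrwr32()/csrwr8() are the helpers added to altera_tse.h below; the function name is illustrative):

        /* Sketch: program two fields of a descriptor that lives in
         * FPGA memory, where plain structure stores are not valid.
         */
        static void example_fill_descrip(void __iomem *desc,
                                         u32 read_addr, u8 ctrl)
        {
                csrwr32(read_addr, desc, sgdma_descroffs(raddr));
                csrwr8(ctrl, desc, sgdma_descroffs(control));
        }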
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 8feeed05de0e..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -58,6 +58,8 @@
58/* MAC function configuration default settings */ 58/* MAC function configuration default settings */
59#define ALTERA_TSE_TX_IPG_LENGTH 12 59#define ALTERA_TSE_TX_IPG_LENGTH 12
60 60
61#define ALTERA_TSE_PAUSE_QUANTA 0xffff
62
61#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) 63#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
62 64
63/* MAC Command_Config Register Bit Definitions 65/* MAC Command_Config Register Bit Definitions
@@ -355,6 +357,8 @@ struct altera_tse_mac {
355 u32 reserved5[42]; 357 u32 reserved5[42];
356}; 358};
357 359
360#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
361
358/* Transmit and Receive Command Registers Bit Definitions 362/* Transmit and Receive Command Registers Bit Definitions
359 */ 363 */
360#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) 364#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -390,10 +394,11 @@ struct altera_dmaops {
390 void (*clear_rxirq)(struct altera_tse_private *); 394 void (*clear_rxirq)(struct altera_tse_private *);
391 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); 395 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
392 u32 (*tx_completions)(struct altera_tse_private *); 396 u32 (*tx_completions)(struct altera_tse_private *);
393 int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); 397 void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
394 u32 (*get_rx_status)(struct altera_tse_private *); 398 u32 (*get_rx_status)(struct altera_tse_private *);
395 int (*init_dma)(struct altera_tse_private *); 399 int (*init_dma)(struct altera_tse_private *);
396 void (*uninit_dma)(struct altera_tse_private *); 400 void (*uninit_dma)(struct altera_tse_private *);
401 void (*start_rxdma)(struct altera_tse_private *);
397}; 402};
398 403
399/* This structure is private to each device. 404/* This structure is private to each device.
@@ -453,6 +458,7 @@ struct altera_tse_private {
453 u32 rxctrlreg; 458 u32 rxctrlreg;
454 dma_addr_t rxdescphys; 459 dma_addr_t rxdescphys;
455 dma_addr_t txdescphys; 460 dma_addr_t txdescphys;
461 size_t sgdmadesclen;
456 462
457 struct list_head txlisthd; 463 struct list_head txlisthd;
458 struct list_head rxlisthd; 464 struct list_head rxlisthd;
@@ -483,4 +489,49 @@ struct altera_tse_private {
483 */ 489 */
484void altera_tse_set_ethtool_ops(struct net_device *); 490void altera_tse_set_ethtool_ops(struct net_device *);
485 491
492static inline
493u32 csrrd32(void __iomem *mac, size_t offs)
494{
495 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
496 return readl(paddr);
497}
498
499static inline
500u16 csrrd16(void __iomem *mac, size_t offs)
501{
502 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
503 return readw(paddr);
504}
505
506static inline
507u8 csrrd8(void __iomem *mac, size_t offs)
508{
509 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
510 return readb(paddr);
511}
512
513static inline
514void csrwr32(u32 val, void __iomem *mac, size_t offs)
515{
516 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
517
518 writel(val, paddr);
519}
520
521static inline
522void csrwr16(u16 val, void __iomem *mac, size_t offs)
523{
524 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
525
526 writew(val, paddr);
527}
528
529static inline
530void csrwr8(u8 val, void __iomem *mac, size_t offs)
531{
532 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
533
534 writeb(val, paddr);
535}
536
486#endif /* __ALTERA_TSE_H__ */ 537#endif /* __ALTERA_TSE_H__ */
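The six helpers above are the single funnel for CSR traffic: the byte offset is added through uintptr_t (sidestepping arithmetic on void __iomem * in strict C) and the access itself is always readl()/writel() and friends. Typical read-modify-write usage, sketched against command_config (the function is illustrative, not part of the driver):

        static void example_set_promisc(struct altera_tse_private *priv,
                                        bool on)
        {
                u32 cfg = csrrd32(priv->mac_dev, tse_csroffs(command_config));

                if (on)
                        cfg |= MAC_CMDCFG_PROMIS_EN;
                else
                        cfg &= ~MAC_CMDCFG_PROMIS_EN;

                csrwr32(cfg, priv->mac_dev, tse_csroffs(command_config));
        }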
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 319ca74f5e74..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
77 struct altera_tse_private *priv = netdev_priv(dev); 77 struct altera_tse_private *priv = netdev_priv(dev);
78 u32 rev = ioread32(&priv->mac_dev->megacore_revision); 78 u32 rev = ioread32(&priv->mac_dev->megacore_revision);
79 79
80 strcpy(info->driver, "Altera TSE MAC IP Driver"); 80 strcpy(info->driver, "altera_tse");
81 strcpy(info->version, "v8.0"); 81 strcpy(info->version, "v8.0");
82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", 82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); 83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
96 u64 *buf) 96 u64 *buf)
97{ 97{
98 struct altera_tse_private *priv = netdev_priv(dev); 98 struct altera_tse_private *priv = netdev_priv(dev);
99 struct altera_tse_mac *mac = priv->mac_dev;
100 u64 ext; 99 u64 ext;
101 100
102 buf[0] = ioread32(&mac->frames_transmitted_ok); 101 buf[0] = csrrd32(priv->mac_dev,
103 buf[1] = ioread32(&mac->frames_received_ok); 102 tse_csroffs(frames_transmitted_ok));
104 buf[2] = ioread32(&mac->frames_check_sequence_errors); 103 buf[1] = csrrd32(priv->mac_dev,
105 buf[3] = ioread32(&mac->alignment_errors); 104 tse_csroffs(frames_received_ok));
105 buf[2] = csrrd32(priv->mac_dev,
106 tse_csroffs(frames_check_sequence_errors));
107 buf[3] = csrrd32(priv->mac_dev,
108 tse_csroffs(alignment_errors));
106 109
107 /* Extended aOctetsTransmittedOK counter */ 110 /* Extended aOctetsTransmittedOK counter */
108 ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; 111 ext = (u64) csrrd32(priv->mac_dev,
109 ext |= ioread32(&mac->octets_transmitted_ok); 112 tse_csroffs(msb_octets_transmitted_ok)) << 32;
113
114 ext |= csrrd32(priv->mac_dev,
115 tse_csroffs(octets_transmitted_ok));
110 buf[4] = ext; 116 buf[4] = ext;
111 117
112 /* Extended aOctetsReceivedOK counter */ 118 /* Extended aOctetsReceivedOK counter */
113 ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; 119 ext = (u64) csrrd32(priv->mac_dev,
114 ext |= ioread32(&mac->octets_received_ok); 120 tse_csroffs(msb_octets_received_ok)) << 32;
121
122 ext |= csrrd32(priv->mac_dev,
123 tse_csroffs(octets_received_ok));
115 buf[5] = ext; 124 buf[5] = ext;
116 125
117 buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); 126 buf[6] = csrrd32(priv->mac_dev,
118 buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); 127 tse_csroffs(tx_pause_mac_ctrl_frames));
119 buf[8] = ioread32(&mac->if_in_errors); 128 buf[7] = csrrd32(priv->mac_dev,
120 buf[9] = ioread32(&mac->if_out_errors); 129 tse_csroffs(rx_pause_mac_ctrl_frames));
121 buf[10] = ioread32(&mac->if_in_ucast_pkts); 130 buf[8] = csrrd32(priv->mac_dev,
122 buf[11] = ioread32(&mac->if_in_multicast_pkts); 131 tse_csroffs(if_in_errors));
123 buf[12] = ioread32(&mac->if_in_broadcast_pkts); 132 buf[9] = csrrd32(priv->mac_dev,
124 buf[13] = ioread32(&mac->if_out_discards); 133 tse_csroffs(if_out_errors));
125 buf[14] = ioread32(&mac->if_out_ucast_pkts); 134 buf[10] = csrrd32(priv->mac_dev,
126 buf[15] = ioread32(&mac->if_out_multicast_pkts); 135 tse_csroffs(if_in_ucast_pkts));
127 buf[16] = ioread32(&mac->if_out_broadcast_pkts); 136 buf[11] = csrrd32(priv->mac_dev,
128 buf[17] = ioread32(&mac->ether_stats_drop_events); 137 tse_csroffs(if_in_multicast_pkts));
138 buf[12] = csrrd32(priv->mac_dev,
139 tse_csroffs(if_in_broadcast_pkts));
140 buf[13] = csrrd32(priv->mac_dev,
141 tse_csroffs(if_out_discards));
142 buf[14] = csrrd32(priv->mac_dev,
143 tse_csroffs(if_out_ucast_pkts));
144 buf[15] = csrrd32(priv->mac_dev,
145 tse_csroffs(if_out_multicast_pkts));
146 buf[16] = csrrd32(priv->mac_dev,
147 tse_csroffs(if_out_broadcast_pkts));
148 buf[17] = csrrd32(priv->mac_dev,
149 tse_csroffs(ether_stats_drop_events));
129 150
130 /* Extended etherStatsOctets counter */ 151 /* Extended etherStatsOctets counter */
131 ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; 152 ext = (u64) csrrd32(priv->mac_dev,
132 ext |= ioread32(&mac->ether_stats_octets); 153 tse_csroffs(msb_ether_stats_octets)) << 32;
154 ext |= csrrd32(priv->mac_dev,
155 tse_csroffs(ether_stats_octets));
133 buf[18] = ext; 156 buf[18] = ext;
134 157
135 buf[19] = ioread32(&mac->ether_stats_pkts); 158 buf[19] = csrrd32(priv->mac_dev,
136 buf[20] = ioread32(&mac->ether_stats_undersize_pkts); 159 tse_csroffs(ether_stats_pkts));
137 buf[21] = ioread32(&mac->ether_stats_oversize_pkts); 160 buf[20] = csrrd32(priv->mac_dev,
138 buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); 161 tse_csroffs(ether_stats_undersize_pkts));
139 buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); 162 buf[21] = csrrd32(priv->mac_dev,
140 buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); 163 tse_csroffs(ether_stats_oversize_pkts));
141 buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); 164 buf[22] = csrrd32(priv->mac_dev,
142 buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); 165 tse_csroffs(ether_stats_pkts_64_octets));
143 buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); 166 buf[23] = csrrd32(priv->mac_dev,
144 buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); 167 tse_csroffs(ether_stats_pkts_65to127_octets));
145 buf[29] = ioread32(&mac->ether_stats_jabbers); 168 buf[24] = csrrd32(priv->mac_dev,
146 buf[30] = ioread32(&mac->ether_stats_fragments); 169 tse_csroffs(ether_stats_pkts_128to255_octets));
170 buf[25] = csrrd32(priv->mac_dev,
171 tse_csroffs(ether_stats_pkts_256to511_octets));
172 buf[26] = csrrd32(priv->mac_dev,
173 tse_csroffs(ether_stats_pkts_512to1023_octets));
174 buf[27] = csrrd32(priv->mac_dev,
175 tse_csroffs(ether_stats_pkts_1024to1518_octets));
176 buf[28] = csrrd32(priv->mac_dev,
177 tse_csroffs(ether_stats_pkts_1519tox_octets));
178 buf[29] = csrrd32(priv->mac_dev,
179 tse_csroffs(ether_stats_jabbers));
180 buf[30] = csrrd32(priv->mac_dev,
181 tse_csroffs(ether_stats_fragments));
147} 182}
148 183
149static int tse_sset_count(struct net_device *dev, int sset) 184static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,19 +213,24 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
178{ 213{
179 int i; 214 int i;
180 struct altera_tse_private *priv = netdev_priv(dev); 215 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf; 216 u32 *buf = regbuf;
183 217
184 /* Set version to a known value, so ethtool knows 218 /* Set version to a known value, so ethtool knows
185 * how to do any special formatting of this data. 219 * how to do any special formatting of this data.
186 * This version number will need to change if and 220 * This version number will need to change if and
187 * when this register table is changed. 221 * when this register table is changed.
222 *
223 * version[31:0] = 1: Dump the first 128 TSE Registers
224 * Upper bits are all 0 by default
225 *
226	 * Upper 16 bits will indicate feature presence for
227	 * ethtool register decoding in a future version.
188 */ 228 */
189 229
190 regs->version = 1; 230 regs->version = 1;
191 231
192 for (i = 0; i < TSE_NUM_REGS; i++) 232 for (i = 0; i < TSE_NUM_REGS; i++)
193 buf[i] = ioread32(&tse_mac_regs[i]); 233 buf[i] = csrrd32(priv->mac_dev, i * 4);
194} 234}
195 235
196static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 236static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
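The statistics conversion also makes the 64-bit counter handling explicit: each extended IEEE counter is an msb/lsb register pair, read high word first, shifted up 32 bits, then OR'd with the low word. A small helper capturing that pattern (a sketch; the offsets are parameters here, not part of the driver's API):

        /* Sketch: compose a 64-bit counter from an msb/lsb pair.
         * The two reads are not atomic against a rollover in between;
         * the MAC's counter snapshot is assumed stable during a dump.
         */
        static u64 example_read_counter64(void __iomem *mac,
                                          size_t msb_offs, size_t lsb_offs)
        {
                u64 val = (u64)csrrd32(mac, msb_offs) << 32;

                return val | csrrd32(mac, lsb_offs);
        }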
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index c70a29e0b9f7..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
100 */ 100 */
101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102{ 102{
103 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 103 struct net_device *ndev = bus->priv;
104 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 104 struct altera_tse_private *priv = netdev_priv(ndev);
105 u32 data;
106 105
107 /* set MDIO address */ 106 /* set MDIO address */
108 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 107 csrwr32((mii_id & 0x1f), priv->mac_dev,
108 tse_csroffs(mdio_phy0_addr));
109 109
110 /* get the data */ 110 /* get the data */
111 data = ioread32(&mdio_regs[regnum]) & 0xffff; 111 return csrrd32(priv->mac_dev,
112 return data; 112 tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
113} 113}
114 114
115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116 u16 value) 116 u16 value)
117{ 117{
118 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 118 struct net_device *ndev = bus->priv;
119 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 119 struct altera_tse_private *priv = netdev_priv(ndev);
120 120
121 /* set MDIO address */ 121 /* set MDIO address */
122 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 122 csrwr32((mii_id & 0x1f), priv->mac_dev,
123 tse_csroffs(mdio_phy0_addr));
123 124
124 /* write the data */ 125 /* write the data */
125 iowrite32((u32) value, &mdio_regs[regnum]); 126 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
126 return 0; 127 return 0;
127} 128}
128 129
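The MDIO accessors now treat the mdio_phy0 window as a bank of 32-bit registers and index it by byte offset, tse_csroffs(mdio_phy0) + regnum * 4, instead of scaling a cast pointer. The read side of that pattern in isolation (a sketch under the same assumptions, not a drop-in replacement):

        static u16 example_mdio_read(struct altera_tse_private *priv,
                                     int phy_addr, int regnum)
        {
                /* select the PHY, then read the 32-bit slot for regnum;
                 * only the low 16 bits carry MDIO data
                 */
                csrwr32(phy_addr & 0x1f, priv->mac_dev,
                        tse_csroffs(mdio_phy0_addr));

                return csrrd32(priv->mac_dev,
                               tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
        }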
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
168 for (i = 0; i < PHY_MAX_ADDR; i++) 169 for (i = 0; i < PHY_MAX_ADDR; i++)
169 mdio->irq[i] = PHY_POLL; 170 mdio->irq[i] = PHY_POLL;
170 171
171 mdio->priv = priv->mac_dev; 172 mdio->priv = dev;
172 mdio->parent = priv->device; 173 mdio->parent = priv->device;
173 174
174 ret = of_mdiobus_register(mdio, mdio_node); 175 ret = of_mdiobus_register(mdio, mdio_node);
@@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
224 dev_kfree_skb_any(rxbuffer->skb); 225 dev_kfree_skb_any(rxbuffer->skb);
225 return -EINVAL; 226 return -EINVAL;
226 } 227 }
228 rxbuffer->dma_addr &= (dma_addr_t)~3;
227 rxbuffer->len = len; 229 rxbuffer->len = len;
228 return 0; 230 return 0;
229} 231}
@@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
425 priv->dev->stats.rx_bytes += pktlength; 427 priv->dev->stats.rx_bytes += pktlength;
426 428
427 entry = next_entry; 429 entry = next_entry;
430
431 tse_rx_refill(priv);
428 } 432 }
429 433
430 tse_rx_refill(priv);
431 return count; 434 return count;
432} 435}
433 436
@@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
520 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
521 unsigned long int flags; 524 unsigned long int flags;
522 525
523
524 if (unlikely(!dev)) { 526 if (unlikely(!dev)) {
525 pr_err("%s: invalid dev pointer\n", __func__); 527 pr_err("%s: invalid dev pointer\n", __func__);
526 return IRQ_NONE; 528 return IRQ_NONE;
@@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
562 unsigned int nopaged_len = skb_headlen(skb); 564 unsigned int nopaged_len = skb_headlen(skb);
563 enum netdev_tx ret = NETDEV_TX_OK; 565 enum netdev_tx ret = NETDEV_TX_OK;
564 dma_addr_t dma_addr; 566 dma_addr_t dma_addr;
565 int txcomplete = 0;
566 567
567 spin_lock_bh(&priv->tx_lock); 568 spin_lock_bh(&priv->tx_lock);
568 569
@@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
598 dma_sync_single_for_device(priv->device, buffer->dma_addr, 599 dma_sync_single_for_device(priv->device, buffer->dma_addr,
599 buffer->len, DMA_TO_DEVICE); 600 buffer->len, DMA_TO_DEVICE);
600 601
601 txcomplete = priv->dmaops->tx_buffer(priv, buffer); 602 priv->dmaops->tx_buffer(priv, buffer);
602 603
603 skb_tx_timestamp(skb); 604 skb_tx_timestamp(skb);
604 605
@@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
697 struct altera_tse_private *priv = netdev_priv(dev); 698 struct altera_tse_private *priv = netdev_priv(dev);
698 struct phy_device *phydev = NULL; 699 struct phy_device *phydev = NULL;
699 char phy_id_fmt[MII_BUS_ID_SIZE + 3]; 700 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
700 int ret;
701 701
702 if (priv->phy_addr != POLL_PHY) { 702 if (priv->phy_addr != POLL_PHY) {
703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, 703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
711 netdev_err(dev, "Could not attach to PHY\n"); 711 netdev_err(dev, "Could not attach to PHY\n");
712 712
713 } else { 713 } else {
714 int ret;
714 phydev = phy_find_first(priv->mdio); 715 phydev = phy_find_first(priv->mdio);
715 if (phydev == NULL) { 716 if (phydev == NULL) {
716 netdev_err(dev, "No PHY found\n"); 717 netdev_err(dev, "No PHY found\n");
@@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev)
790 791
791static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) 792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
792{ 793{
793 struct altera_tse_mac *mac = priv->mac_dev;
794 u32 msb; 794 u32 msb;
795 u32 lsb; 795 u32 lsb;
796 796
@@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff; 798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
799 799
800 /* Set primary MAC address */ 800 /* Set primary MAC address */
801 iowrite32(msb, &mac->mac_addr_0); 801 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
802 iowrite32(lsb, &mac->mac_addr_1); 802 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
803} 803}
804 804
805/* MAC software reset. 805/* MAC software reset.
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
810 */ 810 */
811static int reset_mac(struct altera_tse_private *priv) 811static int reset_mac(struct altera_tse_private *priv)
812{ 812{
813 void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
814 int counter; 813 int counter;
815 u32 dat; 814 u32 dat;
816 815
817 dat = ioread32(cmd_cfg_reg); 816 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
818 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 817 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
819 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; 818 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
820 iowrite32(dat, cmd_cfg_reg); 819 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
821 820
822 counter = 0; 821 counter = 0;
823 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 822 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
824 if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) 823 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
824 MAC_CMDCFG_SW_RESET))
825 break; 825 break;
826 udelay(1); 826 udelay(1);
827 } 827 }
828 828
829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
830 dat = ioread32(cmd_cfg_reg); 830 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
831 dat &= ~MAC_CMDCFG_SW_RESET; 831 dat &= ~MAC_CMDCFG_SW_RESET;
832 iowrite32(dat, cmd_cfg_reg); 832 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
833 return -1; 833 return -1;
834 } 834 }
835 return 0; 835 return 0;
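reset_mac() above is the usual self-clearing-bit sequence: set SW_RESET, poll until hardware clears it, and back the bit out by hand if the watchdog count expires. The same pattern in isolation, with hypothetical names and a return convention chosen for the sketch:

        /* Sketch: bounded poll on a self-clearing reset bit. */
        static int example_soft_reset(void __iomem *base, size_t offs,
                                      u32 reset_bit, int max_us)
        {
                int i;

                csrwr32(csrrd32(base, offs) | reset_bit, base, offs);

                for (i = 0; i < max_us; i++) {
                        if (!(csrrd32(base, offs) & reset_bit))
                                return 0;       /* hardware cleared it */
                        udelay(1);
                }

                /* timed out: clear the bit ourselves, report failure */
                csrwr32(csrrd32(base, offs) & ~reset_bit, base, offs);
                return -ETIMEDOUT;
        }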
@@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv)
839*/ 839*/
840static int init_mac(struct altera_tse_private *priv) 840static int init_mac(struct altera_tse_private *priv)
841{ 841{
842 struct altera_tse_mac *mac = priv->mac_dev;
843 unsigned int cmd = 0; 842 unsigned int cmd = 0;
844 u32 frm_length; 843 u32 frm_length;
845 844
846 /* Setup Rx FIFO */ 845 /* Setup Rx FIFO */
847 iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, 846 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
848 &mac->rx_section_empty); 847 priv->mac_dev, tse_csroffs(rx_section_empty));
849 iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); 848
850 iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); 849 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
851 iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); 850 tse_csroffs(rx_section_full));
851
852 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
853 tse_csroffs(rx_almost_empty));
854
855 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
856 tse_csroffs(rx_almost_full));
852 857
853 /* Setup Tx FIFO */ 858 /* Setup Tx FIFO */
854 iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, 859 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
855 &mac->tx_section_empty); 860 priv->mac_dev, tse_csroffs(tx_section_empty));
856 iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); 861
857 iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); 862 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
858 iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); 863 tse_csroffs(tx_section_full));
864
865 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
866 tse_csroffs(tx_almost_empty));
867
868 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
869 tse_csroffs(tx_almost_full));
859 870
860 /* MAC Address Configuration */ 871 /* MAC Address Configuration */
861 tse_update_mac_addr(priv, priv->dev->dev_addr); 872 tse_update_mac_addr(priv, priv->dev->dev_addr);
862 873
863 /* MAC Function Configuration */ 874 /* MAC Function Configuration */
864 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; 875 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
865 iowrite32(frm_length, &mac->frm_length); 876 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
866 iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); 877
878 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
879 tse_csroffs(tx_ipg_length));
867 880
868	 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit 881	 /* Enable RX shift 16 for alignment of received frames on a 16-bit
869	 * start address 882	 * start address; TX shift 16 and CRC omission remain cleared
870	 */ 883	 */
871 tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 884 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
872 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | 885 ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
873 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); 886
887 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
888 ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
889 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
874 890
875 /* Set the MAC options */ 891 /* Set the MAC options */
876 cmd = ioread32(&mac->command_config); 892 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
877	 cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */ 893	 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding removal on receive */
878 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ 894 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
879 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames 895 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
880 * with CRC errors 896 * with CRC errors
@@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv)
882 cmd |= MAC_CMDCFG_CNTL_FRM_ENA; 898 cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
883 cmd &= ~MAC_CMDCFG_TX_ENA; 899 cmd &= ~MAC_CMDCFG_TX_ENA;
884 cmd &= ~MAC_CMDCFG_RX_ENA; 900 cmd &= ~MAC_CMDCFG_RX_ENA;
885 iowrite32(cmd, &mac->command_config); 901
902 /* Default speed and duplex setting, full/100 */
903 cmd &= ~MAC_CMDCFG_HD_ENA;
904 cmd &= ~MAC_CMDCFG_ETH_SPEED;
905 cmd &= ~MAC_CMDCFG_ENA_10;
906
907 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
908
909 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
910 tse_csroffs(pause_quanta));
886 911
887 if (netif_msg_hw(priv)) 912 if (netif_msg_hw(priv))
888 dev_dbg(priv->device, 913 dev_dbg(priv->device,
@@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
895 */ 920 */
896static void tse_set_mac(struct altera_tse_private *priv, bool enable) 921static void tse_set_mac(struct altera_tse_private *priv, bool enable)
897{ 922{
898 struct altera_tse_mac *mac = priv->mac_dev; 923 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
899 u32 value = ioread32(&mac->command_config);
900 924
901 if (enable) 925 if (enable)
902 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; 926 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
903 else 927 else
904 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 928 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
905 929
906 iowrite32(value, &mac->command_config); 930 csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
907} 931}
908 932
909/* Change the MTU 933/* Change the MTU
@@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
933static void altera_tse_set_mcfilter(struct net_device *dev) 957static void altera_tse_set_mcfilter(struct net_device *dev)
934{ 958{
935 struct altera_tse_private *priv = netdev_priv(dev); 959 struct altera_tse_private *priv = netdev_priv(dev);
936 struct altera_tse_mac *mac = priv->mac_dev;
937 int i; 960 int i;
938 struct netdev_hw_addr *ha; 961 struct netdev_hw_addr *ha;
939 962
940 /* clear the hash filter */ 963 /* clear the hash filter */
941 for (i = 0; i < 64; i++) 964 for (i = 0; i < 64; i++)
942 iowrite32(0, &(mac->hash_table[i])); 965 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
943 966
944 netdev_for_each_mc_addr(ha, dev) { 967 netdev_for_each_mc_addr(ha, dev) {
945 unsigned int hash = 0; 968 unsigned int hash = 0;
@@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
955 978
956 hash = (hash << 1) | xor_bit; 979 hash = (hash << 1) | xor_bit;
957 } 980 }
958 iowrite32(1, &(mac->hash_table[hash])); 981 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
959 } 982 }
960} 983}
961 984
@@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
963static void altera_tse_set_mcfilterall(struct net_device *dev) 986static void altera_tse_set_mcfilterall(struct net_device *dev)
964{ 987{
965 struct altera_tse_private *priv = netdev_priv(dev); 988 struct altera_tse_private *priv = netdev_priv(dev);
966 struct altera_tse_mac *mac = priv->mac_dev;
967 int i; 989 int i;
968 990
969 /* set the hash filter */ 991 /* set the hash filter */
970 for (i = 0; i < 64; i++) 992 for (i = 0; i < 64; i++)
971 iowrite32(1, &(mac->hash_table[i])); 993 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
972} 994}
973 995
974/* Set or clear the multicast filter for this adaptor 996/* Set or clear the multicast filter for this adaptor
@@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
976static void tse_set_rx_mode_hashfilter(struct net_device *dev) 998static void tse_set_rx_mode_hashfilter(struct net_device *dev)
977{ 999{
978 struct altera_tse_private *priv = netdev_priv(dev); 1000 struct altera_tse_private *priv = netdev_priv(dev);
979 struct altera_tse_mac *mac = priv->mac_dev;
980 1001
981 spin_lock(&priv->mac_cfg_lock); 1002 spin_lock(&priv->mac_cfg_lock);
982 1003
983 if (dev->flags & IFF_PROMISC) 1004 if (dev->flags & IFF_PROMISC)
984 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1005 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1006 MAC_CMDCFG_PROMIS_EN);
985 1007
986 if (dev->flags & IFF_ALLMULTI) 1008 if (dev->flags & IFF_ALLMULTI)
987 altera_tse_set_mcfilterall(dev); 1009 altera_tse_set_mcfilterall(dev);
@@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
996static void tse_set_rx_mode(struct net_device *dev) 1018static void tse_set_rx_mode(struct net_device *dev)
997{ 1019{
998 struct altera_tse_private *priv = netdev_priv(dev); 1020 struct altera_tse_private *priv = netdev_priv(dev);
999 struct altera_tse_mac *mac = priv->mac_dev;
1000 1021
1001 spin_lock(&priv->mac_cfg_lock); 1022 spin_lock(&priv->mac_cfg_lock);
1002 1023
1003 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 1024 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1004 !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) 1025 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1005 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1026 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1027 MAC_CMDCFG_PROMIS_EN);
1006 else 1028 else
1007 tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1029 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1030 MAC_CMDCFG_PROMIS_EN);
1008 1031
1009 spin_unlock(&priv->mac_cfg_lock); 1032 spin_unlock(&priv->mac_cfg_lock);
1010} 1033}
@@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev)
1085 1108
1086 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 1109 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1087 1110
1088 /* Start MAC Rx/Tx */
1089 spin_lock(&priv->mac_cfg_lock);
1090 tse_set_mac(priv, true);
1091 spin_unlock(&priv->mac_cfg_lock);
1092
1093 if (priv->phydev) 1111 if (priv->phydev)
1094 phy_start(priv->phydev); 1112 phy_start(priv->phydev);
1095 1113
1096 napi_enable(&priv->napi); 1114 napi_enable(&priv->napi);
1097 netif_start_queue(dev); 1115 netif_start_queue(dev);
1098 1116
1117 priv->dmaops->start_rxdma(priv);
1118
1119 /* Start MAC Rx/Tx */
1120 spin_lock(&priv->mac_cfg_lock);
1121 tse_set_mac(priv, true);
1122 spin_unlock(&priv->mac_cfg_lock);
1123
1099 return 0; 1124 return 0;
1100 1125
1101tx_request_irq_error: 1126tx_request_irq_error:
@@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
1167 .ndo_validate_addr = eth_validate_addr, 1192 .ndo_validate_addr = eth_validate_addr,
1168}; 1193};
1169 1194
1170
1171static int request_and_map(struct platform_device *pdev, const char *name, 1195static int request_and_map(struct platform_device *pdev, const char *name,
1172 struct resource **res, void __iomem **ptr) 1196 struct resource **res, void __iomem **ptr)
1173{ 1197{
@@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1235 /* Get the mapped address to the SGDMA descriptor memory */ 1259 /* Get the mapped address to the SGDMA descriptor memory */
1236 ret = request_and_map(pdev, "s1", &dma_res, &descmap); 1260 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1237 if (ret) 1261 if (ret)
1238 goto out_free; 1262 goto err_free_netdev;
1239 1263
1240 /* Start of that memory is for transmit descriptors */ 1264 /* Start of that memory is for transmit descriptors */
1241 priv->tx_dma_desc = descmap; 1265 priv->tx_dma_desc = descmap;
@@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev)
1254 if (upper_32_bits(priv->rxdescmem_busaddr)) { 1278 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1255 dev_dbg(priv->device, 1279 dev_dbg(priv->device,
1256 "SGDMA bus addresses greater than 32-bits\n"); 1280 "SGDMA bus addresses greater than 32-bits\n");
1257 goto out_free; 1281 goto err_free_netdev;
1258 } 1282 }
1259 if (upper_32_bits(priv->txdescmem_busaddr)) { 1283 if (upper_32_bits(priv->txdescmem_busaddr)) {
1260 dev_dbg(priv->device, 1284 dev_dbg(priv->device,
1261 "SGDMA bus addresses greater than 32-bits\n"); 1285 "SGDMA bus addresses greater than 32-bits\n");
1262 goto out_free; 1286 goto err_free_netdev;
1263 } 1287 }
1264 } else if (priv->dmaops && 1288 } else if (priv->dmaops &&
1265 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { 1289 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1266 ret = request_and_map(pdev, "rx_resp", &dma_res, 1290 ret = request_and_map(pdev, "rx_resp", &dma_res,
1267 &priv->rx_dma_resp); 1291 &priv->rx_dma_resp);
1268 if (ret) 1292 if (ret)
1269 goto out_free; 1293 goto err_free_netdev;
1270 1294
1271 ret = request_and_map(pdev, "tx_desc", &dma_res, 1295 ret = request_and_map(pdev, "tx_desc", &dma_res,
1272 &priv->tx_dma_desc); 1296 &priv->tx_dma_desc);
1273 if (ret) 1297 if (ret)
1274 goto out_free; 1298 goto err_free_netdev;
1275 1299
1276 priv->txdescmem = resource_size(dma_res); 1300 priv->txdescmem = resource_size(dma_res);
1277 priv->txdescmem_busaddr = dma_res->start; 1301 priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev)
1279 ret = request_and_map(pdev, "rx_desc", &dma_res, 1303 ret = request_and_map(pdev, "rx_desc", &dma_res,
1280 &priv->rx_dma_desc); 1304 &priv->rx_dma_desc);
1281 if (ret) 1305 if (ret)
1282 goto out_free; 1306 goto err_free_netdev;
1283 1307
1284 priv->rxdescmem = resource_size(dma_res); 1308 priv->rxdescmem = resource_size(dma_res);
1285 priv->rxdescmem_busaddr = dma_res->start; 1309 priv->rxdescmem_busaddr = dma_res->start;
1286 1310
1287 } else { 1311 } else {
1288 goto out_free; 1312 goto err_free_netdev;
1289 } 1313 }
1290 1314
1291 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) 1315 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev)
1294 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) 1318 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1295 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); 1319 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1296 else 1320 else
1297 goto out_free; 1321 goto err_free_netdev;
1298 1322
1299 /* MAC address space */ 1323 /* MAC address space */
1300 ret = request_and_map(pdev, "control_port", &control_port, 1324 ret = request_and_map(pdev, "control_port", &control_port,
1301 (void __iomem **)&priv->mac_dev); 1325 (void __iomem **)&priv->mac_dev);
1302 if (ret) 1326 if (ret)
1303 goto out_free; 1327 goto err_free_netdev;
1304 1328
1305 /* xSGDMA Rx Dispatcher address space */ 1329 /* xSGDMA Rx Dispatcher address space */
1306 ret = request_and_map(pdev, "rx_csr", &dma_res, 1330 ret = request_and_map(pdev, "rx_csr", &dma_res,
1307 &priv->rx_dma_csr); 1331 &priv->rx_dma_csr);
1308 if (ret) 1332 if (ret)
1309 goto out_free; 1333 goto err_free_netdev;
1310 1334
1311 1335
1312 /* xSGDMA Tx Dispatcher address space */ 1336 /* xSGDMA Tx Dispatcher address space */
1313 ret = request_and_map(pdev, "tx_csr", &dma_res, 1337 ret = request_and_map(pdev, "tx_csr", &dma_res,
1314 &priv->tx_dma_csr); 1338 &priv->tx_dma_csr);
1315 if (ret) 1339 if (ret)
1316 goto out_free; 1340 goto err_free_netdev;
1317 1341
1318 1342
1319 /* Rx IRQ */ 1343 /* Rx IRQ */
@@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1321 if (priv->rx_irq == -ENXIO) { 1345 if (priv->rx_irq == -ENXIO) {
1322 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); 1346 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1323 ret = -ENXIO; 1347 ret = -ENXIO;
1324 goto out_free; 1348 goto err_free_netdev;
1325 } 1349 }
1326 1350
1327 /* Tx IRQ */ 1351 /* Tx IRQ */
@@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1329 if (priv->tx_irq == -ENXIO) { 1353 if (priv->tx_irq == -ENXIO) {
1330 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); 1354 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1331 ret = -ENXIO; 1355 ret = -ENXIO;
1332 goto out_free; 1356 goto err_free_netdev;
1333 } 1357 }
1334 1358
1335 /* get FIFO depths from device tree */ 1359 /* get FIFO depths from device tree */
@@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev)
1337 &priv->rx_fifo_depth)) { 1361 &priv->rx_fifo_depth)) {
1338 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); 1362 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1339 ret = -ENXIO; 1363 ret = -ENXIO;
1340 goto out_free; 1364 goto err_free_netdev;
1341 } 1365 }
1342 1366
1343 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1367 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1344	 &priv->tx_fifo_depth)) { 1368	 &priv->tx_fifo_depth)) {
1345 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1369 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1346 ret = -ENXIO; 1370 ret = -ENXIO;
1347 goto out_free; 1371 goto err_free_netdev;
1348 } 1372 }
1349 1373
1350 /* get hash filter settings for this instance */ 1374 /* get hash filter settings for this instance */
@@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
1352 of_property_read_bool(pdev->dev.of_node, 1376 of_property_read_bool(pdev->dev.of_node,
1353 "altr,has-hash-multicast-filter"); 1377 "altr,has-hash-multicast-filter");
1354 1378
1379	 /* Disable the hash filter for now until the
1380 * multicast filter receive issue is debugged
1381 */
1382 priv->hash_filter = 0;
1383
1355 /* get supplemental address settings for this instance */ 1384 /* get supplemental address settings for this instance */
1356 priv->added_unicast = 1385 priv->added_unicast =
1357 of_property_read_bool(pdev->dev.of_node, 1386 of_property_read_bool(pdev->dev.of_node,
@@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1393 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { 1422 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
1394 dev_err(&pdev->dev, "invalid phy-addr specified %d\n", 1423 dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
1395 priv->phy_addr); 1424 priv->phy_addr);
1396 goto out_free; 1425 goto err_free_netdev;
1397 } 1426 }
1398 1427
1399 /* Create/attach to MDIO bus */ 1428 /* Create/attach to MDIO bus */
@@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1401 atomic_add_return(1, &instance_count)); 1430 atomic_add_return(1, &instance_count));
1402 1431
1403 if (ret) 1432 if (ret)
1404 goto out_free; 1433 goto err_free_netdev;
1405 1434
1406 /* initialize netdev */ 1435 /* initialize netdev */
1407 ether_setup(ndev); 1436 ether_setup(ndev);
@@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1438 ret = register_netdev(ndev); 1467 ret = register_netdev(ndev);
1439 if (ret) { 1468 if (ret) {
1440 dev_err(&pdev->dev, "failed to register TSE net device\n"); 1469 dev_err(&pdev->dev, "failed to register TSE net device\n");
1441 goto out_free_mdio; 1470 goto err_register_netdev;
1442 } 1471 }
1443 1472
1444 platform_set_drvdata(pdev, ndev); 1473 platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev)
1455 ret = init_phy(ndev); 1484 ret = init_phy(ndev);
1456 if (ret != 0) { 1485 if (ret != 0) {
1457 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); 1486 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1458 goto out_free_mdio; 1487 goto err_init_phy;
1459 } 1488 }
1460 return 0; 1489 return 0;
1461 1490
1462out_free_mdio: 1491err_init_phy:
1492 unregister_netdev(ndev);
1493err_register_netdev:
1494 netif_napi_del(&priv->napi);
1463 altera_tse_mdio_destroy(ndev); 1495 altera_tse_mdio_destroy(ndev);
1464out_free: 1496err_free_netdev:
1465 free_netdev(ndev); 1497 free_netdev(ndev);
1466 return ret; 1498 return ret;
1467} 1499}
@@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
1480 return 0; 1512 return 0;
1481} 1513}
1482 1514
1483struct altera_dmaops altera_dtype_sgdma = { 1515static const struct altera_dmaops altera_dtype_sgdma = {
1484 .altera_dtype = ALTERA_DTYPE_SGDMA, 1516 .altera_dtype = ALTERA_DTYPE_SGDMA,
1485 .dmamask = 32, 1517 .dmamask = 32,
1486 .reset_dma = sgdma_reset, 1518 .reset_dma = sgdma_reset,
@@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = {
1496 .get_rx_status = sgdma_rx_status, 1528 .get_rx_status = sgdma_rx_status,
1497 .init_dma = sgdma_initialize, 1529 .init_dma = sgdma_initialize,
1498 .uninit_dma = sgdma_uninitialize, 1530 .uninit_dma = sgdma_uninitialize,
1531 .start_rxdma = sgdma_start_rxdma,
1499}; 1532};
1500 1533
1501struct altera_dmaops altera_dtype_msgdma = { 1534static const struct altera_dmaops altera_dtype_msgdma = {
1502 .altera_dtype = ALTERA_DTYPE_MSGDMA, 1535 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1503 .dmamask = 64, 1536 .dmamask = 64,
1504 .reset_dma = msgdma_reset, 1537 .reset_dma = msgdma_reset,
@@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = {
1514 .get_rx_status = msgdma_rx_status, 1547 .get_rx_status = msgdma_rx_status,
1515 .init_dma = msgdma_initialize, 1548 .init_dma = msgdma_initialize,
1516 .uninit_dma = msgdma_uninitialize, 1549 .uninit_dma = msgdma_uninitialize,
1550 .start_rxdma = msgdma_start_rxdma,
1517}; 1551};
1518 1552
1519static struct of_device_id altera_tse_ids[] = { 1553static struct of_device_id altera_tse_ids[] = {
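Both backends now publish a start_rxdma hook through the (newly const) ops tables above, and tse_open() calls it only after NAPI and the transmit queue are running, enabling the MAC last so no frame arrives before receive descriptors are posted. The dispatch itself is a plain indirect call through the per-device ops pointer; sketched:

        /* Sketch: datapath bring-up order used by tse_open(). */
        static void example_start_datapath(struct altera_tse_private *priv)
        {
                /* descriptors, IRQs, NAPI and the queue are assumed live */
                priv->dmaops->start_rxdma(priv);        /* SGDMA or MSGDMA */

                spin_lock(&priv->mac_cfg_lock);
                tse_set_mac(priv, true);                /* MAC Rx/Tx last */
                spin_unlock(&priv->mac_cfg_lock);
        }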
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
17#include "altera_tse.h" 17#include "altera_tse.h"
18#include "altera_utils.h" 18#include "altera_utils.h"
19 19
20void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) 20void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
21{ 21{
22 u32 value = ioread32(ioaddr); 22 u32 value = csrrd32(ioaddr, offs);
23 value |= bit_mask; 23 value |= bit_mask;
24 iowrite32(value, ioaddr); 24 csrwr32(value, ioaddr, offs);
25} 25}
26 26
27void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) 27void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
28{ 28{
29 u32 value = ioread32(ioaddr); 29 u32 value = csrrd32(ioaddr, offs);
30 value &= ~bit_mask; 30 value &= ~bit_mask;
31 iowrite32(value, ioaddr); 31 csrwr32(value, ioaddr, offs);
32} 32}
33 33
34int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) 34int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
35{ 35{
36 u32 value = ioread32(ioaddr); 36 u32 value = csrrd32(ioaddr, offs);
37 return (value & bit_mask) ? 1 : 0; 37 return (value & bit_mask) ? 1 : 0;
38} 38}
39 39
40int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) 40int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
41{ 41{
42 u32 value = ioread32(ioaddr); 42 u32 value = csrrd32(ioaddr, offs);
43 return (value & bit_mask) ? 0 : 1; 43 return (value & bit_mask) ? 0 : 1;
44} 44}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
19#ifndef __ALTERA_UTILS_H__ 19#ifndef __ALTERA_UTILS_H__
20#define __ALTERA_UTILS_H__ 20#define __ALTERA_UTILS_H__
21 21
22void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); 22void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
23void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); 23void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
24int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); 24int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
25int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); 25int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
26 26
27#endif /* __ALTERA_UTILS_H__*/ 27#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 928fac6dd10a..53f85bf71526 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -11,6 +11,7 @@
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/phy.h> 13#include <linux/phy.h>
14#include <linux/clk.h>
14 15
15/* STATUS and ENABLE Register bit masks */ 16/* STATUS and ENABLE Register bit masks */
16#define TXINT_MASK (1<<0) /* Transmit interrupt */ 17#define TXINT_MASK (1<<0) /* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
131 struct mii_bus *bus; 132 struct mii_bus *bus;
132 133
133 void __iomem *regs; 134 void __iomem *regs;
135 struct clk *clk;
134 136
135 struct napi_struct napi; 137 struct napi_struct napi;
136 struct net_device_stats stats; 138 struct net_device_stats stats;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index eeecc29cf5b7..d647a7d115ac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
574 return NETDEV_TX_OK; 574 return NETDEV_TX_OK;
575} 575}
576 576
577static void arc_emac_set_address_internal(struct net_device *ndev)
578{
579 struct arc_emac_priv *priv = netdev_priv(ndev);
580 unsigned int addr_low, addr_hi;
581
582 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
583 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
584
585 arc_reg_set(priv, R_ADDRL, addr_low);
586 arc_reg_set(priv, R_ADDRH, addr_hi);
587}
588
577/** 589/**
578 * arc_emac_set_address - Set the MAC address for this device. 590 * arc_emac_set_address - Set the MAC address for this device.
579 * @ndev: Pointer to net_device structure. 591 * @ndev: Pointer to net_device structure.
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
587 */ 599 */
588static int arc_emac_set_address(struct net_device *ndev, void *p) 600static int arc_emac_set_address(struct net_device *ndev, void *p)
589{ 601{
590 struct arc_emac_priv *priv = netdev_priv(ndev);
591 struct sockaddr *addr = p; 602 struct sockaddr *addr = p;
592 unsigned int addr_low, addr_hi;
593 603
594 if (netif_running(ndev)) 604 if (netif_running(ndev))
595 return -EBUSY; 605 return -EBUSY;
@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
599 609
600 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 610 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
601 611
602 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); 612 arc_emac_set_address_internal(ndev);
603 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
604
605 arc_reg_set(priv, R_ADDRL, addr_low);
606 arc_reg_set(priv, R_ADDRH, addr_hi);
607 613
608 return 0; 614 return 0;
609} 615}
@@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
643 return -ENODEV; 649 return -ENODEV;
644 } 650 }
645 651
646 /* Get CPU clock frequency from device tree */
647 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
648 &clock_frequency)) {
649 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
650 return -EINVAL;
651 }
652
653 /* Get IRQ from device tree */ 652 /* Get IRQ from device tree */
654 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 653 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
655 if (!irq) { 654 if (!irq) {
@@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
677 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); 676 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
678 if (IS_ERR(priv->regs)) { 677 if (IS_ERR(priv->regs)) {
679 err = PTR_ERR(priv->regs); 678 err = PTR_ERR(priv->regs);
680 goto out; 679 goto out_netdev;
681 } 680 }
682 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); 681 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
683 682
683 priv->clk = of_clk_get(pdev->dev.of_node, 0);
684 if (IS_ERR(priv->clk)) {
685 /* Get CPU clock frequency from device tree */
686 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
687 &clock_frequency)) {
688 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
689 err = -EINVAL;
690 goto out_netdev;
691 }
692 } else {
693 err = clk_prepare_enable(priv->clk);
694 if (err) {
695 dev_err(&pdev->dev, "failed to enable clock\n");
696 goto out_clkget;
697 }
698
699 clock_frequency = clk_get_rate(priv->clk);
700 }
701
684 id = arc_reg_get(priv, R_ID); 702 id = arc_reg_get(priv, R_ID);
685 703
686 /* Check for EMAC revision 5 or 7, magic number */ 704 /* Check for EMAC revision 5 or 7, magic number */
687 if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 705 if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
688 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); 706 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
689 err = -ENODEV; 707 err = -ENODEV;
690 goto out; 708 goto out_clken;
691 } 709 }
692 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); 710 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
693 711
@@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
702 ndev->name, ndev); 720 ndev->name, ndev);
703 if (err) { 721 if (err) {
704 dev_err(&pdev->dev, "could not allocate IRQ\n"); 722 dev_err(&pdev->dev, "could not allocate IRQ\n");
705 goto out; 723 goto out_clken;
706 } 724 }
707 725
708 /* Get MAC address from device tree */ 726 /* Get MAC address from device tree */
@@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev)
713 else 731 else
714 eth_hw_addr_random(ndev); 732 eth_hw_addr_random(ndev);
715 733
734 arc_emac_set_address_internal(ndev);
716 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); 735 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
717 736
718 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ 737 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
722 if (!priv->rxbd) { 741 if (!priv->rxbd) {
723 dev_err(&pdev->dev, "failed to allocate data buffers\n"); 742 dev_err(&pdev->dev, "failed to allocate data buffers\n");
724 err = -ENOMEM; 743 err = -ENOMEM;
725 goto out; 744 goto out_clken;
726 } 745 }
727 746
728 priv->txbd = priv->rxbd + RX_BD_NUM; 747 priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
734 err = arc_mdio_probe(pdev, priv); 753 err = arc_mdio_probe(pdev, priv);
735 if (err) { 754 if (err) {
736 dev_err(&pdev->dev, "failed to probe MII bus\n"); 755 dev_err(&pdev->dev, "failed to probe MII bus\n");
737 goto out; 756 goto out_clken;
738 } 757 }
739 758
740 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 759 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
742 if (!priv->phy_dev) { 761 if (!priv->phy_dev) {
743 dev_err(&pdev->dev, "of_phy_connect() failed\n"); 762 dev_err(&pdev->dev, "of_phy_connect() failed\n");
744 err = -ENODEV; 763 err = -ENODEV;
745 goto out; 764 goto out_mdio;
746 } 765 }
747 766
748 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", 767 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)
752 771
753 err = register_netdev(ndev); 772 err = register_netdev(ndev);
754 if (err) { 773 if (err) {
755 netif_napi_del(&priv->napi);
756 dev_err(&pdev->dev, "failed to register network device\n"); 774 dev_err(&pdev->dev, "failed to register network device\n");
757 goto out; 775 goto out_netif_api;
758 } 776 }
759 777
760 return 0; 778 return 0;
761 779
762out: 780out_netif_api:
781 netif_napi_del(&priv->napi);
782 phy_disconnect(priv->phy_dev);
783 priv->phy_dev = NULL;
784out_mdio:
785 arc_mdio_remove(priv);
786out_clken:
787 if (!IS_ERR(priv->clk))
788 clk_disable_unprepare(priv->clk);
789out_clkget:
790 if (!IS_ERR(priv->clk))
791 clk_put(priv->clk);
792out_netdev:
763 free_netdev(ndev); 793 free_netdev(ndev);
764 return err; 794 return err;
765} 795}
@@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
774 arc_mdio_remove(priv); 804 arc_mdio_remove(priv);
775 unregister_netdev(ndev); 805 unregister_netdev(ndev);
776 netif_napi_del(&priv->napi); 806 netif_napi_del(&priv->napi);
807
808 if (!IS_ERR(priv->clk)) {
809 clk_disable_unprepare(priv->clk);
810 clk_put(priv->clk);
811 }
812
777 free_netdev(ndev); 813 free_netdev(ndev);
778 814
779 return 0; 815 return 0;
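The arc_emac rework above is the kernel's ordered-unwind idiom: each resource acquired in probe() gets a cleanup label, an error jumps to the label that releases exactly what has been taken so far, and remove() repeats the same releases unconditionally. A minimal sketch of that shape, with invented names (foo_probe, struct foo_priv); the clk and netdev calls are the real APIs the patch uses:

    /* Invented example driver; assumes <linux/clk.h>,
     * <linux/etherdevice.h> and <linux/platform_device.h>.
     */
    struct foo_priv {
    	struct clk *clk;
    };

    static int foo_probe(struct platform_device *pdev)
    {
    	struct net_device *ndev;
    	struct foo_priv *priv;
    	int err;

    	ndev = alloc_etherdev(sizeof(*priv));
    	if (!ndev)
    		return -ENOMEM;
    	priv = netdev_priv(ndev);

    	priv->clk = of_clk_get(pdev->dev.of_node, 0);
    	if (IS_ERR(priv->clk)) {
    		err = PTR_ERR(priv->clk);
    		goto out_netdev;	/* undo alloc_etherdev() only */
    	}

    	err = clk_prepare_enable(priv->clk);
    	if (err)
    		goto out_clkget;	/* also undo of_clk_get() */

    	err = register_netdev(ndev);
    	if (err)
    		goto out_clken;		/* undo everything above */

    	return 0;

    out_clken:
    	clk_disable_unprepare(priv->clk);
    out_clkget:
    	clk_put(priv->clk);
    out_netdev:
    	free_netdev(ndev);
    	return err;
    }

Each label undoes exactly one acquisition, so later failure points reuse the earlier labels as the tail of their own cleanup; that is why the patch replaces the single out: label with a chain of them.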
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10057{
10058 u8 major, minor, version; 10058 u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10352 /* Reset should be performed after BRB is emptied */ 10352 /* Reset should be performed after BRB is emptied */
10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10354 u32 timer_count = 1000; 10354 u32 timer_count = 1000;
10355 bool need_write = true;
10355 10356
10356 /* Close the MAC Rx to prevent BRB from filling up */ 10357 /* Close the MAC Rx to prevent BRB from filling up */
10357 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10358 bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10398 * cleaning methods - might be redundant but harmless. 10399 * cleaning methods - might be redundant but harmless.
10399 */ 10400 */
10400 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10401 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
10401 bnx2x_prev_unload_undi_mf(bp); 10402 if (need_write) {
10403 bnx2x_prev_unload_undi_mf(bp);
10404 need_write = false;
10405 }
10402 } else if (prev_undi) { 10406 } else if (prev_undi) {
10403 /* If UNDI resides in memory, 10407 /* If UNDI resides in memory,
10404 * manually increment it 10408 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13233 iounmap(bp->doorbells); 13237 iounmap(bp->doorbells);
13234 13238
13235 bnx2x_release_firmware(bp); 13239 bnx2x_release_firmware(bp);
13240 } else {
13241 bnx2x_vf_pci_dealloc(bp);
13236 } 13242 }
13237 bnx2x_free_mem_bp(bp); 13243 bnx2x_free_mem_bp(bp);
13238 13244
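The first hunk of the bnx2x change swaps hard-coded absolute register addresses for BAR_TSTRORM_INTMEM-relative ones, so the defines keep working if the storm-memory window moves. The arithmetic in isolation (the base value below is inferred from the diff, not quoted from bnx2x.h):

    #include <stdio.h>

    #define BAR_TSTRORM_INTMEM 0x1a0000	/* inferred: 0x1a0000 + 0x150c == 0x1a150c */

    /* Before: absolute address baked in.  After: base + offset. */
    #define UNDI_MF_PORT_ABS(p)	(0x1a150c + ((p) << 4))
    #define UNDI_MF_PORT_REL(p)	(BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))

    int main(void)
    {
    	/* Identical today, but only the relative form tracks the base. */
    	printf("port 1: abs=%#x rel=%#x\n",
    	       UNDI_MF_PORT_ABS(1), UNDI_MF_PORT_REL(1));
    	return 0;
    }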
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
429 vf_vlan_rules_cnt(vf))) { 429 vf_vlan_rules_cnt(vf))) {
430 BNX2X_ERR("No credits for vlan\n"); 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n",
431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
432 vf_vlan_rules_cnt(vf));
431 return -ENOMEM; 433 return -ENOMEM;
432 } 434 }
433 435
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
610 } 612 }
611 613
612 /* add new mcasts */ 614 /* add new mcasts */
615 mcast.mcast_list_len = mc_num;
613 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 616 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
614 if (rc) 617 if (rc)
615 BNX2X_ERR("Faled to add multicasts\n"); 618 BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
837 return 0; 840 return 0;
838} 841}
839 842
843static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
844 struct bnx2x_virtf *vf,
845 int new)
846{
847 int num = vf_vlan_rules_cnt(vf);
848 int diff = new - num;
849 bool rc = true;
850
851 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
852 vf->abs_vfid, new, num);
853
854 if (diff > 0)
855 rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
856 else if (diff < 0)
857 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
858
859 if (rc)
860 vf_vlan_rules_cnt(vf) = new;
861 else
862 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
863 vf->abs_vfid);
864}
865
840/* must be called after the number of PF queues and the number of VFs are 866/* must be called after the number of PF queues and the number of VFs are
841 * both known 867 * both known
842 */ 868 */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
854 resc->num_mac_filters = 1; 880 resc->num_mac_filters = 1;
855 881
856 /* divvy up vlan rules */ 882 /* divvy up vlan rules */
883 bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
857 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 884 vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
858 vlan_count = 1 << ilog2(vlan_count); 885 vlan_count = 1 << ilog2(vlan_count);
859 resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 886 bnx2x_iov_re_set_vlan_filters(bp, vf,
887 vlan_count / BNX2X_NR_VIRTFN(bp));
860 888
861 /* no real limitation */ 889 /* no real limitation */
862 resc->num_mc_filters = 0; 890 resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
1478 bnx2x_iov_static_resc(bp, vf); 1506 bnx2x_iov_static_resc(bp, vf);
1479 1507
1480 /* queues are initialized during VF-ACQUIRE */ 1508 /* queues are initialized during VF-ACQUIRE */
1481
1482 /* reserve the vf vlan credit */
1483 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1484
1485 vf->filter_state = 0; 1509 vf->filter_state = 0;
1486 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1510 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1487 1511
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1912 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1936 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1913 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1937 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1914 1938
1939 /* Save a vlan filter for the Hypervisor */
1915 return ((req_resc->num_rxqs <= rxq_cnt) && 1940 return ((req_resc->num_rxqs <= rxq_cnt) &&
1916 (req_resc->num_txqs <= txq_cnt) && 1941 (req_resc->num_txqs <= txq_cnt) &&
1917 (req_resc->num_sbs <= vf_sb_count(vf)) && 1942 (req_resc->num_sbs <= vf_sb_count(vf)) &&
1918 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1943 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1919 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 1944 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
1920} 1945}
1921 1946
1922/* CORE VF API */ 1947/* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1972 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1997 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1973 if (resc->num_mac_filters) 1998 if (resc->num_mac_filters)
1974 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 1999 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
1975 if (resc->num_vlan_filters) 2000 /* Add an additional vlan filter credit for the hypervisor */
1976 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2001 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
1977 2002
1978 DP(BNX2X_MSG_IOV, 2003 DP(BNX2X_MSG_IOV,
1979 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2004 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
1980 vf_sb_count(vf), vf_rxq_count(vf), 2005 vf_sb_count(vf), vf_rxq_count(vf),
1981 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2006 vf_txq_count(vf), vf_mac_rules_cnt(vf),
1982 vf_vlan_rules_cnt(vf)); 2007 vf_vlan_rules_visible_cnt(vf));
1983 2008
1984 /* Initialize the queues */ 2009 /* Initialize the queues */
1985 if (!vf->vfqs) { 2010 if (!vf->vfqs) {
@@ -2670,7 +2695,7 @@ out:
2670 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2671 } 2696 }
2672 2697
2673 return 0; 2698 return rc;
2674} 2699}
2675 2700
2676int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
2896 return bp->regview + PXP_VF_ADDR_DB_START; 2921 return bp->regview + PXP_VF_ADDR_DB_START;
2897} 2922}
2898 2923
2924void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
2925{
2926 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
2927 sizeof(struct bnx2x_vf_mbx_msg));
2928 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
2929 sizeof(union pf_vf_bulletin));
2930}
2931
2899int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2932int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2900{ 2933{
2901 mutex_init(&bp->vf2pf_mutex); 2934 mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2915 return 0; 2948 return 0;
2916 2949
2917alloc_mem_err: 2950alloc_mem_err:
2918 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2951 bnx2x_vf_pci_dealloc(bp);
2919 sizeof(struct bnx2x_vf_mbx_msg));
2920 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
2921 sizeof(union pf_vf_bulletin));
2922 return -ENOMEM; 2952 return -ENOMEM;
2923} 2953}
2924 2954
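bnx2x_iov_re_set_vlan_filters(), added above, converts an absolute credit target into a delta against the shared pool: take from the pool to grow, return to it to shrink, and only commit the new count when the pool call succeeds. The same accounting as a standalone, runnable sketch (the pool type here is invented; the real bp->vlans_pool works through callbacks):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative credit pool; stands in for bp->vlans_pool. */
    struct pool { int avail; };

    static bool pool_get(struct pool *p, int n)
    {
    	if (p->avail < n)
    		return false;
    	p->avail -= n;
    	return true;
    }

    static bool pool_put(struct pool *p, int n)
    {
    	p->avail += n;
    	return true;
    }

    /* Move a consumer from *cur credits to 'want' credits by delta. */
    static int re_set_credits(struct pool *p, int *cur, int want)
    {
    	int diff = want - *cur;
    	bool ok = true;

    	if (diff > 0)
    		ok = pool_get(p, diff);		/* growing: take from pool */
    	else if (diff < 0)
    		ok = pool_put(p, -diff);	/* shrinking: give back */

    	if (ok)
    		*cur = want;
    	return ok ? 0 : -1;
    }

    int main(void)
    {
    	struct pool vlans = { .avail = 8 };
    	int vf_credits = 0;

    	re_set_credits(&vlans, &vf_credits, 3);	/* pool: 5, vf: 3 */
    	re_set_credits(&vlans, &vf_credits, 1);	/* pool: 7, vf: 1 */
    	printf("pool=%d vf=%d\n", vlans.avail, vf_credits);
    	return 0;
    }

Because a failed pool operation leaves the committed count untouched, a later retry still starts from consistent state.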
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8bf764570eef..6929adba52f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
162 /* Hide a single vlan filter credit for the hypervisor */
163#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
162 164
163 u8 sb_count; /* actual number of SBs */ 165 u8 sb_count; /* actual number of SBs */
164 u8 igu_base_id; /* base igu status block id */ 166 u8 igu_base_id; /* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
502enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 504enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
503void bnx2x_timer_sriov(struct bnx2x *bp); 505void bnx2x_timer_sriov(struct bnx2x *bp);
504void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 506void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
507void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
505int bnx2x_vf_pci_alloc(struct bnx2x *bp); 508int bnx2x_vf_pci_alloc(struct bnx2x *bp);
506int bnx2x_enable_sriov(struct bnx2x *bp); 509int bnx2x_enable_sriov(struct bnx2x *bp);
507void bnx2x_disable_sriov(struct bnx2x *bp); 510void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
568 return NULL; 571 return NULL;
569} 572}
570 573
 574static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
571static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
572static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
573static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
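The new vf_vlan_rules_visible_cnt() macro hides one credit: bnx2x_vf_acquire() reserves the requested vlan filters plus one for the hypervisor, and every count reported back to the VF subtracts that one again. In miniature (values and names invented):

    #include <stdio.h>

    /* Credits actually allocated vs. what the VF is told.
     * One vlan filter is always held back for the hypervisor.
     */
    static int vlan_rules_cnt;				/* allocated (internal) */
    #define vlan_rules_visible_cnt() (vlan_rules_cnt - 1)	/* reported */

    static int vf_acquire(int requested)
    {
    	/* Grant the request plus the hidden hypervisor credit. */
    	vlan_rules_cnt = requested + 1;
    	return vlan_rules_visible_cnt();	/* what the VF sees */
    }

    int main(void)
    {
    	printf("VF asked for 3, sees %d, PF reserved %d\n",
    	       vf_acquire(3), vlan_rules_cnt);
    	return 0;
    }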
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0622884596b2..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
747out: 747out:
748 bnx2x_vfpf_finalize(bp, &req->first_tlv); 748 bnx2x_vfpf_finalize(bp, &req->first_tlv);
749 749
750 return 0; 750 return rc;
751} 751}
752 752
753/* request pf to config rss table for vf queues*/ 753/* request pf to config rss table for vf queues*/
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1163 bnx2x_vf_max_queue_cnt(bp, vf); 1163 bnx2x_vf_max_queue_cnt(bp, vf);
1164 resc->num_sbs = vf_sb_count(vf); 1164 resc->num_sbs = vf_sb_count(vf);
1165 resc->num_mac_filters = vf_mac_rules_cnt(vf); 1165 resc->num_mac_filters = vf_mac_rules_cnt(vf);
1166 resc->num_vlan_filters = vf_vlan_rules_cnt(vf); 1166 resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
1167 resc->num_mc_filters = 0; 1167 resc->num_mc_filters = 0;
1168 1168
1169 if (status == PFVF_STATUS_SUCCESS) { 1169 if (status == PFVF_STATUS_SUCCESS) {
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 7e49c43b7af3..9e089d24466e 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
4 4
5config NET_CADENCE 5config NET_CADENCE
6 bool "Cadence devices" 6 bool "Cadence devices"
7 depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST) 7 depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
8 default y 8 default y
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y. 10 If you have a network (Ethernet) card belonging to this class, say Y.
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
30 30
31config MACB 31config MACB
32 tristate "Cadence MACB/GEM support" 32 tristate "Cadence MACB/GEM support"
33 depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST) 33 depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
34 select PHYLIB 34 select PHYLIB
35 ---help--- 35 ---help---
36 The Cadence MACB ethernet interface is found on many Atmel AT32 and 36 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca97005e24b4..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
599{ 599{
600 unsigned int entry; 600 unsigned int entry;
601 struct sk_buff *skb; 601 struct sk_buff *skb;
602 struct macb_dma_desc *desc;
603 dma_addr_t paddr; 602 dma_addr_t paddr;
604 603
605 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 604 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
606 u32 addr, ctrl;
607
608 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 605 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
609 desc = &bp->rx_ring[entry];
610 606
611 /* Make hw descriptor updates visible to CPU */ 607 /* Make hw descriptor updates visible to CPU */
612 rmb(); 608 rmb();
613 609
614 addr = desc->addr;
615 ctrl = desc->ctrl;
616 bp->rx_prepared_head++; 610 bp->rx_prepared_head++;
617 611
618 if ((addr & MACB_BIT(RX_USED)))
619 continue;
620
621 if (bp->rx_skbuff[entry] == NULL) { 612 if (bp->rx_skbuff[entry] == NULL) {
622 /* allocate sk_buff for this free entry in ring */ 613 /* allocate sk_buff for this free entry in ring */
623 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 614 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
698 if (!(addr & MACB_BIT(RX_USED))) 689 if (!(addr & MACB_BIT(RX_USED)))
699 break; 690 break;
700 691
701 desc->addr &= ~MACB_BIT(RX_USED);
702 bp->rx_tail++; 692 bp->rx_tail++;
703 count++; 693 count++;
704 694
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
891 if (work_done < budget) { 881 if (work_done < budget) {
892 napi_complete(napi); 882 napi_complete(napi);
893 883
894 /*
895 * We've done what we can to clean the buffers. Make sure we
896 * get notified when new packets arrive.
897 */
898 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
899
900 /* Packets received while interrupts were disabled */ 884 /* Packets received while interrupts were disabled */
901 status = macb_readl(bp, RSR); 885 status = macb_readl(bp, RSR);
902 if (unlikely(status)) 886 if (status) {
887 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
888 macb_writel(bp, ISR, MACB_BIT(RCOMP));
903 napi_reschedule(napi); 889 napi_reschedule(napi);
890 } else {
891 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
892 }
904 } 893 }
905 894
906 /* TODO: Handle errors */ 895 /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
951 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 940 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
952 macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 941 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
953 schedule_work(&bp->tx_error_task); 942 schedule_work(&bp->tx_error_task);
943
944 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
945 macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
946
954 break; 947 break;
955 } 948 }
956 949
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
968 bp->hw_stats.gem.rx_overruns++; 961 bp->hw_stats.gem.rx_overruns++;
969 else 962 else
970 bp->hw_stats.macb.rx_overruns++; 963 bp->hw_stats.macb.rx_overruns++;
964
965 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
966 macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
971 } 967 }
972 968
973 if (status & MACB_BIT(HRESP)) { 969 if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
977 * (work queue?) 973 * (work queue?)
978 */ 974 */
979 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 975 netdev_err(dev, "DMA bus error: HRESP not OK\n");
976
977 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
978 macb_writel(bp, ISR, MACB_BIT(HRESP));
980 } 979 }
981 980
982 status = macb_readl(bp, ISR); 981 status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
1113 1112
1114 desc = &bp->rx_ring[i]; 1113 desc = &bp->rx_ring[i];
1115 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1114 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1116 dma_unmap_single(&bp->pdev->dev, addr, skb->len, 1115 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1117 DMA_FROM_DEVICE); 1116 DMA_FROM_DEVICE);
1118 dev_kfree_skb_any(skb); 1117 dev_kfree_skb_any(skb);
1119 skb = NULL; 1118 skb = NULL;
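The macb_poll() change reorders interrupt re-arming: complete NAPI, re-read RSR, and only unmask RX interrupts when nothing arrived while they were off; otherwise acknowledge and reschedule. A schematic of that pattern, where rx_work() stands in for the driver's receive processing and the register helpers are the patch's own:

    /* Sketch of the IRQ re-arm ordering from the macb_poll() change. */
    static int rx_work(struct macb *bp, int budget);	/* stand-in */

    static int poll_sketch(struct napi_struct *napi, int budget)
    {
    	struct macb *bp = container_of(napi, struct macb, napi);
    	int work_done = rx_work(bp, budget);

    	if (work_done < budget) {
    		napi_complete(napi);

    		/* Frames may have landed while RX IRQs were masked. */
    		if (macb_readl(bp, RSR)) {
    			/* ack first on clear-on-write controllers */
    			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
    				macb_writel(bp, ISR, MACB_BIT(RCOMP));
    			napi_reschedule(napi);	/* poll again, IRQs stay off */
    		} else {
    			/* clean: safe to unmask RX interrupts */
    			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
    		}
    	}
    	return work_done;
    }

Unmasking only on the clean path avoids re-enabling interrupts when another poll pass is already known to be required.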
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index d40c994a4f6a..570222c33410 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,13 +67,13 @@ config CHELSIO_T3
67 will be called cxgb3. 67 will be called cxgb3.
68 68
69config CHELSIO_T4 69config CHELSIO_T4
70 tristate "Chelsio Communications T4 Ethernet support" 70 tristate "Chelsio Communications T4/T5 Ethernet support"
71 depends on PCI 71 depends on PCI
72 select FW_LOADER 72 select FW_LOADER
73 select MDIO 73 select MDIO
74 ---help--- 74 ---help---
75 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 75 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
 76 adapters. 76 adapters and T5 based 40Gb Ethernet adapters.
77 77
78 For general information about Chelsio and our products, visit 78 For general information about Chelsio and our products, visit
79 our website at <http://www.chelsio.com>. 79 our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
87 will be called cxgb4. 87 will be called cxgb4.
88 88
89config CHELSIO_T4VF 89config CHELSIO_T4VF
90 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 90 tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
91 depends on PCI 91 depends on PCI
92 ---help--- 92 ---help---
93 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 93 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
94 adapters with PCI-E SR-IOV Virtual Functions. 94 adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
95 Functions.
95 96
96 For general information about Chelsio and our products, visit 97 For general information about Chelsio and our products, visit
97 our website at <http://www.chelsio.com>. 98 our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6fe58913403a..24e16e3301e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev)
5870 spd = " 2.5 GT/s"; 5870 spd = " 2.5 GT/s";
5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5872 spd = " 5 GT/s"; 5872 spd = " 5 GT/s";
5873 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5874 spd = " 8 GT/s";
5873 5875
5874 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 5876 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5875 bufp += sprintf(bufp, "100/"); 5877 bufp += sprintf(bufp, "100/");
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
 17/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 18 * It can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
 77static const struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
 137 void __iomem *io;
 138 void __iomem *dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
 144 void __iomem *ec_io;
 145 void __iomem *fifo_io;
 146 void __iomem *mii_io;
 147 void __iomem *mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
 300 void __iomem *ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
 369 /* Make sure that updates to tx_dnext are perceived
 370 * by the timer routine.
371 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
 402 /* We want a chunk of memory that is:
 403 * - aligned to the mask we just read back
 404 * - no larger than the DMA window (~mask + 1 bytes)
 405 * To guarantee an aligned region of that size fits,
 406 * allocate twice the window size.
 407 */
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
430
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
 515 priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
 565 .ndo_set_mac_address = eth_mac_addr,
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
 572 void __iomem *dma_io;
 573 void __iomem *io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
 618 if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
 641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
 672 unregister_netdev(net_dev);
 673
 674 pci_iounmap(dev, priv->dma_io);
 675 pci_iounmap(dev, priv->io);
 676 free_netdev(net_dev);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
 703MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
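ec_bhf_alloc_dma_mem() above sizes its buffer by writing all-ones to the channel's window register and reading back an address mask; it then allocates twice the window so a mask-aligned region of the requested size is guaranteed to fit. The arithmetic in isolation, as a runnable sketch with an example mask (all values invented):

    #include <stdint.h>
    #include <stdio.h>

    /* The alignment trick from ec_bhf_alloc_dma_mem(), in isolation.
     * 'mask' is what the device returns after a 0xffffffff write:
     * low bits clear, so ~mask + 1 is both the window size and the
     * required alignment.
     */
    int main(void)
    {
    	uint32_t mask = 0xffff0000;		/* example: 64 KiB window */
    	uint32_t win = ~mask + 1;		/* 0x10000 */

    	/* Pretend dma_alloc_coherent() returned this (unaligned). */
    	uint32_t alloc_phys = 0x12345678;
    	uint32_t alloc_len = 2 * win;		/* over-allocate 2x */

    	/* The first mask-aligned address at least 'win' bytes into
    	 * the allocation always leaves 'win' usable bytes before
    	 * alloc_phys + alloc_len.
    	 */
    	uint32_t buf_phys = (alloc_phys + win) & mask;

    	printf("window %#x, buf at %#x (offset %#x into allocation)\n",
    	       win, buf_phys, buf_phys - alloc_phys);
    	return 0;
    }

The driver keeps both halves of the result: alloc/alloc_phys for freeing the whole region later, and buf/buf_phys as the aligned address programmed into the device.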
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4949 if (status) 4949 if (status)
4950 goto err; 4950 goto err;
4951 4951
4952 /* On some BE3 FW versions, after a HW reset,
4953 * interrupts will remain disabled for each function.
4954 * So, explicitly enable interrupts
4955 */
4956 be_intr_set(adapter, true);
4957
4952 /* tell fw we're ready to fire cmds */ 4958 /* tell fw we're ready to fire cmds */
4953 status = be_cmd_fw_init(adapter); 4959 status = be_cmd_fw_init(adapter);
4954 if (status) 4960 if (status)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..e2d42475b006 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static noinline void gfar_update_link_state(struct gfar_private *priv);
124static int init_phy(struct net_device *dev); 125static int init_phy(struct net_device *dev);
125static int gfar_probe(struct platform_device *ofdev); 126static int gfar_probe(struct platform_device *ofdev);
126static int gfar_remove(struct platform_device *ofdev); 127static int gfar_remove(struct platform_device *ofdev);
@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3076 return IRQ_HANDLED; 3077 return IRQ_HANDLED;
3077} 3078}
3078 3079
3079static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3080{
3081 struct phy_device *phydev = priv->phydev;
3082 u32 val = 0;
3083
3084 if (!phydev->duplex)
3085 return val;
3086
3087 if (!priv->pause_aneg_en) {
3088 if (priv->tx_pause_en)
3089 val |= MACCFG1_TX_FLOW;
3090 if (priv->rx_pause_en)
3091 val |= MACCFG1_RX_FLOW;
3092 } else {
3093 u16 lcl_adv, rmt_adv;
3094 u8 flowctrl;
3095 /* get link partner capabilities */
3096 rmt_adv = 0;
3097 if (phydev->pause)
3098 rmt_adv = LPA_PAUSE_CAP;
3099 if (phydev->asym_pause)
3100 rmt_adv |= LPA_PAUSE_ASYM;
3101
3102 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3103
3104 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3105 if (flowctrl & FLOW_CTRL_TX)
3106 val |= MACCFG1_TX_FLOW;
3107 if (flowctrl & FLOW_CTRL_RX)
3108 val |= MACCFG1_RX_FLOW;
3109 }
3110
3111 return val;
3112}
3113
3114/* Called every time the controller might need to be made 3080/* Called every time the controller might need to be made
3115 * aware of new link state. The PHY code conveys this 3081 * aware of new link state. The PHY code conveys this
3116 * information through variables in the phydev structure, and this 3082 * information through variables in the phydev structure, and this
@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3120static void adjust_link(struct net_device *dev) 3086static void adjust_link(struct net_device *dev)
3121{ 3087{
3122 struct gfar_private *priv = netdev_priv(dev); 3088 struct gfar_private *priv = netdev_priv(dev);
3123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3124 struct phy_device *phydev = priv->phydev; 3089 struct phy_device *phydev = priv->phydev;
3125 int new_state = 0;
3126 3090
3127 if (test_bit(GFAR_RESETTING, &priv->state)) 3091 if (unlikely(phydev->link != priv->oldlink ||
3128 return; 3092 phydev->duplex != priv->oldduplex ||
3129 3093 phydev->speed != priv->oldspeed))
3130 if (phydev->link) { 3094 gfar_update_link_state(priv);
3131 u32 tempval1 = gfar_read(&regs->maccfg1);
3132 u32 tempval = gfar_read(&regs->maccfg2);
3133 u32 ecntrl = gfar_read(&regs->ecntrl);
3134
3135 /* Now we make sure that we can be in full duplex mode.
3136 * If not, we operate in half-duplex mode.
3137 */
3138 if (phydev->duplex != priv->oldduplex) {
3139 new_state = 1;
3140 if (!(phydev->duplex))
3141 tempval &= ~(MACCFG2_FULL_DUPLEX);
3142 else
3143 tempval |= MACCFG2_FULL_DUPLEX;
3144
3145 priv->oldduplex = phydev->duplex;
3146 }
3147
3148 if (phydev->speed != priv->oldspeed) {
3149 new_state = 1;
3150 switch (phydev->speed) {
3151 case 1000:
3152 tempval =
3153 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3154
3155 ecntrl &= ~(ECNTRL_R100);
3156 break;
3157 case 100:
3158 case 10:
3159 tempval =
3160 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3161
3162 /* Reduced mode distinguishes
3163 * between 10 and 100
3164 */
3165 if (phydev->speed == SPEED_100)
3166 ecntrl |= ECNTRL_R100;
3167 else
3168 ecntrl &= ~(ECNTRL_R100);
3169 break;
3170 default:
3171 netif_warn(priv, link, dev,
3172 "Ack! Speed (%d) is not 10/100/1000!\n",
3173 phydev->speed);
3174 break;
3175 }
3176
3177 priv->oldspeed = phydev->speed;
3178 }
3179
3180 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3181 tempval1 |= gfar_get_flowctrl_cfg(priv);
3182
3183 gfar_write(&regs->maccfg1, tempval1);
3184 gfar_write(&regs->maccfg2, tempval);
3185 gfar_write(&regs->ecntrl, ecntrl);
3186
3187 if (!priv->oldlink) {
3188 new_state = 1;
3189 priv->oldlink = 1;
3190 }
3191 } else if (priv->oldlink) {
3192 new_state = 1;
3193 priv->oldlink = 0;
3194 priv->oldspeed = 0;
3195 priv->oldduplex = -1;
3196 }
3197
3198 if (new_state && netif_msg_link(priv))
3199 phy_print_status(phydev);
3200} 3095}
3201 3096
3202/* Update the hash table based on the current list of multicast 3097/* Update the hash table based on the current list of multicast
@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3442 return IRQ_HANDLED; 3337 return IRQ_HANDLED;
3443} 3338}
3444 3339
3340static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3341{
3342 struct phy_device *phydev = priv->phydev;
3343 u32 val = 0;
3344
3345 if (!phydev->duplex)
3346 return val;
3347
3348 if (!priv->pause_aneg_en) {
3349 if (priv->tx_pause_en)
3350 val |= MACCFG1_TX_FLOW;
3351 if (priv->rx_pause_en)
3352 val |= MACCFG1_RX_FLOW;
3353 } else {
3354 u16 lcl_adv, rmt_adv;
3355 u8 flowctrl;
3356 /* get link partner capabilities */
3357 rmt_adv = 0;
3358 if (phydev->pause)
3359 rmt_adv = LPA_PAUSE_CAP;
3360 if (phydev->asym_pause)
3361 rmt_adv |= LPA_PAUSE_ASYM;
3362
3363 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3364
3365 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3366 if (flowctrl & FLOW_CTRL_TX)
3367 val |= MACCFG1_TX_FLOW;
3368 if (flowctrl & FLOW_CTRL_RX)
3369 val |= MACCFG1_RX_FLOW;
3370 }
3371
3372 return val;
3373}
3374
3375static noinline void gfar_update_link_state(struct gfar_private *priv)
3376{
3377 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3378 struct phy_device *phydev = priv->phydev;
3379
3380 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3381 return;
3382
3383 if (phydev->link) {
3384 u32 tempval1 = gfar_read(&regs->maccfg1);
3385 u32 tempval = gfar_read(&regs->maccfg2);
3386 u32 ecntrl = gfar_read(&regs->ecntrl);
3387
3388 if (phydev->duplex != priv->oldduplex) {
3389 if (!(phydev->duplex))
3390 tempval &= ~(MACCFG2_FULL_DUPLEX);
3391 else
3392 tempval |= MACCFG2_FULL_DUPLEX;
3393
3394 priv->oldduplex = phydev->duplex;
3395 }
3396
3397 if (phydev->speed != priv->oldspeed) {
3398 switch (phydev->speed) {
3399 case 1000:
3400 tempval =
3401 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3402
3403 ecntrl &= ~(ECNTRL_R100);
3404 break;
3405 case 100:
3406 case 10:
3407 tempval =
3408 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3409
3410 /* Reduced mode distinguishes
3411 * between 10 and 100
3412 */
3413 if (phydev->speed == SPEED_100)
3414 ecntrl |= ECNTRL_R100;
3415 else
3416 ecntrl &= ~(ECNTRL_R100);
3417 break;
3418 default:
3419 netif_warn(priv, link, priv->ndev,
3420 "Ack! Speed (%d) is not 10/100/1000!\n",
3421 phydev->speed);
3422 break;
3423 }
3424
3425 priv->oldspeed = phydev->speed;
3426 }
3427
3428 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3429 tempval1 |= gfar_get_flowctrl_cfg(priv);
3430
3431 gfar_write(&regs->maccfg1, tempval1);
3432 gfar_write(&regs->maccfg2, tempval);
3433 gfar_write(&regs->ecntrl, ecntrl);
3434
3435 if (!priv->oldlink)
3436 priv->oldlink = 1;
3437
3438 } else if (priv->oldlink) {
3439 priv->oldlink = 0;
3440 priv->oldspeed = 0;
3441 priv->oldduplex = -1;
3442 }
3443
3444 if (netif_msg_link(priv))
3445 phy_print_status(phydev);
3446}
3447
3445static struct of_device_id gfar_match[] = 3448static struct of_device_id gfar_match[] =
3446{ 3449{
3447 { 3450 {
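The relocated gfar_get_flowctrl_cfg() resolves pause settings the standard MII way: local advertisement via mii_advertise_flowctrl(), link-partner bits from phydev->pause/asym_pause, and mii_resolve_flowctrl_fdx() to pick per-direction enables. The resolution rule on its own, as a runnable sketch (bit values match the ADVERTISE_PAUSE_*/LPA_PAUSE_* and FLOW_CTRL_* constants in the kernel headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PAUSE_CAP  0x0400	/* ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP */
    #define PAUSE_ASYM 0x0800	/* ADVERTISE_PAUSE_ASYM / LPA_PAUSE_ASYM */
    #define FC_TX 0x1
    #define FC_RX 0x2

    /* Same decision tree as the kernel's mii_resolve_flowctrl_fdx(). */
    static uint8_t resolve(uint16_t lcl, uint16_t rmt)
    {
    	if (lcl & rmt & PAUSE_CAP)
    		return FC_TX | FC_RX;	/* both symmetric: pause both ways */
    	if (lcl & rmt & PAUSE_ASYM) {
    		if (lcl & PAUSE_CAP)
    			return FC_RX;	/* asymmetric: one direction only */
    		if (rmt & PAUSE_CAP)
    			return FC_TX;
    	}
    	return 0;
    }

    int main(void)
    {
    	/* symmetric partner: resolves to pause in both directions */
    	printf("%#x\n", resolve(PAUSE_CAP | PAUSE_ASYM, PAUSE_CAP));	/* 0x3 */
    	/* asym-only partner: resolves to RX pause only */
    	printf("%#x\n", resolve(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM));	/* 0x2 */
    	return 0;
    }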
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 891dbee6e6c1..76d70708f864 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
533 struct gfar __iomem *regs = priv->gfargrp[0].regs; 533 struct gfar __iomem *regs = priv->gfargrp[0].regs;
534 u32 oldadv, newadv; 534 u32 oldadv, newadv;
535 535
536 if (!phydev)
537 return -ENODEV;
538
536 if (!(phydev->supported & SUPPORTED_Pause) || 539 if (!(phydev->supported & SUPPORTED_Pause) ||
537 (!(phydev->supported & SUPPORTED_Asym_Pause) && 540 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
538 (epause->rx_pause != epause->tx_pause))) 541 (epause->rx_pause != epause->tx_pause)))
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 9d75fef6396f..63eb959a28aa 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
682 goto fail6; 682 goto fail6;
683 683
684 /* Enable all MAL SERR interrupt sources */ 684 /* Enable all MAL SERR interrupt sources */
685 if (mal->version == 2) 685 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
686 set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
687 else
688 set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
689 686
690 /* Enable EOB interrupt */ 687 /* Enable EOB interrupt */
691 mal_enable_eob_irq(mal); 688 mal_enable_eob_irq(mal);
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index e431a32e3d69..eeade2ea8334 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
95 95
96 96
97#define MAL_IER 0x02 97#define MAL_IER 0x02
98/* MAL IER bits */
98#define MAL_IER_DE 0x00000010 99#define MAL_IER_DE 0x00000010
99#define MAL_IER_OTE 0x00000004 100#define MAL_IER_OTE 0x00000004
100#define MAL_IER_OE 0x00000002 101#define MAL_IER_OE 0x00000002
101#define MAL_IER_PE 0x00000001 102#define MAL_IER_PE 0x00000001
102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 103
108/* MAL V2 IER bits */ 104/* PLB read/write/timeout errors */
109#define MAL2_IER_PT 0x00000080 105#define MAL_IER_PTE 0x00000080
110#define MAL2_IER_PRE 0x00000040 106#define MAL_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 107#define MAL_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 108
109#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
110#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
111 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
116 112
117#define MAL_TXCASR 0x04 113#define MAL_TXCASR 0x04
118#define MAL_TXCARR 0x05 114#define MAL_TXCARR 0x05
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fb2f96da23b..a01182cce965 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
45 45
46/* RGMIIx_SSR */ 46/* RGMIIx_SSR */
47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) 47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
48#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8))
48#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) 49#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
49#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) 50#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
50 51
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
139 ssr |= RGMII_SSR_1000(input); 140 ssr |= RGMII_SSR_1000(input);
140 else if (speed == SPEED_100) 141 else if (speed == SPEED_100)
141 ssr |= RGMII_SSR_100(input); 142 ssr |= RGMII_SSR_100(input);
143 else if (speed == SPEED_10)
144 ssr |= RGMII_SSR_10(input);
142 145
143 out_be32(&p->ssr, ssr); 146 out_be32(&p->ssr, ssr);
144 147
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9866f264f55e..f0bbd4246d71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
186{ 186{
187 u16 phy_reg = 0; 187 u16 phy_reg = 0;
188 u32 phy_id = 0; 188 u32 phy_id = 0;
189 s32 ret_val; 189 s32 ret_val = 0;
190 u16 retry_count; 190 u16 retry_count;
191 u32 mac_reg = 0; 191 u32 mac_reg = 0;
192 192
@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
217 /* In case the PHY needs to be in mdio slow mode, 217 /* In case the PHY needs to be in mdio slow mode,
218 * set slow mode and try to get the PHY id again. 218 * set slow mode and try to get the PHY id again.
219 */ 219 */
220 hw->phy.ops.release(hw); 220 if (hw->mac.type < e1000_pch_lpt) {
221 ret_val = e1000_set_mdio_slow_mode_hv(hw); 221 hw->phy.ops.release(hw);
222 if (!ret_val) 222 ret_val = e1000_set_mdio_slow_mode_hv(hw);
223 ret_val = e1000e_get_phy_id(hw); 223 if (!ret_val)
224 hw->phy.ops.acquire(hw); 224 ret_val = e1000e_get_phy_id(hw);
225 hw->phy.ops.acquire(hw);
226 }
225 227
226 if (ret_val) 228 if (ret_val)
227 return false; 229 return false;
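
Two things change in e1000_phy_is_accessible_pchlan(): ret_val is now initialized, since the slow-mode retry that assigned it has become conditional and the later if (ret_val) test would otherwise read an indeterminate value, and the MDIO slow-mode fallback is restricted to MAC types older than LPT. A toy sketch of the gating (enum values are illustrative; only the relative order matters):

#include <stdio.h>

/* Rough analogue of the e1000_mac_type ordering assumed by the
 * "hw->mac.type < e1000_pch_lpt" test above. */
enum mac_type { e1000_pchlan, e1000_pch2lan, e1000_pch_lpt };

static const char *phy_id_retry_strategy(enum mac_type type)
{
        /* Only pre-LPT parts fall back to MDIO slow mode */
        if (type < e1000_pch_lpt)
                return "retry PHY id read in MDIO slow mode";
        return "no slow-mode retry";
}

int main(void)
{
        printf("pch2lan: %s\n", phy_id_retry_strategy(e1000_pch2lan));
        printf("pch_lpt: %s\n", phy_id_retry_strategy(e1000_pch_lpt));
        return 0;
}
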
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
842 } 844 }
843 } 845 }
844 846
847 if (hw->phy.type == e1000_phy_82579) {
848 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
849 &data);
850 if (ret_val)
851 goto release;
852
853 data &= ~I82579_LPI_100_PLL_SHUT;
854 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
855 data);
856 }
857
845 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 858 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
846 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); 859 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
847 if (ret_val) 860 if (ret_val)
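
The new 82579 block above clears the 100M LPI PLL-shutdown bit via the EMI interface; the I82579_LPI_PLL_SHUT register and the I82579_LPI_100_PLL_SHUT bit are added in the ich8lan.h hunk further down. The read-modify-write shape, as a standalone sketch:

#include <stdio.h>

#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */

/* Fetch the EMI register value, clear the 100M PLL-shutdown bit,
 * and hand back what gets written by e1000_write_emi_reg_locked(). */
static unsigned short lpi_pll_shut_disable_100m(unsigned short data)
{
        return data & ~I82579_LPI_100_PLL_SHUT;
}

int main(void)
{
        unsigned short reg = 0x0006;            /* example register value */
        printf("0x%04x -> 0x%04x\n", reg, lpi_pll_shut_disable_100m(reg));
        return 0;
}
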
@@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1314 return ret_val; 1327 return ret_val;
1315 } 1328 }
1316 1329
1317 /* When connected at 10Mbps half-duplex, 82579 parts are excessively 1330 /* When connected at 10Mbps half-duplex, some parts are excessively
1318 * aggressive resulting in many collisions. To avoid this, increase 1331 * aggressive resulting in many collisions. To avoid this, increase
1319 * the IPG and reduce Rx latency in the PHY. 1332 * the IPG and reduce Rx latency in the PHY.
1320 */ 1333 */
1321 if ((hw->mac.type == e1000_pch2lan) && link) { 1334 if (((hw->mac.type == e1000_pch2lan) ||
1335 (hw->mac.type == e1000_pch_lpt)) && link) {
1322 u32 reg; 1336 u32 reg;
1323 reg = er32(STATUS); 1337 reg = er32(STATUS);
1324 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1338 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1339 u16 emi_addr;
1340
1325 reg = er32(TIPG); 1341 reg = er32(TIPG);
1326 reg &= ~E1000_TIPG_IPGT_MASK; 1342 reg &= ~E1000_TIPG_IPGT_MASK;
1327 reg |= 0xFF; 1343 reg |= 0xFF;
@@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1332 if (ret_val) 1348 if (ret_val)
1333 return ret_val; 1349 return ret_val;
1334 1350
1335 ret_val = 1351 if (hw->mac.type == e1000_pch2lan)
1336 e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); 1352 emi_addr = I82579_RX_CONFIG;
1353 else
1354 emi_addr = I217_RX_CONFIG;
1355
1356 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1337 1357
1338 hw->phy.ops.release(hw); 1358 hw->phy.ops.release(hw);
1339 1359
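
The 10Mbps half-duplex collision workaround now also covers pch_lpt parts, so the Rx-configuration EMI address is chosen per MAC type rather than hard-coded to the 82579 register. A sketch using the two addresses from the ich8lan.h hunk below:

#include <stdio.h>

/* Receive-configuration EMI addresses from ich8lan.h */
#define I82579_RX_CONFIG 0x3412
#define I217_RX_CONFIG   0xB20C

enum mac_type { e1000_pch2lan, e1000_pch_lpt }; /* order illustrative */

/* Pick the EMI address the workaround writes zero to */
static unsigned short rx_config_emi_addr(enum mac_type type)
{
        return type == e1000_pch2lan ? I82579_RX_CONFIG : I217_RX_CONFIG;
}

int main(void)
{
        printf("pch2lan -> 0x%04x\n", rx_config_emi_addr(e1000_pch2lan));
        printf("pch_lpt -> 0x%04x\n", rx_config_emi_addr(e1000_pch_lpt));
        return 0;
}
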
@@ -2493,51 +2513,44 @@ release:
2493 * e1000_k1_gig_workaround_lv - K1 Si workaround 2513 * e1000_k1_gig_workaround_lv - K1 Si workaround
2494 * @hw: pointer to the HW structure 2514 * @hw: pointer to the HW structure
2495 * 2515 *
2496 * Workaround to set the K1 beacon duration for 82579 parts 2516 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2517 * Disable K1 in 1000Mbps and 100Mbps
2497 **/ 2518 **/
2498static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 2519static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2499{ 2520{
2500 s32 ret_val = 0; 2521 s32 ret_val = 0;
2501 u16 status_reg = 0; 2522 u16 status_reg = 0;
2502 u32 mac_reg;
2503 u16 phy_reg;
2504 2523
2505 if (hw->mac.type != e1000_pch2lan) 2524 if (hw->mac.type != e1000_pch2lan)
2506 return 0; 2525 return 0;
2507 2526
2508 /* Set K1 beacon duration based on 1Gbps speed or otherwise */ 2527 /* Set K1 beacon duration based on 10Mbps speed */
2509 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 2528 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2510 if (ret_val) 2529 if (ret_val)
2511 return ret_val; 2530 return ret_val;
2512 2531
2513 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 2532 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2514 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 2533 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2515 mac_reg = er32(FEXTNVM4); 2534 if (status_reg &
2516 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2535 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2517
2518 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
2519 if (ret_val)
2520 return ret_val;
2521
2522 if (status_reg & HV_M_STATUS_SPEED_1000) {
2523 u16 pm_phy_reg; 2536 u16 pm_phy_reg;
2524 2537
2525 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2538 /* LV 1G/100 Packet drop issue wa */
2526 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2527 /* LV 1G Packet drop issue wa */
2528 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); 2539 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2529 if (ret_val) 2540 if (ret_val)
2530 return ret_val; 2541 return ret_val;
2531 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; 2542 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2532 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); 2543 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2533 if (ret_val) 2544 if (ret_val)
2534 return ret_val; 2545 return ret_val;
2535 } else { 2546 } else {
2547 u32 mac_reg;
2548
2549 mac_reg = er32(FEXTNVM4);
2550 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2536 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2551 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2537 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2552 ew32(FEXTNVM4, mac_reg);
2538 } 2553 }
2539 ew32(FEXTNVM4, mac_reg);
2540 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
2541 } 2554 }
2542 2555
2543 return ret_val; 2556 return ret_val;
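
The rewritten e1000_k1_workaround_lv() no longer touches I82579_LPI_CTRL; at 100/1000Mbps it now disables K1 outright through HV_PM_CTRL, and only the 10Mbps path still programs the 16us beacon duration. A standalone decode of the decision using the HV_M_STATUS bits from the phy.h hunk below:

#include <stdio.h>

/* HV_M_STATUS bits from the phy.h hunk further down */
#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_1000       0x0200
#define HV_M_STATUS_SPEED_100        0x0100
#define HV_M_STATUS_LINK_UP          0x0040

/* Mirror of the branch structure in the reworked workaround */
static const char *k1_action(unsigned short status)
{
        unsigned short up = HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE;

        if ((status & up) != up)
                return "no action (link not up/negotiated)";
        if (status & (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100))
                return "clear HV_PM_CTRL_K1_ENABLE";
        return "set FEXTNVM4 beacon duration to 16us";
}

int main(void)
{
        printf("%s\n", k1_action(0x1140)); /* link up, 100Mbps */
        printf("%s\n", k1_action(0x1040)); /* link up, 10Mbps  */
        return 0;
}
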
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index bead50f9187b..5515126c81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -232,16 +232,19 @@
232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ 232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ 234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
235#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
235#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ 236#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
236#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ 237#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
237#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ 238#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
238#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ 239#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
239#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ 240#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
240#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ 241#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
242#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
241#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ 243#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
242#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ 244#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
243#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ 245#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
244#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ 246#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
247#define I217_RX_CONFIG 0xB20C /* Receive configuration */
245 248
246#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ 249#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
247#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ 250#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d50c91e50528..3e69386add04 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL; 1166 adapter->tx_hwtstamp_skb = NULL;
1167 adapter->tx_hwtstamp_timeouts++; 1167 adapter->tx_hwtstamp_timeouts++;
1168 e_warn("clearing Tx timestamp hang"); 1168 e_warn("clearing Tx timestamp hang\n");
1169 } else { 1169 } else {
1170 /* reschedule to check later */ 1170 /* reschedule to check later */
1171 schedule_work(&adapter->tx_hwtstamp_work); 1171 schedule_work(&adapter->tx_hwtstamp_work);
@@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5688{ 5688{
5689 struct e1000_adapter *adapter = netdev_priv(netdev); 5689 struct e1000_adapter *adapter = netdev_priv(netdev);
5690 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5690 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
5691 5691
5692 /* Jumbo frame support */ 5692 /* Jumbo frame support */
5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && 5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
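
e1000_change_mtu() now budgets VLAN_HLEN into max_frame, so a full-sized packet on a tagged VLAN no longer overruns the computed frame limit. The arithmetic, as a one-file check:

#include <stdio.h>

/* Header sizes as in <linux/if_ether.h> / <linux/if_vlan.h> */
#define ETH_HLEN    14
#define ETH_FCS_LEN  4
#define VLAN_HLEN    4

int main(void)
{
        int new_mtu = 1500;
        /* e1000_change_mtu() now budgets for a VLAN tag as well */
        int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
        printf("mtu %d -> max_frame %d\n", new_mtu, max_frame); /* 1522 */
        return 0;
}
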
@@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6235 return 0; 6235 return 0;
6236} 6236}
6237 6237
6238#ifdef CONFIG_PM_SLEEP
6238static int e1000e_pm_thaw(struct device *dev) 6239static int e1000e_pm_thaw(struct device *dev)
6239{ 6240{
6240 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6241 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev)
6255 return 0; 6256 return 0;
6256} 6257}
6257 6258
6258#ifdef CONFIG_PM_SLEEP
6259static int e1000e_pm_suspend(struct device *dev) 6259static int e1000e_pm_suspend(struct device *dev)
6260{ 6260{
6261 struct pci_dev *pdev = to_pci_dev(dev); 6261 struct pci_dev *pdev = to_pci_dev(dev);
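
Moving the CONFIG_PM_SLEEP guard up pulls e1000e_pm_thaw() inside it, presumably so the helper is only compiled when its PM-sleep callers exist and builds without CONFIG_PM_SLEEP do not warn about a defined-but-unused function. The pattern in miniature:

#include <stdio.h>

#define CONFIG_PM_SLEEP                 /* comment out to mimic a !PM build */

#ifdef CONFIG_PM_SLEEP
/* Helper and its only caller live under the same guard */
static int pm_thaw(void)    { return 0; }
static int pm_suspend(void) { return pm_thaw(); }
#endif

int main(void)
{
#ifdef CONFIG_PM_SLEEP
        printf("suspend/thaw path: %d\n", pm_suspend());
#else
        printf("PM sleep compiled out, helper not built\n");
#endif
        return 0;
}
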
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 3841bccf058c..537d2780b408 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
165#define HV_M_STATUS_SPEED_MASK 0x0300 165#define HV_M_STATUS_SPEED_MASK 0x0300
166#define HV_M_STATUS_SPEED_1000 0x0200 166#define HV_M_STATUS_SPEED_1000 0x0200
167#define HV_M_STATUS_SPEED_100 0x0100
167#define HV_M_STATUS_LINK_UP 0x0040 168#define HV_M_STATUS_LINK_UP 0x0040
168 169
169#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 170#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672..cf0761f08911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2898 2898
2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
2900 ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2900 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2901 i40e_ptp_tx_hwtstamp(pf); 2901 i40e_ptp_tx_hwtstamp(pf);
2902 prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
2903 } 2902 }
2904
2905 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
2906 } 2903 }
2907 2904
2908 /* If a critical error is pending we have no choice but to reset the 2905 /* If a critical error is pending we have no choice but to reset the
@@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev)
4271 if (err) 4268 if (err)
4272 return err; 4269 return err;
4273 4270
4271 /* configure global TSO hardware offload settings */
4272 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4273 TCP_FLAG_FIN) >> 16);
4274 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4275 TCP_FLAG_FIN |
4276 TCP_FLAG_CWR) >> 16);
4277 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4278
4274#ifdef CONFIG_I40E_VXLAN 4279#ifdef CONFIG_I40E_VXLAN
4275 vxlan_get_rx_port(netdev); 4280 vxlan_get_rx_port(netdev);
4276#endif 4281#endif
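
i40e_open() now programs the global TSO flag masks; the values are consistent with masking FIN/PSH off every segment but the last and CWR off every segment but the first, which also lines up with the new NETIF_F_TSO_ECN feature bit below (the F/M/L suffixes reading as first/middle/last is an inference from the values). The three register values can be reproduced standalone, with htonl/ntohl standing in for the kernel byte-order helpers:

#include <arpa/inet.h>
#include <stdio.h>

/* TCP flag words as in <linux/tcp.h> (stored in network byte order) */
#define TCP_FLAG_CWR htonl(0x00800000)
#define TCP_FLAG_PSH htonl(0x00080000)
#define TCP_FLAG_FIN htonl(0x00010000)

int main(void)
{
        /* The three I40E_GLLAN_TSOMSK_{F,M,L} values programmed above */
        printf("F = 0x%02x\n", (unsigned)(ntohl(TCP_FLAG_PSH | TCP_FLAG_FIN) >> 16));
        printf("M = 0x%02x\n",
               (unsigned)(ntohl(TCP_FLAG_PSH | TCP_FLAG_FIN | TCP_FLAG_CWR) >> 16));
        printf("L = 0x%02x\n", (unsigned)(ntohl(TCP_FLAG_CWR) >> 16));
        return 0;
}

Running this prints F = 0x09 (PSH|FIN), M = 0x89 (PSH|FIN|CWR) and L = 0x80 (CWR) in TCP header flag-byte positions.
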
@@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6712 NETIF_F_HW_VLAN_CTAG_FILTER | 6717 NETIF_F_HW_VLAN_CTAG_FILTER |
6713 NETIF_F_IPV6_CSUM | 6718 NETIF_F_IPV6_CSUM |
6714 NETIF_F_TSO | 6719 NETIF_F_TSO |
6720 NETIF_F_TSO_ECN |
6715 NETIF_F_TSO6 | 6721 NETIF_F_TSO6 |
6716 NETIF_F_RXCSUM | 6722 NETIF_F_RXCSUM |
6717 NETIF_F_NTUPLE | 6723 NETIF_F_NTUPLE |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 262bdf11d221..81299189a47d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
160 udelay(5); 160 udelay(5);
161 } 161 }
162 if (ret_code == I40E_ERR_TIMEOUT) 162 if (ret_code == I40E_ERR_TIMEOUT)
163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); 163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
164 return ret_code; 164 return ret_code;
165} 165}
166 166
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e33ec6c842b7..e61e63720800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
239 dev_kfree_skb_any(pf->ptp_tx_skb); 239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL; 240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++; 241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); 242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return; 243 return;
244 } 244 }
245 245
@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
321 pf->last_rx_ptp_check = jiffies; 321 pf->last_rx_ptp_check = jiffies;
322 pf->rx_hwtstamp_cleared++; 322 pf->rx_hwtstamp_cleared++;
323 dev_warn(&vsi->back->pdev->dev, 323 dev_warn(&vsi->back->pdev->dev,
324 "%s: clearing Rx timestamp hang", 324 "%s: clearing Rx timestamp hang\n",
325 __func__); 325 __func__);
326 } 326 }
327} 327}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0f5d96ad281d..9478ddc66caf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
418 } 418 }
419 break; 419 break;
420 default: 420 default:
421 dev_info(&pf->pdev->dev, "Could not specify spec type %d", 421 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
422 input->flow_type); 422 input->flow_type);
423 ret = -EINVAL; 423 ret = -EINVAL;
424 } 424 }
@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
479 } 479 }
480 } else { 480 } else {
481 dev_info(&pdev->dev, "FD filter programming error"); 481 dev_info(&pdev->dev, "FD filter programming error\n");
482 } 482 }
483 } else if (error == 483 } else if (error ==
484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1713 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 1713 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1714 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 1714 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1715 struct vlan_ethhdr *vhdr; 1715 struct vlan_ethhdr *vhdr;
1716 if (skb_header_cloned(skb) && 1716 int rc;
1717 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1717
1718 return -ENOMEM; 1718 rc = skb_cow_head(skb, 0);
1719 if (rc < 0)
1720 return rc;
1719 vhdr = (struct vlan_ethhdr *)skb->data; 1721 vhdr = (struct vlan_ethhdr *)skb->data;
1720 vhdr->h_vlan_TCI = htons(tx_flags >> 1722 vhdr->h_vlan_TCI = htons(tx_flags >>
1721 I40E_TX_FLAGS_VLAN_SHIFT); 1723 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1743 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1745 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1744{ 1746{
1745 u32 cd_cmd, cd_tso_len, cd_mss; 1747 u32 cd_cmd, cd_tso_len, cd_mss;
1748 struct ipv6hdr *ipv6h;
1746 struct tcphdr *tcph; 1749 struct tcphdr *tcph;
1747 struct iphdr *iph; 1750 struct iphdr *iph;
1748 u32 l4len; 1751 u32 l4len;
1749 int err; 1752 int err;
1750 struct ipv6hdr *ipv6h;
1751 1753
1752 if (!skb_is_gso(skb)) 1754 if (!skb_is_gso(skb))
1753 return 0; 1755 return 0;
1754 1756
1755 if (skb_header_cloned(skb)) { 1757 err = skb_cow_head(skb, 0);
1756 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1758 if (err < 0)
1757 if (err) 1759 return err;
1758 return err;
1759 }
1760 1760
1761 if (protocol == htons(ETH_P_IP)) { 1761 if (protocol == htons(ETH_P_IP)) {
1762 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1762 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
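
Both i40e hunks replace the open-coded skb_header_cloned() + pskb_expand_head() pair with skb_cow_head(skb, 0), which performs the same unshare-before-write in a single call. A toy copy-on-write model of what the helper guarantees (hypothetical pkt type, not the kernel sk_buff):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of a packet whose header may be shared: refs > 1 means
 * writing in place would corrupt another user's view of the data. */
struct pkt {
        unsigned char *head;
        size_t len;
        int refs;
};

/* Rough analogue of skb_cow_head(skb, 0): guarantee a private,
 * writable header before the caller edits it. */
static int pkt_cow_head(struct pkt *p)
{
        if (p->refs > 1) {
                unsigned char *copy = malloc(p->len);
                if (!copy)
                        return -1;      /* kernel code returns -ENOMEM */
                memcpy(copy, p->head, p->len);
                p->head = copy;
                p->refs = 1;
        }
        return 0;
}

int main(void)
{
        unsigned char shared[16] = { 0 };
        struct pkt p = { shared, sizeof(shared), 2 };

        if (pkt_cow_head(&p) == 0)
                p.head[0] = 0x81;       /* safe: header now private */
        printf("private copy: %s, first byte 0x%02x\n",
               p.head == shared ? "no" : "yes", p.head[0]);
        free(p.head == shared ? NULL : p.head);
        return 0;
}
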
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index db963397cc27..f67f8a170b90 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
365 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 365 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
366 if (word_address == address) { 366 if (word_address == address) {
367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
368 hw_dbg("Read INVM Word 0x%02x = %x", 368 hw_dbg("Read INVM Word 0x%02x = %x\n",
369 address, *data); 369 address, *data);
370 status = E1000_SUCCESS; 370 status = E1000_SUCCESS;
371 break; 371 break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5910a932ea7c..1e0c404db81a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
929 */ 929 */
930 if (hw->fc.requested_mode == e1000_fc_full) { 930 if (hw->fc.requested_mode == e1000_fc_full) {
931 hw->fc.current_mode = e1000_fc_full; 931 hw->fc.current_mode = e1000_fc_full;
932 hw_dbg("Flow Control = FULL.\r\n"); 932 hw_dbg("Flow Control = FULL.\n");
933 } else { 933 } else {
934 hw->fc.current_mode = e1000_fc_rx_pause; 934 hw->fc.current_mode = e1000_fc_rx_pause;
935 hw_dbg("Flow Control = " 935 hw_dbg("Flow Control = RX PAUSE frames only.\n");
936 "RX PAUSE frames only.\r\n");
937 } 936 }
938 } 937 }
939 /* For receiving PAUSE frames ONLY. 938 /* For receiving PAUSE frames ONLY.
@@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
948 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 947 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
949 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 948 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
950 hw->fc.current_mode = e1000_fc_tx_pause; 949 hw->fc.current_mode = e1000_fc_tx_pause;
951 hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); 950 hw_dbg("Flow Control = TX PAUSE frames only.\n");
952 } 951 }
953 /* For transmitting PAUSE frames ONLY. 952 /* For transmitting PAUSE frames ONLY.
954 * 953 *
@@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
962 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 961 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
963 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 962 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
964 hw->fc.current_mode = e1000_fc_rx_pause; 963 hw->fc.current_mode = e1000_fc_rx_pause;
965 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 964 hw_dbg("Flow Control = RX PAUSE frames only.\n");
966 } 965 }
967 /* Per the IEEE spec, at this point flow control should be 966 /* Per the IEEE spec, at this point flow control should be
968 * disabled. However, we want to consider that we could 967 * disabled. However, we want to consider that we could
@@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
988 (hw->fc.requested_mode == e1000_fc_tx_pause) || 987 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
989 (hw->fc.strict_ieee)) { 988 (hw->fc.strict_ieee)) {
990 hw->fc.current_mode = e1000_fc_none; 989 hw->fc.current_mode = e1000_fc_none;
991 hw_dbg("Flow Control = NONE.\r\n"); 990 hw_dbg("Flow Control = NONE.\n");
992 } else { 991 } else {
993 hw->fc.current_mode = e1000_fc_rx_pause; 992 hw->fc.current_mode = e1000_fc_rx_pause;
994 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 993 hw_dbg("Flow Control = RX PAUSE frames only.\n");
995 } 994 }
996 995
997 /* Now we need to do one last check... If we auto- 996 /* Now we need to do one last check... If we auto-
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fb98d4602f9d..16430a8440fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5193 5193
5194 rcu_read_lock(); 5194 rcu_read_lock();
5195 for (i = 0; i < adapter->num_rx_queues; i++) { 5195 for (i = 0; i < adapter->num_rx_queues; i++) {
5196 u32 rqdpc = rd32(E1000_RQDPC(i));
5197 struct igb_ring *ring = adapter->rx_ring[i]; 5196 struct igb_ring *ring = adapter->rx_ring[i];
5197 u32 rqdpc = rd32(E1000_RQDPC(i));
5198 if (hw->mac.type >= e1000_i210)
5199 wr32(E1000_RQDPC(i), 0);
5198 5200
5199 if (rqdpc) { 5201 if (rqdpc) {
5200 ring->rx_stats.drops += rqdpc; 5202 ring->rx_stats.drops += rqdpc;
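
igb_update_stats() now writes RQDPC back to zero on i210-class parts after folding it into the drop counters, evidently because the register does not clear on read there and the same drops would otherwise be counted again on every stats pass. The accumulate-then-reset shape:

#include <stdio.h>

static unsigned long long total_drops;

/* Read the per-queue drop counter, fold it into the running total,
 * then write 0 back (analogue of wr32(E1000_RQDPC(i), 0) above). */
static void update_stats(unsigned int *rqdpc_reg)
{
        unsigned int rqdpc = *rqdpc_reg;

        *rqdpc_reg = 0;
        if (rqdpc)
                total_drops += rqdpc;
}

int main(void)
{
        unsigned int reg = 7;

        update_stats(&reg);
        update_stats(&reg);             /* second pass adds nothing */
        printf("drops = %llu\n", total_drops);
        return 0;
}
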
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9209d652e1c9..ab25e49365f7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
389 adapter->ptp_tx_skb = NULL; 389 adapter->ptp_tx_skb = NULL;
390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); 390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
391 adapter->tx_hwtstamp_timeouts++; 391 adapter->tx_hwtstamp_timeouts++;
392 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); 392 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
393 return; 393 return;
394 } 394 }
395 395
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
451 rd32(E1000_RXSTMPH); 451 rd32(E1000_RXSTMPH);
452 adapter->last_rx_ptp_check = jiffies; 452 adapter->last_rx_ptp_check = jiffies;
453 adapter->rx_hwtstamp_cleared++; 453 adapter->rx_hwtstamp_cleared++;
454 dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); 454 dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
455 } 455 }
456} 456}
457 457
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1a12c1dd7a27..c6c4ca7d68e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -256,7 +256,6 @@ struct ixgbe_ring {
256 struct ixgbe_tx_buffer *tx_buffer_info; 256 struct ixgbe_tx_buffer *tx_buffer_info;
257 struct ixgbe_rx_buffer *rx_buffer_info; 257 struct ixgbe_rx_buffer *rx_buffer_info;
258 }; 258 };
259 unsigned long last_rx_timestamp;
260 unsigned long state; 259 unsigned long state;
261 u8 __iomem *tail; 260 u8 __iomem *tail;
262 dma_addr_t dma; /* phys. address of descriptor ring */ 261 dma_addr_t dma; /* phys. address of descriptor ring */
@@ -770,6 +769,7 @@ struct ixgbe_adapter {
770 unsigned long ptp_tx_start; 769 unsigned long ptp_tx_start;
771 unsigned long last_overflow_check; 770 unsigned long last_overflow_check;
772 unsigned long last_rx_ptp_check; 771 unsigned long last_rx_ptp_check;
772 unsigned long last_rx_timestamp;
773 spinlock_t tmreg_lock; 773 spinlock_t tmreg_lock;
774 struct cyclecounter cc; 774 struct cyclecounter cc;
775 struct timecounter tc; 775 struct timecounter tc;
@@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
944void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 944void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
945void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 945void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
946void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); 946void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
947void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 947void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
948 struct sk_buff *skb);
949static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
950 union ixgbe_adv_rx_desc *rx_desc,
951 struct sk_buff *skb)
952{
953 if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
954 return;
955
956 __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
957
958 /*
959 * Update the last_rx_timestamp timer in order to enable watchdog check
960 * for error case of latched timestamp on a dropped packet.
961 */
962 rx_ring->last_rx_timestamp = jiffies;
963}
964
965int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 948int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
966int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 949int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
967void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 950void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 24fba39e194e..981b8a7b100d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1195 */ 1195 */
1196 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1196 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1197 1197
1198 hw_dbg(hw, "Detected EEPROM page size = %d words.", 1198 hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
1199 hw->eeprom.word_page_size); 1199 hw->eeprom.word_page_size);
1200out: 1200out:
1201 return status; 1201 return status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f..d62e7a25cf97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1664 1664
1665 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1665 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1666 1666
1667 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 1667 if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1668 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
1668 1669
1669 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1670 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1670 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1671 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 23f765263f12..a76af8e28a04 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
536 536
537 if (time_out == max_time_out) { 537 if (time_out == max_time_out) {
538 status = IXGBE_ERR_LINK_SETUP; 538 status = IXGBE_ERR_LINK_SETUP;
539 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); 539 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
540 } 540 }
541 541
542 return status; 542 return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
745 745
746 if (time_out == max_time_out) { 746 if (time_out == max_time_out) {
747 status = IXGBE_ERR_LINK_SETUP; 747 status = IXGBE_ERR_LINK_SETUP;
748 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); 748 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
749 } 749 }
750 750
751 return status; 751 return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1175 status = 0; 1175 status = 0;
1176 } else { 1176 } else {
1177 if (hw->allow_unsupported_sfp) { 1177 if (hw->allow_unsupported_sfp) {
1178 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); 1178 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1179 status = 0; 1179 status = 0;
1180 } else { 1180 } else {
1181 hw_dbg(hw, 1181 hw_dbg(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 63515a6f67fa..8902ae683457 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
435void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) 435void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
436{ 436{
437 struct ixgbe_hw *hw = &adapter->hw; 437 struct ixgbe_hw *hw = &adapter->hw;
438 struct ixgbe_ring *rx_ring;
439 u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 438 u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
440 unsigned long rx_event; 439 unsigned long rx_event;
441 int n;
442 440
443 /* if we don't have a valid timestamp in the registers, just update the 441 /* if we don't have a valid timestamp in the registers, just update the
444 * timeout counter and exit 442 * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
450 448
451 /* determine the most recent watchdog or rx_timestamp event */ 449 /* determine the most recent watchdog or rx_timestamp event */
452 rx_event = adapter->last_rx_ptp_check; 450 rx_event = adapter->last_rx_ptp_check;
453 for (n = 0; n < adapter->num_rx_queues; n++) { 451 if (time_after(adapter->last_rx_timestamp, rx_event))
454 rx_ring = adapter->rx_ring[n]; 452 rx_event = adapter->last_rx_timestamp;
455 if (time_after(rx_ring->last_rx_timestamp, rx_event))
456 rx_event = rx_ring->last_rx_timestamp;
457 }
458 453
459 /* only need to read the high RXSTMP register to clear the lock */ 454 /* only need to read the high RXSTMP register to clear the lock */
460 if (time_is_before_jiffies(rx_event + 5*HZ)) { 455 if (time_is_before_jiffies(rx_event + 5*HZ)) {
461 IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 456 IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
462 adapter->last_rx_ptp_check = jiffies; 457 adapter->last_rx_ptp_check = jiffies;
463 458
464 e_warn(drv, "clearing RX Timestamp hang"); 459 e_warn(drv, "clearing RX Timestamp hang\n");
465 } 460 }
466} 461}
467 462
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
517 dev_kfree_skb_any(adapter->ptp_tx_skb); 512 dev_kfree_skb_any(adapter->ptp_tx_skb);
518 adapter->ptp_tx_skb = NULL; 513 adapter->ptp_tx_skb = NULL;
519 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 514 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
520 e_warn(drv, "clearing Tx Timestamp hang"); 515 e_warn(drv, "clearing Tx Timestamp hang\n");
521 return; 516 return;
522 } 517 }
523 518
@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
530} 525}
531 526
532/** 527/**
533 * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 528 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
534 * @q_vector: structure containing interrupt and ring information 529 * @adapter: pointer to adapter struct
535 * @skb: particular skb to send timestamp with 530 * @skb: particular skb to send timestamp with
536 * 531 *
537 * if the timestamp is valid, we convert it into the timecounter ns 532 * if the timestamp is valid, we convert it into the timecounter ns
538 * value, then store that result into the shhwtstamps structure which 533 * value, then store that result into the shhwtstamps structure which
539 * is passed up the network stack 534 * is passed up the network stack
540 */ 535 */
541void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 536void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
542 struct sk_buff *skb)
543{ 537{
544 struct ixgbe_adapter *adapter; 538 struct ixgbe_hw *hw = &adapter->hw;
545 struct ixgbe_hw *hw;
546 struct skb_shared_hwtstamps *shhwtstamps; 539 struct skb_shared_hwtstamps *shhwtstamps;
547 u64 regval = 0, ns; 540 u64 regval = 0, ns;
548 u32 tsyncrxctl; 541 u32 tsyncrxctl;
549 unsigned long flags; 542 unsigned long flags;
550 543
551 /* we cannot process timestamps on a ring without a q_vector */
552 if (!q_vector || !q_vector->adapter)
553 return;
554
555 adapter = q_vector->adapter;
556 hw = &adapter->hw;
557
558 /*
559 * Read the tsyncrxctl register afterwards in order to prevent taking an
560 * I/O hit on every packet.
561 */
562 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 544 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
563 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) 545 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
564 return; 546 return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
566 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 548 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
567 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; 549 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
568 550
569
570 spin_lock_irqsave(&adapter->tmreg_lock, flags); 551 spin_lock_irqsave(&adapter->tmreg_lock, flags);
571 ns = timecounter_cyc2time(&adapter->tc, regval); 552 ns = timecounter_cyc2time(&adapter->tc, regval);
572 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 553 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
573 554
574 shhwtstamps = skb_hwtstamps(skb); 555 shhwtstamps = skb_hwtstamps(skb);
575 shhwtstamps->hwtstamp = ns_to_ktime(ns); 556 shhwtstamps->hwtstamp = ns_to_ktime(ns);
557
558 /* Update the last_rx_timestamp timer in order to enable watchdog check
559 * for error case of latched timestamp on a dropped packet.
560 */
561 adapter->last_rx_timestamp = jiffies;
576} 562}
577 563
578int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) 564int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
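
With the latched Rx timestamp being a single per-adapter hardware resource, the watchdog no longer scans every ring for last_rx_timestamp; the ixgbe.h and ixgbe_ptp.c hunks move the field into the adapter so one comparison suffices. The wraparound-safe comparison idiom, standalone:

#include <stdio.h>

/* Wraparound-safe "a after b" test, same idiom as the kernel's
 * time_after() used in ixgbe_ptp_rx_hang() above. */
static int time_after_l(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        unsigned long last_rx_ptp_check = 1000;
        unsigned long last_rx_timestamp = 4000;  /* now adapter-wide */
        unsigned long rx_event = last_rx_ptp_check;

        /* One comparison replaces the old per-ring loop */
        if (time_after_l(last_rx_timestamp, rx_event))
                rx_event = last_rx_timestamp;
        printf("watchdog reference event = %lu\n", rx_event);
        return 0;
}
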
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
1988 return idx; 1988 return idx;
1989} 1989}
1990 1990
1991static void 1991static int
1992jme_fill_tx_map(struct pci_dev *pdev, 1992jme_fill_tx_map(struct pci_dev *pdev,
1993 struct txdesc *txdesc, 1993 struct txdesc *txdesc,
1994 struct jme_buffer_info *txbi, 1994 struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
2005 len, 2005 len,
2006 PCI_DMA_TODEVICE); 2006 PCI_DMA_TODEVICE);
2007 2007
2008 if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
2009 return -EINVAL;
2010
2008 pci_dma_sync_single_for_device(pdev, 2011 pci_dma_sync_single_for_device(pdev,
2009 dmaaddr, 2012 dmaaddr,
2010 len, 2013 len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
2021 2024
2022 txbi->mapping = dmaaddr; 2025 txbi->mapping = dmaaddr;
2023 txbi->len = len; 2026 txbi->len = len;
2027 return 0;
2024} 2028}
2025 2029
2026static void 2030static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
2031{
2032 struct jme_ring *txring = &(jme->txring[0]);
2033 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2034 int mask = jme->tx_ring_mask;
2035 int j;
2036
2037 for (j = 0 ; j < count ; j++) {
2038 ctxbi = txbi + ((startidx + j + 2) & (mask));
2039 pci_unmap_page(jme->pdev,
2040 ctxbi->mapping,
2041 ctxbi->len,
2042 PCI_DMA_TODEVICE);
2043
2044 ctxbi->mapping = 0;
2045 ctxbi->len = 0;
2046 }
2047
2048}
2049
2050static int
2027jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 2051jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2028{ 2052{
2029 struct jme_ring *txring = &(jme->txring[0]); 2053 struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2034 int mask = jme->tx_ring_mask; 2058 int mask = jme->tx_ring_mask;
2035 const struct skb_frag_struct *frag; 2059 const struct skb_frag_struct *frag;
2036 u32 len; 2060 u32 len;
2061 int ret = 0;
2037 2062
2038 for (i = 0 ; i < nr_frags ; ++i) { 2063 for (i = 0 ; i < nr_frags ; ++i) {
2039 frag = &skb_shinfo(skb)->frags[i]; 2064 frag = &skb_shinfo(skb)->frags[i];
2040 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 2065 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2041 ctxbi = txbi + ((idx + i + 2) & (mask)); 2066 ctxbi = txbi + ((idx + i + 2) & (mask));
2042 2067
2043 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 2068 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2044 skb_frag_page(frag), 2069 skb_frag_page(frag),
2045 frag->page_offset, skb_frag_size(frag), hidma); 2070 frag->page_offset, skb_frag_size(frag), hidma);
2071 if (ret) {
2072 jme_drop_tx_map(jme, idx, i);
2073 goto out;
2074 }
2075
2046 } 2076 }
2047 2077
2048 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 2078 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2049 ctxdesc = txdesc + ((idx + 1) & (mask)); 2079 ctxdesc = txdesc + ((idx + 1) & (mask));
2050 ctxbi = txbi + ((idx + 1) & (mask)); 2080 ctxbi = txbi + ((idx + 1) & (mask));
2051 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 2081 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2052 offset_in_page(skb->data), len, hidma); 2082 offset_in_page(skb->data), len, hidma);
2083 if (ret)
2084 jme_drop_tx_map(jme, idx, i);
2085
2086out:
2087 return ret;
2053 2088
2054} 2089}
2055 2090
2091
2056static int 2092static int
2057jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2093jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2058{ 2094{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2131 struct txdesc *txdesc; 2167 struct txdesc *txdesc;
2132 struct jme_buffer_info *txbi; 2168 struct jme_buffer_info *txbi;
2133 u8 flags; 2169 u8 flags;
2170 int ret = 0;
2134 2171
2135 txdesc = (struct txdesc *)txring->desc + idx; 2172 txdesc = (struct txdesc *)txring->desc + idx;
2136 txbi = txring->bufinf + idx; 2173 txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2155 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) 2192 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2156 jme_tx_csum(jme, skb, &flags); 2193 jme_tx_csum(jme, skb, &flags);
2157 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); 2194 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2158 jme_map_tx_skb(jme, skb, idx); 2195 ret = jme_map_tx_skb(jme, skb, idx);
2196 if (ret)
2197 return ret;
2198
2159 txdesc->desc1.flags = flags; 2199 txdesc->desc1.flags = flags;
2160 /* 2200 /*
2161 * Set tx buffer info after telling NIC to send 2201 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2228 return NETDEV_TX_BUSY; 2268 return NETDEV_TX_BUSY;
2229 } 2269 }
2230 2270
2231 jme_fill_tx_desc(jme, skb, idx); 2271 if (jme_fill_tx_desc(jme, skb, idx))
2272 return NETDEV_TX_OK;
2232 2273
2233 jwrite32(jme, JME_TXCS, jme->reg_txcs | 2274 jwrite32(jme, JME_TXCS, jme->reg_txcs |
2234 TXCS_SELECT_QUEUE0 | 2275 TXCS_SELECT_QUEUE0 |
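
The jme hunks finally check pci_dma_mapping_error() and, on failure, unwind the fragments already mapped via the new jme_drop_tx_map() before giving up on the skb. A toy ring walk showing the unwind over the same (idx + j + 2) slot arithmetic:

#include <stdio.h>

#define RING_SIZE 8
#define RING_MASK (RING_SIZE - 1)

static int mapped[RING_SIZE];

/* Stand-in mapper that fails at fragment fail_at, like a DMA
 * mapping error reported by pci_dma_mapping_error(). */
static int fake_map(int slot, int fail_at, int i)
{
        if (i == fail_at)
                return -1;
        mapped[slot] = 1;
        return 0;
}

/* Mirror of jme_drop_tx_map(): undo mappings for frags 0..count-1 */
static void drop_tx_map(int startidx, int count)
{
        for (int j = 0; j < count; j++)
                mapped[(startidx + j + 2) & RING_MASK] = 0;
}

int main(void)
{
        int idx = 0, nr_frags = 4, i;

        for (i = 0; i < nr_frags; i++)
                if (fake_map((idx + i + 2) & RING_MASK, 2, i)) {
                        drop_tx_map(idx, i);    /* unwind frags 0..i-1 */
                        break;
                }
        for (i = 0; i < RING_SIZE; i++)
                printf("%d", mapped[i]);        /* all zeros again */
        printf("\n");
        return 0;
}
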
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index b161a525fc5b..9d5ced263a5e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
232 clk_prepare_enable(dev->clk); 232 clk_prepare_enable(dev->clk);
233 233
234 dev->err_interrupt = platform_get_irq(pdev, 0); 234 dev->err_interrupt = platform_get_irq(pdev, 0);
235 if (dev->err_interrupt != -ENXIO) { 235 if (dev->err_interrupt > 0) {
236 ret = devm_request_irq(&pdev->dev, dev->err_interrupt, 236 ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
237 orion_mdio_err_irq, 237 orion_mdio_err_irq,
238 IRQF_SHARED, pdev->name, dev); 238 IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
241 241
242 writel(MVMDIO_ERR_INT_SMI_DONE, 242 writel(MVMDIO_ERR_INT_SMI_DONE,
243 dev->regs + MVMDIO_ERR_INT_MASK); 243 dev->regs + MVMDIO_ERR_INT_MASK);
244
245 } else if (dev->err_interrupt == -EPROBE_DEFER) {
246 return -EPROBE_DEFER;
244 } 247 }
245 248
246 mutex_init(&dev->lock); 249 mutex_init(&dev->lock);
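
orion_mdio_probe() previously treated every platform_get_irq() result other than -ENXIO as a valid interrupt number. The new logic only uses strictly positive values and propagates -EPROBE_DEFER so the probe is retried later; everything else falls back to polled completion. The decision tree, standalone (EPROBE_DEFER is the kernel-internal 517):

#include <stdio.h>

#define ENXIO        6
#define EPROBE_DEFER 517        /* kernel-internal errno */

/* Decision structure of the reworked orion_mdio_probe() */
static const char *irq_policy(int err_interrupt)
{
        if (err_interrupt > 0)
                return "request irq, unmask SMI-done interrupt";
        if (err_interrupt == -EPROBE_DEFER)
                return "return -EPROBE_DEFER (retry probe later)";
        return "poll for completion";
}

int main(void)
{
        printf("%4d: %s\n", 34, irq_policy(34));
        printf("%4d: %s\n", -EPROBE_DEFER, irq_policy(-EPROBE_DEFER));
        printf("%4d: %s\n", -ENXIO, irq_policy(-ENXIO));
        return 0;
}
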
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
1253 }, 1253 },
1254 { 1254 {
1255 .opcode = MLX4_CMD_UPDATE_QP, 1255 .opcode = MLX4_CMD_UPDATE_QP,
1256 .has_inbox = false, 1256 .has_inbox = true,
1257 .has_outbox = false, 1257 .has_outbox = false,
1258 .out_is_imm = false, 1258 .out_is_imm = false,
1259 .encode_slave_id = false, 1259 .encode_slave_id = false,
1260 .verify = NULL, 1260 .verify = NULL,
1261 .wrapper = mlx4_CMD_EPERM_wrapper 1261 .wrapper = mlx4_UPDATE_QP_wrapper
1262 }, 1262 },
1263 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ, 1264 .opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cef267e24f9c..c187d748115f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
754 has_eth_port = true; 754 has_eth_port = true;
755 } 755 }
756 756
757 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
758 request_module_nowait(IB_DRV_NAME);
759 if (has_eth_port) 757 if (has_eth_port)
760 request_module_nowait(EN_DRV_NAME); 758 request_module_nowait(EN_DRV_NAME);
759 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
760 request_module_nowait(IB_DRV_NAME);
761} 761}
762 762
763/* 763/*
@@ -2044,6 +2044,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2044 if (!mlx4_is_slave(dev)) { 2044 if (!mlx4_is_slave(dev)) {
2045 mlx4_init_mac_table(dev, &info->mac_table); 2045 mlx4_init_mac_table(dev, &info->mac_table);
2046 mlx4_init_vlan_table(dev, &info->vlan_table); 2046 mlx4_init_vlan_table(dev, &info->vlan_table);
2047 mlx4_init_roce_gid_table(dev, &info->gid_table);
2047 info->base_qpn = mlx4_get_base_qpn(dev, port); 2048 info->base_qpn = mlx4_get_base_qpn(dev, port);
2048 } 2049 }
2049 2050
@@ -2440,7 +2441,8 @@ slave_start:
2440 * No return code for this call, just warn the user in case of PCI 2441 * No return code for this call, just warn the user in case of PCI
2441 * express device capabilities are under-satisfied by the bus. 2442 * express device capabilities are under-satisfied by the bus.
2442 */ 2443 */
2443 mlx4_check_pcie_caps(dev); 2444 if (!mlx4_is_slave(dev))
2445 mlx4_check_pcie_caps(dev);
2444 2446
2445 /* In master functions, the communication channel must be initialized 2447 /* In master functions, the communication channel must be initialized
2446 * after obtaining its address from fw */ 2448 * after obtaining its address from fw */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..8e9eb02e09cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -695,6 +695,17 @@ struct mlx4_mac_table {
695 int max; 695 int max;
696}; 696};
697 697
698#define MLX4_ROCE_GID_ENTRY_SIZE 16
699
700struct mlx4_roce_gid_entry {
701 u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
702};
703
704struct mlx4_roce_gid_table {
705 struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
706 struct mutex mutex;
707};
708
698#define MLX4_MAX_VLAN_NUM 128 709#define MLX4_MAX_VLAN_NUM 128
699#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) 710#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
700 711
@@ -758,6 +769,7 @@ struct mlx4_port_info {
758 struct device_attribute port_mtu_attr; 769 struct device_attribute port_mtu_attr;
759 struct mlx4_mac_table mac_table; 770 struct mlx4_mac_table mac_table;
760 struct mlx4_vlan_table vlan_table; 771 struct mlx4_vlan_table vlan_table;
772 struct mlx4_roce_gid_table gid_table;
761 int base_qpn; 773 int base_qpn;
762}; 774};
763 775
@@ -788,10 +800,6 @@ enum {
788 MLX4_USE_RR = 1, 800 MLX4_USE_RR = 1,
789}; 801};
790 802
791struct mlx4_roce_gid_entry {
792 u8 raw[16];
793};
794
795struct mlx4_priv { 803struct mlx4_priv {
796 struct mlx4_dev dev; 804 struct mlx4_dev dev;
797 805
@@ -839,7 +847,6 @@ struct mlx4_priv {
839 int fs_hash_mode; 847 int fs_hash_mode;
840 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 848 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
841 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 849 __be64 slave_node_guids[MLX4_MFUNC_MAX];
842 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
843 850
844 atomic_t opreq_count; 851 atomic_t opreq_count;
845 struct work_struct opreq_task; 852 struct work_struct opreq_task;
@@ -1140,6 +1147,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
1140 1147
1141void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1148void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
1142void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1149void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
1150void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
1151 struct mlx4_roce_gid_table *table);
1143void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1152void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
1144int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1153int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1145 1154
@@ -1149,6 +1158,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
1149 enum mlx4_resource resource_type, 1158 enum mlx4_resource resource_type,
1150 u64 resource_id, int *slave); 1159 u64 resource_id, int *slave);
1151void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); 1160void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
1161void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
1152int mlx4_init_resource_tracker(struct mlx4_dev *dev); 1162int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1153 1163
1154void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1164void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -1195,6 +1205,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1195 struct mlx4_cmd_mailbox *outbox, 1205 struct mlx4_cmd_mailbox *outbox,
1196 struct mlx4_cmd_info *cmd); 1206 struct mlx4_cmd_info *cmd);
1197 1207
1208int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1209 struct mlx4_vhcr *vhcr,
1210 struct mlx4_cmd_mailbox *inbox,
1211 struct mlx4_cmd_mailbox *outbox,
1212 struct mlx4_cmd_info *cmd);
1213
1198int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1214int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1199 struct mlx4_vhcr *vhcr, 1215 struct mlx4_vhcr *vhcr,
1200 struct mlx4_cmd_mailbox *inbox, 1216 struct mlx4_cmd_mailbox *inbox,
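
The RoCE GID array moves out of mlx4_priv, where it was indexed [port][gid], into a per-port mlx4_roce_gid_table that carries its own mutex, which port.c then takes around SET_PORT updates. A userspace analogue of the structure and its init (MLX4_ROCE_MAX_GIDS is not shown in these hunks, so 128 below is illustrative, and a pthread mutex stands in for the kernel one):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MLX4_ROCE_GID_ENTRY_SIZE 16
#define MLX4_ROCE_MAX_GIDS 128          /* illustrative table size */

struct roce_gid_entry { unsigned char raw[MLX4_ROCE_GID_ENTRY_SIZE]; };

/* Per-port table: entries plus a lock serializing updates */
struct roce_gid_table {
        struct roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
        pthread_mutex_t mutex;
};

/* Analogue of mlx4_init_roce_gid_table(): init the lock, zero entries */
static void init_roce_gid_table(struct roce_gid_table *t)
{
        pthread_mutex_init(&t->mutex, NULL);
        for (int i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
                memset(t->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}

int main(void)
{
        static struct roce_gid_table table;

        init_roce_gid_table(&table);
        printf("entries: %d, entry size: %d\n",
               MLX4_ROCE_MAX_GIDS, MLX4_ROCE_GID_ENTRY_SIZE);
        return 0;
}
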
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index cfcad26ed40f..5ec6f203c6e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
75 table->total = 0; 75 table->total = 0;
76} 76}
77 77
78void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
79 struct mlx4_roce_gid_table *table)
80{
81 int i;
82
83 mutex_init(&table->mutex);
84 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
85 memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
86}
87
78static int validate_index(struct mlx4_dev *dev, 88static int validate_index(struct mlx4_dev *dev,
79 struct mlx4_mac_table *table, int index) 89 struct mlx4_mac_table *table, int index)
80{ 90{
@@ -584,6 +594,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
584} 594}
585EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 595EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
586 596
597static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
598 int port, struct mlx4_cmd_mailbox *mailbox)
599{
600 struct mlx4_roce_gid_entry *gid_entry_mbox;
601 struct mlx4_priv *priv = mlx4_priv(dev);
602 int num_gids, base, offset;
603 int i, err;
604
605 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
606 base = mlx4_get_base_gid_ix(dev, slave, port);
607
608 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
609
610 mutex_lock(&(priv->port[port].gid_table.mutex));
611 /* Zero-out gids belonging to that slave in the port GID table */
612 for (i = 0, offset = base; i < num_gids; offset++, i++)
613 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
614 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
615
616 /* Now, copy roce port gids table to mailbox for passing to FW */
617 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
618 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
619 memcpy(gid_entry_mbox->raw,
620 priv->port[port].gid_table.roce_gids[i].raw,
621 MLX4_ROCE_GID_ENTRY_SIZE);
622
623 err = mlx4_cmd(dev, mailbox->dma,
624 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
625 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
626 MLX4_CMD_NATIVE);
627 mutex_unlock(&(priv->port[port].gid_table.mutex));
628 return err;
629}
630
631
632void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
633{
634 struct mlx4_active_ports actv_ports;
635 struct mlx4_cmd_mailbox *mailbox;
636 int num_eth_ports, err;
637 int i;
638
639 if (slave < 0 || slave > dev->num_vfs)
640 return;
641
642 actv_ports = mlx4_get_active_ports(dev, slave);
643
644 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
645 if (test_bit(i, actv_ports.ports)) {
646 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
647 continue;
648 num_eth_ports++;
649 }
650 }
651
652 if (!num_eth_ports)
653 return;
654
655 /* have ETH ports. Alloc mailbox for SET_PORT command */
656 mailbox = mlx4_alloc_cmd_mailbox(dev);
657 if (IS_ERR(mailbox))
658 return;
659
660 for (i = 0; i < dev->caps.num_ports; i++) {
661 if (test_bit(i, actv_ports.ports)) {
662 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
663 continue;
664 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
665 if (err)
666 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
667 slave, i + 1, err);
668 }
669 }
670
671 mlx4_free_cmd_mailbox(dev, mailbox);
672 return;
673}
674
587static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 675static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
588 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 676 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
589{ 677{
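
mlx4_reset_roce_port_gids() above zeroes only the GID range owned by the departing slave and then pushes the whole table to firmware via SET_PORT under the new per-port mutex. The range-zeroing core, as a small standalone demo:

#include <stdio.h>
#include <string.h>

#define GID_SIZE 16
#define MAX_GIDS 16                     /* small table for the demo */

static unsigned char gids[MAX_GIDS][GID_SIZE];

/* Zero only the GID entries base..base+num_gids-1, same loop shape
 * as the zero-out pass in mlx4_reset_roce_port_gids() above. */
static void zero_slave_gids(int base, int num_gids)
{
        for (int i = 0, offset = base; i < num_gids; offset++, i++)
                memset(gids[offset], 0, GID_SIZE);
}

int main(void)
{
        memset(gids, 0xaa, sizeof(gids));
        zero_slave_gids(4, 3);          /* e.g. slave owns entries 4..6 */
        for (int i = 0; i < MAX_GIDS; i++)
                printf("%2d: 0x%02x\n", i, gids[i][0]);
        return 0;
}
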
@@ -692,10 +780,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
692 /* 2. Check that do not have duplicates in OTHER 780 /* 2. Check that do not have duplicates in OTHER
693 * entries in the port GID table 781 * entries in the port GID table
694 */ 782 */
783
784 mutex_lock(&(priv->port[port].gid_table.mutex));
695 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 785 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
696 if (i >= base && i < base + num_gids) 786 if (i >= base && i < base + num_gids)
697 continue; /* don't compare to slave's current gids */ 787 continue; /* don't compare to slave's current gids */
698 gid_entry_tbl = &priv->roce_gids[port - 1][i]; 788 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
699 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 789 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
700 continue; 790 continue;
701 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 791 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -709,6 +799,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
709 mlx4_warn(dev, "requested gid entry for slave:%d " 799 mlx4_warn(dev, "requested gid entry for slave:%d "
710 "is a duplicate of gid at index %d\n", 800 "is a duplicate of gid at index %d\n",
711 slave, i); 801 slave, i);
802 mutex_unlock(&(priv->port[port].gid_table.mutex));
712 return -EINVAL; 803 return -EINVAL;
713 } 804 }
714 } 805 }
@@ -717,16 +808,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
717 /* insert slave GIDs with memcpy, starting at slave's base index */ 808 /* insert slave GIDs with memcpy, starting at slave's base index */
718 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 809 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
719 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) 810 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
720 memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); 811 memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
812 gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
721 813
722 /* Now, copy roce port gids table to current mailbox for passing to FW */ 814 /* Now, copy roce port gids table to current mailbox for passing to FW */
723 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 815 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
724 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 816 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
725 memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); 817 memcpy(gid_entry_mbox->raw,
726 818 priv->port[port].gid_table.roce_gids[i].raw,
727 break; 819 MLX4_ROCE_GID_ENTRY_SIZE);
820
821 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
822 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
823 MLX4_CMD_NATIVE);
824 mutex_unlock(&(priv->port[port].gid_table.mutex));
825 return err;
728 } 826 }
729 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 827
828 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
730 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 829 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
731 MLX4_CMD_NATIVE); 830 MLX4_CMD_NATIVE);
732 } 831 }
@@ -1099,13 +1198,17 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1099 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 1198 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1100 1199
1101 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1200 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1102 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { 1201 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1202 MLX4_ROCE_GID_ENTRY_SIZE)) {
1103 found_ix = i; 1203 found_ix = i;
1104 break; 1204 break;
1105 } 1205 }
1106 } 1206 }
1107 1207
1108 if (found_ix >= 0) { 1208 if (found_ix >= 0) {
1209 /* Calculate a slave_gid which is the slave number in the gid
1210 * table and not a globally unique slave number.
1211 */
1109 if (found_ix < MLX4_ROCE_PF_GIDS) 1212 if (found_ix < MLX4_ROCE_PF_GIDS)
1110 slave_gid = 0; 1213 slave_gid = 0;
1111 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * 1214 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1221,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1118 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / 1221 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1119 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; 1222 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1120 1223
1224 /* Calculate the globally unique slave id */
1121 if (slave_gid) { 1225 if (slave_gid) {
1122 struct mlx4_active_ports exclusive_ports; 1226 struct mlx4_active_ports exclusive_ports;
1123 struct mlx4_active_ports actv_ports; 1227 struct mlx4_active_ports actv_ports;
1124 struct mlx4_slaves_pport slaves_pport_actv; 1228 struct mlx4_slaves_pport slaves_pport_actv;
1125 unsigned max_port_p_one; 1229 unsigned max_port_p_one;
1126 int num_slaves_before = 1; 1230 int num_vfs_before = 0;
1231 int candidate_slave_gid;
1127 1232
1233 /* Calculate how many VFs are on the previous port, if exists */
1128 for (i = 1; i < port; i++) { 1234 for (i = 1; i < port; i++) {
1129 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1235 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1130 set_bit(i, exclusive_ports.ports); 1236 set_bit(i - 1, exclusive_ports.ports);
1131 slaves_pport_actv = 1237 slaves_pport_actv =
1132 mlx4_phys_to_slaves_pport_actv( 1238 mlx4_phys_to_slaves_pport_actv(
1133 dev, &exclusive_ports); 1239 dev, &exclusive_ports);
1134 num_slaves_before += bitmap_weight( 1240 num_vfs_before += bitmap_weight(
1135 slaves_pport_actv.slaves, 1241 slaves_pport_actv.slaves,
1136 dev->num_vfs + 1); 1242 dev->num_vfs + 1);
1137 } 1243 }
1138 1244
1139 if (slave_gid < num_slaves_before) { 1245 /* candidate_slave_gid isn't necessarily the correct slave, but
1140 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1246 * it has the same number of ports and is assigned to the same
1141 set_bit(port - 1, exclusive_ports.ports); 1247 * ports as the real slave we're looking for. On dual port VF,
1142 slaves_pport_actv = 1248 * slave_gid = [single port VFs on port <port>] +
1143 mlx4_phys_to_slaves_pport_actv( 1249 * [offset of the current slave from the first dual port VF] +
1144 dev, &exclusive_ports); 1250 * 1 (for the PF).
1145 slave_gid += bitmap_weight( 1251 */
1146 slaves_pport_actv.slaves, 1252 candidate_slave_gid = slave_gid + num_vfs_before;
1147 dev->num_vfs + 1) - 1253
1148 num_slaves_before; 1254 actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1149 }
1150 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1151 max_port_p_one = find_first_bit( 1255 max_port_p_one = find_first_bit(
1152 actv_ports.ports, dev->caps.num_ports) + 1256 actv_ports.ports, dev->caps.num_ports) +
1153 bitmap_weight(actv_ports.ports, 1257 bitmap_weight(actv_ports.ports,
1154 dev->caps.num_ports) + 1; 1258 dev->caps.num_ports) + 1;
1155 1259
1260 /* Calculate the real slave number */
1156 for (i = 1; i < max_port_p_one; i++) { 1261 for (i = 1; i < max_port_p_one; i++) {
1157 if (i == port) 1262 if (i == port)
1158 continue; 1263 continue;
@@ -1182,7 +1287,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1182 if (!mlx4_is_master(dev)) 1287 if (!mlx4_is_master(dev))
1183 return -EINVAL; 1288 return -EINVAL;
1184 1289
1185 memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); 1290 memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1291 MLX4_ROCE_GID_ENTRY_SIZE);
1186 return 0; 1292 return 0;
1187} 1293}
1188EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); 1294EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
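
The port.c hunks above serialize RoCE GID updates: the table moves into a per-port gid_table whose mutex is held from the duplicate scan through the copy into the table and on through the SET_PORT firmware command, and the duplicate-error path now unlocks before returning -EINVAL. Below is a minimal user-space sketch of that lock/scan/copy pattern; every type and name is a stand-in, only the table-per-port shape and the 16-byte entry echo the hunks.

/* Minimal user-space model of the duplicate-GID scan done under the
 * per-port gid_table mutex in the hunks above. Every type and name
 * here is a stand-in. Build: cc -pthread gid.c
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_GIDS  128                  /* stands in for MLX4_ROCE_MAX_GIDS */
#define GID_SIZE  16                   /* MLX4_ROCE_GID_ENTRY_SIZE */

struct gid_entry { unsigned char raw[GID_SIZE]; };

struct gid_table {
        pthread_mutex_t mutex;
        struct gid_entry gids[MAX_GIDS];
};

/* Install num entries at [base, base+num) unless one of them collides
 * with an entry owned by someone else; unlock on every exit path. */
static int set_slave_gids(struct gid_table *t, int base, int num,
                          const struct gid_entry *req)
{
        static const struct gid_entry zgid;   /* all-zero entry */
        int i, j, err = 0;

        pthread_mutex_lock(&t->mutex);
        for (i = 0; i < MAX_GIDS && !err; i++) {
                if (i >= base && i < base + num)
                        continue;      /* skip the slave's own slots */
                if (!memcmp(t->gids[i].raw, zgid.raw, GID_SIZE))
                        continue;      /* empty slot, nothing to clash */
                for (j = 0; j < num; j++) {
                        if (!memcmp(t->gids[i].raw, req[j].raw, GID_SIZE)) {
                                fprintf(stderr, "duplicate at index %d\n", i);
                                err = -1;      /* the -EINVAL path */
                                break;
                        }
                }
        }
        if (!err)
                for (j = 0; j < num; j++)
                        t->gids[base + j] = req[j];
        pthread_mutex_unlock(&t->mutex);
        return err;
}

int main(void)
{
        struct gid_table t = { .mutex = PTHREAD_MUTEX_INITIALIZER };
        struct gid_entry req = { .raw = { 1 } };

        t.gids[0].raw[0] = 1;          /* entry owned by someone else */
        return set_slave_gids(&t, 4, 1, &req) ? 0 : 1;  /* expect the dup */
}
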
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
389 389
390EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 390EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
391 391
392#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
393int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
394 enum mlx4_update_qp_attr attr,
395 struct mlx4_update_qp_params *params)
396{
397 struct mlx4_cmd_mailbox *mailbox;
398 struct mlx4_update_qp_context *cmd;
399 u64 pri_addr_path_mask = 0;
400 int err = 0;
401
402 mailbox = mlx4_alloc_cmd_mailbox(dev);
403 if (IS_ERR(mailbox))
404 return PTR_ERR(mailbox);
405
406 cmd = (struct mlx4_update_qp_context *)mailbox->buf;
407
408 if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
409 return -EINVAL;
410
411 if (attr & MLX4_UPDATE_QP_SMAC) {
412 pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
413 cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
414 }
415
416 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
417
418 err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
419 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
420 MLX4_CMD_NATIVE);
421
422 mlx4_free_cmd_mailbox(dev, mailbox);
423 return err;
424}
425EXPORT_SYMBOL_GPL(mlx4_update_qp);
426
392void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) 427void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
393{ 428{
394 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 429 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
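
mlx4_update_qp() above builds an UPDATE_QP mailbox and, for now, supports only the SMAC-index attribute via the primary-address-path mask. One caveat worth flagging: as shown, the attribute check returns -EINVAL after mlx4_alloc_cmd_mailbox() has already succeeded, which appears to leak the mailbox. A user-space sketch of the safer validate-before-allocate ordering, with all names as stand-ins:

/* Sketch of the ordering issue noted above: validate the request
 * before taking resources, so every early return is leak-free.
 */
#include <errno.h>
#include <stdlib.h>

#define UPDATE_QP_SUPPORTED_ATTRS 0x1   /* only the SMAC attribute */

struct mailbox { unsigned char buf[256]; };

static int update_qp(unsigned int attr, unsigned char smac_index)
{
        struct mailbox *mb;
        int err;

        /* check first: nothing is allocated yet, a bare return is safe */
        if (!attr || (attr & ~UPDATE_QP_SUPPORTED_ATTRS))
                return -EINVAL;

        mb = calloc(1, sizeof(*mb));
        if (!mb)
                return -ENOMEM;

        mb->buf[0] = smac_index;        /* stand-in for the QP context */
        err = 0;                        /* firmware command elided */

        free(mb);                       /* released on every later path */
        return err;
}

int main(void)
{
        return update_qp(0x2, 5) == -EINVAL ? 0 : 1;  /* unsupported attr */
}
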
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3b5f53ef29b2..f16e539749c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
586 } 586 }
587 /* free master's vlans */ 587 /* free master's vlans */
588 i = dev->caps.function; 588 i = dev->caps.function;
589 mlx4_reset_roce_gids(dev, i);
589 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 590 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
590 rem_slave_vlans(dev, i); 591 rem_slave_vlans(dev, i);
591 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 592 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -3733,6 +3734,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3733 } 3734 }
3734} 3735}
3735 3736
3737static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3738 u8 *gid, enum mlx4_protocol prot)
3739{
3740 int real_port;
3741
3742 if (prot != MLX4_PROT_ETH)
3743 return 0;
3744
3745 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3746 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3747 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3748 if (real_port < 0)
3749 return -EINVAL;
3750 gid[5] = real_port;
3751 }
3752
3753 return 0;
3754}
3755
3736int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3756int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3737 struct mlx4_vhcr *vhcr, 3757 struct mlx4_vhcr *vhcr,
3738 struct mlx4_cmd_mailbox *inbox, 3758 struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3788,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3768 if (err) 3788 if (err)
3769 goto ex_detach; 3789 goto ex_detach;
3770 } else { 3790 } else {
3791 err = mlx4_adjust_port(dev, slave, gid, prot);
3792 if (err)
3793 goto ex_put;
3794
3771 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); 3795 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3772 if (err) 3796 if (err)
3773 goto ex_put; 3797 goto ex_put;
@@ -3872,6 +3896,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3872 3896
3873} 3897}
3874 3898
3899#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3900int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3901 struct mlx4_vhcr *vhcr,
3902 struct mlx4_cmd_mailbox *inbox,
3903 struct mlx4_cmd_mailbox *outbox,
3904 struct mlx4_cmd_info *cmd_info)
3905{
3906 int err;
3907 u32 qpn = vhcr->in_modifier & 0xffffff;
3908 struct res_qp *rqp;
3909 u64 mac;
3910 unsigned port;
3911 u64 pri_addr_path_mask;
3912 struct mlx4_update_qp_context *cmd;
3913 int smac_index;
3914
3915 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3916
3917 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3918 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3919 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3920 return -EPERM;
3921
3922 /* Just change the smac for the QP */
3923 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3924 if (err) {
3925 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3926 return err;
3927 }
3928
3929 port = (rqp->sched_queue >> 6 & 1) + 1;
3930 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3931 err = mac_find_smac_ix_in_slave(dev, slave, port,
3932 smac_index, &mac);
3933 if (err) {
3934 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3935 qpn, smac_index);
3936 goto err_mac;
3937 }
3938
3939 err = mlx4_cmd(dev, inbox->dma,
3940 vhcr->in_modifier, 0,
3941 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3942 MLX4_CMD_NATIVE);
3943 if (err) {
3944 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
3945 goto err_mac;
3946 }
3947
3948err_mac:
3949 put_res(dev, slave, qpn, RES_QP);
3950 return err;
3951}
3952
3875int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3953int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3876 struct mlx4_vhcr *vhcr, 3954 struct mlx4_vhcr *vhcr,
3877 struct mlx4_cmd_mailbox *inbox, 3955 struct mlx4_cmd_mailbox *inbox,
@@ -4604,7 +4682,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4604void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) 4682void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4605{ 4683{
4606 struct mlx4_priv *priv = mlx4_priv(dev); 4684 struct mlx4_priv *priv = mlx4_priv(dev);
4607 4685 mlx4_reset_roce_gids(dev, slave);
4608 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 4686 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4609 rem_slave_vlans(dev, slave); 4687 rem_slave_vlans(dev, slave);
4610 rem_slave_macs(dev, slave); 4688 rem_slave_macs(dev, slave);
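
In resource_tracker.c above, both teardown paths now call mlx4_reset_roce_gids() for the slave before its other resources are torn down, and mlx4_adjust_port() rewrites byte 5 of the multicast GID, which in the B0 and device-managed steering modes carries the physical port, so the slave's logical port is translated before detach; the new UPDATE_QP wrapper whitelists only the MAC-index bit of the path mask and rejects everything else with -EPERM. A small model of the byte-5 rewrite; the port mapping below is invented purely for illustration:

/* Model of the gid[5] rewrite above. The assumption, taken from the
 * hunk, is that byte 5 of the 16-byte multicast GID carries the
 * physical port in these steering modes.
 */
#include <stdio.h>

static int convert_port(int slave_port)
{
        /* fake mlx4_slave_convert_port(): this VF's logical port 1 is
         * physical port 2 (illustrative only) */
        return slave_port == 1 ? 2 : -1;
}

static int adjust_port(unsigned char gid[16])
{
        int real_port = convert_port(gid[5]);

        if (real_port < 0)
                return -1;              /* mirrors the -EINVAL path */
        gid[5] = (unsigned char)real_port;
        return 0;
}

int main(void)
{
        unsigned char gid[16] = { [5] = 1 };

        if (adjust_port(gid))
                return 1;
        printf("gid[5] -> %u\n", gid[5]);   /* prints 2 */
        return 0;
}
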
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1719 tx_ring->producer; 1719 tx_ring->producer;
1720} 1720}
1721 1721
1722static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1723 struct net_device *netdev)
1724{
1725 int err;
1726
1727 netdev->num_tx_queues = adapter->drv_tx_rings;
1728 netdev->real_num_tx_queues = adapter->drv_tx_rings;
1729
1730 err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
1731 if (err)
1732 netdev_err(netdev, "failed to set %d Tx queues\n",
1733 adapter->drv_tx_rings);
1734
1735 return err;
1736}
1737
1738struct qlcnic_nic_template { 1722struct qlcnic_nic_template {
1739 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1723 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1740 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1724 int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a51fe18f09a8..561cb11ca58c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
1020 struct qlcnic_dcb_cee *peer; 1020 struct qlcnic_dcb_cee *peer;
1021 int i; 1021 int i;
1022 1022
1023 memset(info, 0, sizeof(*info));
1023 *app_count = 0; 1024 *app_count = 0;
1024 1025
1025 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) 1026 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dbf75393f758..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
2206 ahw->max_uc_count = count; 2206 ahw->max_uc_count = count;
2207} 2207}
2208 2208
2209static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
2210 u8 tx_queues, u8 rx_queues)
2211{
2212 struct net_device *netdev = adapter->netdev;
2213 int err = 0;
2214
2215 if (tx_queues) {
2216 err = netif_set_real_num_tx_queues(netdev, tx_queues);
2217 if (err) {
2218 netdev_err(netdev, "failed to set %d Tx queues\n",
2219 tx_queues);
2220 return err;
2221 }
2222 }
2223
2224 if (rx_queues) {
2225 err = netif_set_real_num_rx_queues(netdev, rx_queues);
2226 if (err)
2227 netdev_err(netdev, "failed to set %d Rx queues\n",
2228 rx_queues);
2229 }
2230
2231 return err;
2232}
2233
2209int 2234int
2210qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 2235qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2211 int pci_using_dac) 2236 int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2269 netdev->priv_flags |= IFF_UNICAST_FLT; 2294 netdev->priv_flags |= IFF_UNICAST_FLT;
2270 netdev->irq = adapter->msix_entries[0].vector; 2295 netdev->irq = adapter->msix_entries[0].vector;
2271 2296
2272 err = qlcnic_set_real_num_queues(adapter, netdev); 2297 err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
2298 adapter->drv_sds_rings);
2273 if (err) 2299 if (err)
2274 return err; 2300 return err;
2275 2301
@@ -2374,6 +2400,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
2374 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2400 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2375} 2401}
2376 2402
2403/* Reset firmware API lock */
2404static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
2405{
2406 qlcnic_api_lock(adapter);
2407 qlcnic_api_unlock(adapter);
2408}
2409
2410
2377static int 2411static int
2378qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2412qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2379{ 2413{
@@ -2476,6 +2510,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2476 if (qlcnic_82xx_check(adapter)) { 2510 if (qlcnic_82xx_check(adapter)) {
2477 qlcnic_check_vf(adapter, ent); 2511 qlcnic_check_vf(adapter, ent);
2478 adapter->portnum = adapter->ahw->pci_func; 2512 adapter->portnum = adapter->ahw->pci_func;
2513 qlcnic_reset_api_lock(adapter);
2479 err = qlcnic_start_firmware(adapter); 2514 err = qlcnic_start_firmware(adapter);
2480 if (err) { 2515 if (err) {
2481 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" 2516 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2934,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
2934 tx_ring->tx_stats.xmit_called, 2969 tx_ring->tx_stats.xmit_called,
2935 tx_ring->tx_stats.xmit_on, 2970 tx_ring->tx_stats.xmit_on,
2936 tx_ring->tx_stats.xmit_off); 2971 tx_ring->tx_stats.xmit_off);
2972
2973 if (tx_ring->crb_intr_mask)
2974 netdev_info(netdev, "crb_intr_mask=%d\n",
2975 readl(tx_ring->crb_intr_mask));
2976
2937 netdev_info(netdev, 2977 netdev_info(netdev,
2938 "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", 2978 "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
2939 readl(tx_ring->crb_intr_mask),
2940 readl(tx_ring->crb_cmd_producer), 2979 readl(tx_ring->crb_cmd_producer),
2941 tx_ring->producer, tx_ring->sw_consumer, 2980 tx_ring->producer, tx_ring->sw_consumer,
2942 le32_to_cpu(*(tx_ring->hw_consumer))); 2981 le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3969,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3969int qlcnic_setup_rings(struct qlcnic_adapter *adapter) 4008int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3970{ 4009{
3971 struct net_device *netdev = adapter->netdev; 4010 struct net_device *netdev = adapter->netdev;
4011 u8 tx_rings, rx_rings;
3972 int err; 4012 int err;
3973 4013
3974 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) 4014 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3975 return -EBUSY; 4015 return -EBUSY;
3976 4016
4017 tx_rings = adapter->drv_tss_rings;
4018 rx_rings = adapter->drv_rss_rings;
4019
3977 netif_device_detach(netdev); 4020 netif_device_detach(netdev);
4021
4022 err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
4023 if (err)
4024 goto done;
4025
3978 if (netif_running(netdev)) 4026 if (netif_running(netdev))
3979 __qlcnic_down(adapter, netdev); 4027 __qlcnic_down(adapter, netdev);
3980 4028
@@ -3994,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3994 return err; 4042 return err;
3995 } 4043 }
3996 4044
3997 netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); 4045 /* Check if we need to update real_num_{tx|rx}_queues because
4046 * qlcnic_setup_intr() may change Tx/Rx rings size
4047 */
4048 if ((tx_rings != adapter->drv_tx_rings) ||
4049 (rx_rings != adapter->drv_sds_rings)) {
4050 err = qlcnic_set_real_num_queues(adapter,
4051 adapter->drv_tx_rings,
4052 adapter->drv_sds_rings);
4053 if (err)
4054 goto done;
4055 }
3998 4056
3999 if (qlcnic_83xx_check(adapter)) { 4057 if (qlcnic_83xx_check(adapter)) {
4000 qlcnic_83xx_initialize_nic(adapter, 1); 4058 qlcnic_83xx_initialize_nic(adapter, 1);
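
The qlcnic_main.c changes replace the inline helper removed from qlcnic.h with a local qlcnic_set_real_num_queues() that programs Tx and Rx counts independently; qlcnic_setup_rings() re-applies the counts only when qlcnic_setup_intr() actually changed them, and qlcnic_reset_api_lock() takes and immediately drops the firmware API lock at probe, presumably to clear a lock left held by a previous driver instance. The re-apply-on-change step in miniature, with set_queues() standing in for the netif_set_real_num_*_queues() pair and invented counts:

/* Miniature of the re-apply-on-change step in qlcnic_setup_rings(). */
#include <stdio.h>

static int set_queues(unsigned char tx, unsigned char rx)
{
        printf("programming %u Tx / %u Rx queues\n", tx, rx);
        return 0;
}

int main(void)
{
        unsigned char want_tx = 4, want_rx = 4;   /* requested counts */
        unsigned char got_tx = 4, got_rx = 2;     /* after setup_intr() */

        if (set_queues(want_tx, want_rx))
                return 1;
        /* ... interrupt setup may shrink the ring counts ... */
        if (want_tx != got_tx || want_rx != got_rx)
                return set_queues(got_tx, got_rx) ? 1 : 0;
        return 0;
}
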
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0638c1810d54..6afe9c1f5ab9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1370 1370
1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans); 1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1372 if (rsp) 1372 if (rsp)
1373 return rsp; 1373 goto free_cmd;
1374 1374
1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); 1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1376 if (rsp) 1376 if (rsp)
@@ -1425,6 +1425,13 @@ err_out:
1425 1425
1426cleanup_transaction: 1426cleanup_transaction:
1427 qlcnic_sriov_cleanup_transaction(trans); 1427 qlcnic_sriov_cleanup_transaction(trans);
1428
1429free_cmd:
1430 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1431 qlcnic_free_mbx_args(cmd);
1432 kfree(cmd);
1433 }
1434
1428 return rsp; 1435 return rsp;
1429} 1436}
1430 1437
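
The SR-IOV fix above routes the early qlcnic_sriov_alloc_bc_trans() failure through the new free_cmd label, so QLC_83XX_MBX_CMD_NO_WAIT commands, whose mailbox args no caller waits around to free, are released on that path too instead of leaking. The ownership rule in a standalone sketch, with stand-in types:

/* For fire-and-forget commands the issuer owns the allocation, so
 * every failure exit must free it; "return rsp" alone leaked it.
 */
#include <stdlib.h>

enum cmd_type { CMD_WAIT, CMD_NO_WAIT };

struct cmd { enum cmd_type type; int *args; };

static int issue_cmd(struct cmd *cmd, int fail_early)
{
        int rsp = 0;

        if (fail_early) {
                rsp = -1;
                goto free_cmd;    /* was a bare return, leaking NO_WAIT cmds */
        }
        /* ... transaction setup, send, cleanup elided ... */
free_cmd:
        if (cmd->type == CMD_NO_WAIT) {
                free(cmd->args);  /* no caller is waiting to free these */
                free(cmd);
        }
        return rsp;
}

int main(void)
{
        struct cmd *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        c->type = CMD_NO_WAIT;
        c->args = calloc(4, sizeof(int));
        return issue_cmd(c, 1) == -1 ? 0 : 1;   /* early failure still frees */
}
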
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 6203c7d8550f..45019649bbbd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
358 /* Enable disable checksum offload operations */ 358 /* Enable disable checksum offload operations */
359 void (*enable_rx_csum)(void __iomem *ioaddr); 359 void (*enable_rx_csum)(void __iomem *ioaddr);
360 void (*disable_rx_csum)(void __iomem *ioaddr); 360 void (*disable_rx_csum)(void __iomem *ioaddr);
361 void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
362 void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
361}; 363};
362 364
363const struct sxgbe_core_ops *sxgbe_get_core_ops(void); 365const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index c4da7a2b002a..58c35692560e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); 165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
166} 166}
167 167
168static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
169{
170 u32 reg_val;
171
172 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
173 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
174 reg_val |= SXGBE_CORE_RXQ_ENABLE;
175 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
176}
177
178static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
179{
180 u32 reg_val;
181
182 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
183 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
184 reg_val |= SXGBE_CORE_RXQ_DISABLE;
185 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
186}
187
168static void sxgbe_set_eee_mode(void __iomem *ioaddr) 188static void sxgbe_set_eee_mode(void __iomem *ioaddr)
169{ 189{
170 u32 ctrl; 190 u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
254 .set_eee_pls = sxgbe_set_eee_pls, 274 .set_eee_pls = sxgbe_set_eee_pls,
255 .enable_rx_csum = sxgbe_enable_rx_csum, 275 .enable_rx_csum = sxgbe_enable_rx_csum,
256 .disable_rx_csum = sxgbe_disable_rx_csum, 276 .disable_rx_csum = sxgbe_disable_rx_csum,
277 .enable_rxqueue = sxgbe_core_enable_rxqueue,
278 .disable_rxqueue = sxgbe_core_disable_rxqueue,
257}; 279};
258 280
259const struct sxgbe_core_ops *sxgbe_get_core_ops(void) 281const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
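
sxgbe_core_enable_rxqueue()/..._disable_rxqueue() above do a read-modify-write on RX_CTL0: clear the queue's field with SXGBE_CORE_RXQ_ENABLE_MASK, then OR in the 0x2 (enable) or 0x0 (disable) encoding. Note that the hunk shifts the mask by queue_num but ORs the encoding in unshifted; the model below shifts both, and since the field width per queue is not visible in this hunk, treat the shift arithmetic as illustrative rather than authoritative.

/* User-space model of the RXQ enable/disable read-modify-write, on a
 * plain variable instead of an ioremapped register. Mask and
 * encodings are the hunk's; the per-queue shift is an assumption.
 */
#include <stdio.h>

#define RXQ_ENABLE_MASK 0x0003u
#define RXQ_ENABLE      0x0002u
#define RXQ_DISABLE     0x0000u

static unsigned int rx_ctl0;            /* fake SXGBE_CORE_RX_CTL0_REG */

static void set_rxqueue(int queue_num, unsigned int encoding)
{
        unsigned int reg_val = rx_ctl0;               /* readl()  */

        reg_val &= ~(RXQ_ENABLE_MASK << queue_num);   /* clear the field */
        reg_val |= encoding << queue_num;             /* write new encoding */
        rx_ctl0 = reg_val;                            /* writel() */
}

int main(void)
{
        set_rxqueue(0, RXQ_ENABLE);
        printf("RX_CTL0 = 0x%04x\n", rx_ctl0);        /* 0x0002 */
        set_rxqueue(0, RXQ_DISABLE);
        printf("RX_CTL0 = 0x%04x\n", rx_ctl0);        /* 0x0000 */
        return 0;
}
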
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
index e896dbbd2e15..2686bb5b6765 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
45 p->tdes23.tx_rd_des23.first_desc = is_fd; 45 p->tdes23.tx_rd_des23.first_desc = is_fd;
46 p->tdes23.tx_rd_des23.buf1_size = buf1_len; 46 p->tdes23.tx_rd_des23.buf1_size = buf1_len;
47 47
48 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; 48 p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
49 49
50 if (cksum) 50 if (cksum)
51 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; 51 p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
52} 52}
53 53
54/* Set VLAN control information */ 54/* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
233 p->rdes23.rx_rd_des23.own_bit = 1; 233 p->rdes23.rx_rd_des23.own_bit = 1;
234} 234}
235 235
236/* Set Interrupt on completion bit */
237static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
238{
239 p->rdes23.rx_rd_des23.int_on_com = 1;
240}
241
236/* Get the receive frame size */ 242/* Get the receive frame size */
237static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) 243static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
238{ 244{
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
498 .init_rx_desc = sxgbe_init_rx_desc, 504 .init_rx_desc = sxgbe_init_rx_desc,
499 .get_rx_owner = sxgbe_get_rx_owner, 505 .get_rx_owner = sxgbe_get_rx_owner,
500 .set_rx_owner = sxgbe_set_rx_owner, 506 .set_rx_owner = sxgbe_set_rx_owner,
507 .set_rx_int_on_com = sxgbe_set_rx_int_on_com,
501 .get_rx_frame_len = sxgbe_get_rx_frame_len, 508 .get_rx_frame_len = sxgbe_get_rx_frame_len,
502 .get_rx_fd_status = sxgbe_get_rx_fd_status, 509 .get_rx_fd_status = sxgbe_get_rx_fd_status,
503 .get_rx_ld_status = sxgbe_get_rx_ld_status, 510 .get_rx_ld_status = sxgbe_get_rx_ld_status,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
index 838cb9fb0ea9..18609324db72 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
39 u32 int_on_com:1; 39 u32 int_on_com:1;
40 /* TDES3 */ 40 /* TDES3 */
41 union { 41 union {
42 u32 tcp_payload_len:18; 42 u16 tcp_payload_len;
43 struct { 43 struct {
44 u32 total_pkt_len:15; 44 u32 total_pkt_len:15;
45 u32 reserved1:1; 45 u32 reserved1:1;
46 u32 cksum_ctl:2; 46 } pkt_len;
47 } cksum_pktlen;
48 } tx_pkt_len; 47 } tx_pkt_len;
49 48
50 u32 tse_bit:1; 49 u16 cksum_ctl:2;
51 u32 tcp_hdr_len:4; 50 u16 tse_bit:1;
52 u32 sa_insert_ctl:3; 51 u16 tcp_hdr_len:4;
53 u32 crc_pad_ctl:2; 52 u16 sa_insert_ctl:3;
54 u32 last_desc:1; 53 u16 crc_pad_ctl:2;
55 u32 first_desc:1; 54 u16 last_desc:1;
56 u32 ctxt_bit:1; 55 u16 first_desc:1;
57 u32 own_bit:1; 56 u16 ctxt_bit:1;
57 u16 own_bit:1;
58 } tx_rd_des23; 58 } tx_rd_des23;
59 59
60 /* tx write back Desc 2,3 */ 60 /* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
70 70
71struct sxgbe_rx_norm_desc { 71struct sxgbe_rx_norm_desc {
72 union { 72 union {
73 u32 rdes0; /* buf1 address */ 73 u64 rdes01; /* buf1 address */
74 struct { 74 union {
75 u32 out_vlan_tag:16; 75 u32 out_vlan_tag:16;
76 u32 in_vlan_tag:16; 76 u32 in_vlan_tag:16;
77 } wb_rx_des0; 77 u32 rss_hash;
78 } rd_wb_des0; 78 } rx_wb_des01;
79 79 } rdes01;
80 union {
81 u32 rdes1; /* buf2 address or buf1[63:32] */
82 u32 rss_hash; /* Write-back RX */
83 } rd_wb_des1;
84 80
85 union { 81 union {
86 /* RX Read format Desc 2,3 */ 82 /* RX Read format Desc 2,3 */
87 struct{ 83 struct{
88 /* RDES2 */ 84 /* RDES2 */
89 u32 buf2_addr; 85 u64 buf2_addr:62;
90 /* RDES3 */ 86 /* RDES3 */
91 u32 buf2_hi_addr:30;
92 u32 int_on_com:1; 87 u32 int_on_com:1;
93 u32 own_bit:1; 88 u32 own_bit:1;
94 } rx_rd_des23; 89 } rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
263 /* Set own bit */ 258 /* Set own bit */
264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); 259 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
265 260
261 /* Set Interrupt on completion bit */
262 void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
263
266 /* Get the receive frame size */ 264 /* Get the receive frame size */
267 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p); 265 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
268 266
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
index 4d989ff6c978..bb9b5b8afc5f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -23,21 +23,8 @@
23/* DMA core initialization */ 23/* DMA core initialization */
24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) 24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
25{ 25{
26 int retry_count = 10;
27 u32 reg_val; 26 u32 reg_val;
28 27
29 /* reset the DMA */
30 writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
31 while (retry_count--) {
32 if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
33 SXGBE_DMA_SOFT_RESET))
34 break;
35 mdelay(10);
36 }
37
38 if (retry_count < 0)
39 return -EBUSY;
40
41 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); 28 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
42 29
43 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register. 30 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 27e8c824b204..82a9a983869f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
1076 1076
1077 /* Initialize the MAC Core */ 1077 /* Initialize the MAC Core */
1078 priv->hw->mac->core_init(priv->ioaddr); 1078 priv->hw->mac->core_init(priv->ioaddr);
1079 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1080 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
1081 }
1079 1082
1080 /* Request the IRQ lines */ 1083 /* Request the IRQ lines */
1081 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, 1084 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1453 /* Added memory barrier for RX descriptor modification */ 1456 /* Added memory barrier for RX descriptor modification */
1454 wmb(); 1457 wmb();
1455 priv->hw->desc->set_rx_owner(p); 1458 priv->hw->desc->set_rx_owner(p);
1459 priv->hw->desc->set_rx_int_on_com(p);
1456 /* Added memory barrier for RX descriptor modification */ 1460 /* Added memory barrier for RX descriptor modification */
1457 wmb(); 1461 wmb();
1458 } 1462 }
@@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2070 return 0; 2074 return 0;
2071} 2075}
2072 2076
2077static int sxgbe_sw_reset(void __iomem *addr)
2078{
2079 int retry_count = 10;
2080
2081 writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
2082 while (retry_count--) {
2083 if (!(readl(addr + SXGBE_DMA_MODE_REG) &
2084 SXGBE_DMA_SOFT_RESET))
2085 break;
2086 mdelay(10);
2087 }
2088
2089 if (retry_count < 0)
2090 return -EBUSY;
2091
2092 return 0;
2093}
2094
2073/** 2095/**
2074 * sxgbe_drv_probe 2096 * sxgbe_drv_probe
2075 * @device: device pointer 2097 * @device: device pointer
@@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2102 priv->plat = plat_dat; 2124 priv->plat = plat_dat;
2103 priv->ioaddr = addr; 2125 priv->ioaddr = addr;
2104 2126
2127 ret = sxgbe_sw_reset(priv->ioaddr);
2128 if (ret)
2129 goto error_free_netdev;
2130
2105 /* Verify driver arguments */ 2131 /* Verify driver arguments */
2106 sxgbe_verify_args(); 2132 sxgbe_verify_args();
2107 2133
@@ -2218,9 +2244,14 @@ error_free_netdev:
2218int sxgbe_drv_remove(struct net_device *ndev) 2244int sxgbe_drv_remove(struct net_device *ndev)
2219{ 2245{
2220 struct sxgbe_priv_data *priv = netdev_priv(ndev); 2246 struct sxgbe_priv_data *priv = netdev_priv(ndev);
2247 u8 queue_num;
2221 2248
2222 netdev_info(ndev, "%s: removing driver\n", __func__); 2249 netdev_info(ndev, "%s: removing driver\n", __func__);
2223 2250
2251 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
2252 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
2253 }
2254
2224 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); 2255 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
2225 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); 2256 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
2226 2257
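
The DMA soft reset moves out of sxgbe_dma_init() (deleted in the sxgbe_dma.c hunk) into sxgbe_sw_reset(), run once from sxgbe_drv_probe() before anything else touches the device, and the open/remove paths now enable and disable each RX queue at the MAC. The reset itself is the usual write-then-poll-with-timeout loop; a standalone model with a self-clearing fake register:

/* Standalone model of the write-then-poll loop in sxgbe_sw_reset();
 * a self-clearing variable stands in for SXGBE_DMA_MODE_REG and the
 * mdelay(10) between polls is elided.
 */
#include <stdio.h>

#define DMA_SOFT_RESET 0x1u

static unsigned int dma_mode_reg;
static int reads_until_clear = 3;       /* "hardware" clears eventually */

static unsigned int read_reg(void)
{
        if (reads_until_clear && --reads_until_clear == 0)
                dma_mode_reg &= ~DMA_SOFT_RESET;
        return dma_mode_reg;
}

static int sw_reset(void)
{
        int retry_count = 10;

        dma_mode_reg |= DMA_SOFT_RESET;  /* kick the reset */
        while (retry_count--) {
                if (!(read_reg() & DMA_SOFT_RESET))
                        break;           /* device came back */
        }
        return retry_count < 0 ? -1 : 0; /* -EBUSY analogue on timeout */
}

int main(void)
{
        printf("reset %s\n", sw_reset() ? "timed out" : "done");
        return 0;
}
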
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index 01af2cbb479d..43ccb4a6de15 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -27,7 +27,7 @@
27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ 27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */ 28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ 29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */ 30#define SXGBE_MII_BUSY 0x00400000 /* mii busy */
31 31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) 32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{ 33{
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; 147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr; 148 int err, phy_addr;
149 int *irqlist; 149 int *irqlist;
150 bool phy_found = false;
150 bool act; 151 bool act;
151 152
152 /* allocate the new mdio bus */ 153 /* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
162 irqlist = priv->mii_irq; 163 irqlist = priv->mii_irq;
163 164
164 /* assign mii bus fields */ 165 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe"; 166 mdio_bus->name = "sxgbe";
166 mdio_bus->read = &sxgbe_mdio_read; 167 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write; 168 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", 169 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", 217 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
217 phy->phy_id, phy_addr, irq_str, 218 phy->phy_id, phy_addr, irq_str,
218 dev_name(&phy->dev), act ? " active" : ""); 219 dev_name(&phy->dev), act ? " active" : "");
220 phy_found = true;
219 } 221 }
220 } 222 }
221 223
224 if (!phy_found) {
225 netdev_err(ndev, "PHY not found\n");
226 goto phyfound_err;
227 }
228
222 priv->mii = mdio_bus; 229 priv->mii = mdio_bus;
223 230
224 return 0; 231 return 0;
225 232
233phyfound_err:
234 err = -ENODEV;
235 mdiobus_unregister(mdio_bus);
226mdiobus_err: 236mdiobus_err:
227 mdiobus_free(mdio_bus); 237 mdiobus_free(mdio_bus);
228 return err; 238 return err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 5a89acb4c505..56f8bf5a3f1b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -52,6 +52,10 @@
52#define SXGBE_CORE_RX_CTL2_REG 0x00A8 52#define SXGBE_CORE_RX_CTL2_REG 0x00A8
53#define SXGBE_CORE_RX_CTL3_REG 0x00AC 53#define SXGBE_CORE_RX_CTL3_REG 0x00AC
54 54
55#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003
56#define SXGBE_CORE_RXQ_ENABLE 0x0002
57#define SXGBE_CORE_RXQ_DISABLE 0x0000
58
55/* Interrupt Registers */ 59/* Interrupt Registers */
56#define SXGBE_CORE_INT_STATUS_REG 0x00B0 60#define SXGBE_CORE_INT_STATUS_REG 0x00B0
57#define SXGBE_CORE_INT_ENABLE_REG 0x00B4 61#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
156 efx->net_dev->rx_cpu_rmap = NULL; 156 efx->net_dev->rx_cpu_rmap = NULL;
157#endif 157#endif
158 158
159 /* Disable MSI/MSI-X interrupts */ 159 if (EFX_INT_MODE_USE_MSI(efx)) {
160 efx_for_each_channel(channel, efx) 160 /* Disable MSI/MSI-X interrupts */
161 free_irq(channel->irq, &efx->msi_context[channel->channel]); 161 efx_for_each_channel(channel, efx)
162 162 free_irq(channel->irq,
163 /* Disable legacy interrupt */ 163 &efx->msi_context[channel->channel]);
164 if (efx->legacy_irq) 164 } else {
165 /* Disable legacy interrupt */
165 free_irq(efx->legacy_irq, efx); 166 free_irq(efx->legacy_irq, efx);
167 }
166} 168}
167 169
168/* Register dump */ 170/* Register dump */
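
The efx change makes interrupt teardown symmetric with setup: free the per-channel MSI/MSI-X vectors or the single legacy IRQ, keyed off EFX_INT_MODE_USE_MSI(), never both. The same request/free symmetry in trivial standalone form, with stand-in names:

/* Tear down exactly the interrupt set that was set up. */
#include <stdio.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_LEGACY };

static int use_msi(enum int_mode mode)
{
        return mode == INT_MODE_MSIX || mode == INT_MODE_MSI;
}

static void fini_interrupts(enum int_mode mode, int n_channels)
{
        if (use_msi(mode)) {
                for (int i = 0; i < n_channels; i++)
                        printf("free_irq(channel %d)\n", i);
        } else {
                printf("free_irq(legacy)\n");  /* the one IRQ we requested */
        }
}

int main(void)
{
        fini_interrupts(INT_MODE_LEGACY, 4);   /* frees only the legacy IRQ */
        return 0;
}
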
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index d1b4dca53a9d..bcaa41af1e62 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
147 */ 147 */
148#define MII_DELAY 1 148#define MII_DELAY 1
149 149
150#if SMC_DEBUG > 0 150#define DBG(n, dev, fmt, ...) \
151#define DBG(n, dev, args...) \ 151 do { \
152 do { \ 152 if (SMC_DEBUG >= (n)) \
153 if (SMC_DEBUG >= (n)) \ 153 netdev_dbg(dev, fmt, ##__VA_ARGS__); \
154 netdev_dbg(dev, args); \
155 } while (0) 154 } while (0)
156 155
157#define PRINTK(dev, args...) netdev_info(dev, args) 156#define PRINTK(dev, fmt, ...) \
158#else 157 do { \
159#define DBG(n, dev, args...) do { } while (0) 158 if (SMC_DEBUG > 0) \
160#define PRINTK(dev, args...) netdev_dbg(dev, args) 159 netdev_info(dev, fmt, ##__VA_ARGS__); \
161#endif 160 else \
161 netdev_dbg(dev, fmt, ##__VA_ARGS__); \
162 } while (0)
162 163
163#if SMC_DEBUG > 3 164#if SMC_DEBUG > 3
164static void PRINT_PKT(u_char *buf, int length) 165static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
191 pr_cont("\n"); 192 pr_cont("\n");
192} 193}
193#else 194#else
194#define PRINT_PKT(x...) do { } while (0) 195static inline void PRINT_PKT(u_char *buf, int length) { }
195#endif 196#endif
196 197
197 198
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
1781 int timeout = 20; 1782 int timeout = 20;
1782 unsigned long cookie; 1783 unsigned long cookie;
1783 1784
1784 DBG(2, dev, "%s: %s\n", CARDNAME, __func__); 1785 DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
1785 1786
1786 cookie = probe_irq_on(); 1787 cookie = probe_irq_on();
1787 1788
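
The smc91x macros are rewritten so DBG() and PRINTK() always expand to real code: the SMC_DEBUG comparison moves inside the body where the compiler folds it away, the empty PRINT_PKT() stub becomes a static inline, and the smc_findirq() call site is fixed to pass lp->dev, a mistake the old no-op macro had been hiding. The idiom in minimal form:

/* Always-compiled debug macro: the level check is an ordinary if the
 * compiler folds away, so arguments are type-checked at every level.
 */
#include <stdio.h>

#ifndef DEBUG_LEVEL
#define DEBUG_LEVEL 0
#endif

#define DBG(n, fmt, ...)                                        \
        do {                                                    \
                if (DEBUG_LEVEL >= (n))                         \
                        fprintf(stderr, fmt, ##__VA_ARGS__);    \
        } while (0)

int main(void)
{
        int irq = 7;

        /* compiled (and type-checked) even when DEBUG_LEVEL is 0,
         * but emits no code or output unless DEBUG_LEVEL >= 2 */
        DBG(2, "probing irq %d\n", irq);
        return 0;
}
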
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
1704 if (ret) { 1704 if (ret) {
1705 pr_err("%s: Cannot attach to PHY (error: %d)\n", 1705 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1706 __func__, ret); 1706 __func__, ret);
1707 goto phy_error; 1707 return ret;
1708 } 1708 }
1709 } 1709 }
1710 1710
@@ -1779,8 +1779,6 @@ init_error:
1779dma_desc_error: 1779dma_desc_error:
1780 if (priv->phydev) 1780 if (priv->phydev)
1781 phy_disconnect(priv->phydev); 1781 phy_disconnect(priv->phydev);
1782phy_error:
1783 clk_disable_unprepare(priv->stmmac_clk);
1784 1782
1785 return ret; 1783 return ret;
1786} 1784}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
246 int i; 246 int i;
247 247
248 for (i = 0; i < N_TX_RINGS; i++) 248 for (i = 0; i < N_TX_RINGS; i++)
249 spin_lock(&cp->tx_lock[i]); 249 spin_lock_nested(&cp->tx_lock[i], i);
250} 250}
251 251
252static inline void cas_lock_all(struct cas *cp) 252static inline void cas_lock_all(struct cas *cp)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1872 phyid = be32_to_cpup(parp+1); 1872 phyid = be32_to_cpup(parp+1);
1873 mdio = of_find_device_by_node(mdio_node); 1873 mdio = of_find_device_by_node(mdio_node);
1874 1874 of_node_put(mdio_node);
1875 if (strncmp(mdio->name, "gpio", 4) == 0) { 1875 if (!mdio) {
1876 /* GPIO bitbang MDIO driver attached */ 1876 pr_err("Missing mdio platform device\n");
1877 struct mii_bus *bus = dev_get_drvdata(&mdio->dev); 1877 return -EINVAL;
1878
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, bus->id, phyid);
1881 } else {
1882 /* davinci MDIO driver attached */
1883 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1884 PHY_ID_FMT, mdio->name, phyid);
1885 } 1878 }
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, mdio->name, phyid);
1886 1881
1887 mac_addr = of_get_mac_address(slave_node); 1882 mac_addr = of_get_mac_address(slave_node);
1888 if (mac_addr) 1883 if (mac_addr)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 31e55fba7cad..7918d5132c1f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -382,6 +382,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
382 if (skb_is_gso(skb)) 382 if (skb_is_gso(skb))
383 goto do_lso; 383 goto do_lso;
384 384
385 if ((skb->ip_summed == CHECKSUM_NONE) ||
386 (skb->ip_summed == CHECKSUM_UNNECESSARY))
387 goto do_send;
388
385 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 389 rndis_msg_size += NDIS_CSUM_PPI_SIZE;
386 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 390 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
387 TCPIP_CHKSUM_PKTINFO); 391 TCPIP_CHKSUM_PKTINFO);
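
netvsc_start_xmit() now skips building the NDIS checksum PPI for CHECKSUM_NONE and CHECKSUM_UNNECESSARY packets; only CHECKSUM_PARTIAL traffic needs the host to request checksum offload from the VSP. The dispatch in miniature; the enum mirrors the kernel's states in spirit, not in ABI:

/* Only CHECKSUM_PARTIAL packets get a checksum-offload request. */
#include <stdio.h>

enum csum_state { CSUM_NONE, CSUM_UNNECESSARY, CSUM_PARTIAL };

static int needs_csum_offload(enum csum_state s, int is_gso)
{
        if (is_gso)
                return 1;              /* the LSO path builds its own PPI */
        return s == CSUM_PARTIAL;      /* NONE/UNNECESSARY: send as-is */
}

int main(void)
{
        printf("%d %d %d\n",
               needs_csum_offload(CSUM_NONE, 0),        /* 0 */
               needs_csum_offload(CSUM_UNNECESSARY, 0), /* 0 */
               needs_csum_offload(CSUM_PARTIAL, 0));    /* 1 */
        return 0;
}
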
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 753a8c23d15d..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
263 const struct macvlan_dev *vlan = netdev_priv(dev); 263 const struct macvlan_dev *vlan = netdev_priv(dev);
264 const struct macvlan_port *port = vlan->port; 264 const struct macvlan_port *port = vlan->port;
265 const struct macvlan_dev *dest; 265 const struct macvlan_dev *dest;
266 __u8 ip_summed = skb->ip_summed;
267 266
268 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 267 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
269 const struct ethhdr *eth = (void *)skb->data; 268 const struct ethhdr *eth = (void *)skb->data;
270 skb->ip_summed = CHECKSUM_UNNECESSARY;
271 269
272 /* send to other bridge ports directly */ 270 /* send to other bridge ports directly */
273 if (is_multicast_ether_addr(eth->h_dest)) { 271 if (is_multicast_ether_addr(eth->h_dest)) {
@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
285 } 283 }
286 284
287xmit_world: 285xmit_world:
288 skb->ip_summed = ip_summed;
289 skb->dev = vlan->lowerdev; 286 skb->dev = vlan->lowerdev;
290 return dev_queue_xmit(skb); 287 return dev_queue_xmit(skb);
291} 288}
@@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
461 struct macvlan_dev *vlan = netdev_priv(dev); 458 struct macvlan_dev *vlan = netdev_priv(dev);
462 struct net_device *lowerdev = vlan->lowerdev; 459 struct net_device *lowerdev = vlan->lowerdev;
463 460
464 if (change & IFF_ALLMULTI) 461 if (dev->flags & IFF_UP) {
465 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 462 if (change & IFF_ALLMULTI)
463 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
464 }
466} 465}
467 466
468static void macvlan_set_mac_lists(struct net_device *dev) 467static void macvlan_set_mac_lists(struct net_device *dev)
@@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
518#define MACVLAN_STATE_MASK \ 517#define MACVLAN_STATE_MASK \
519 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 518 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
520 519
520static int macvlan_get_nest_level(struct net_device *dev)
521{
522 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
523}
524
521static void macvlan_set_lockdep_class_one(struct net_device *dev, 525static void macvlan_set_lockdep_class_one(struct net_device *dev,
522 struct netdev_queue *txq, 526 struct netdev_queue *txq,
523 void *_unused) 527 void *_unused)
@@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
528 532
529static void macvlan_set_lockdep_class(struct net_device *dev) 533static void macvlan_set_lockdep_class(struct net_device *dev)
530{ 534{
531 lockdep_set_class(&dev->addr_list_lock, 535 lockdep_set_class_and_subclass(&dev->addr_list_lock,
532 &macvlan_netdev_addr_lock_key); 536 &macvlan_netdev_addr_lock_key,
537 macvlan_get_nest_level(dev));
533 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); 538 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
534} 539}
535 540
@@ -724,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
724 .ndo_fdb_add = macvlan_fdb_add, 729 .ndo_fdb_add = macvlan_fdb_add,
725 .ndo_fdb_del = macvlan_fdb_del, 730 .ndo_fdb_del = macvlan_fdb_del,
726 .ndo_fdb_dump = ndo_dflt_fdb_dump, 731 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 .ndo_get_lock_subclass = macvlan_get_nest_level,
727}; 733};
728 734
729void macvlan_common_setup(struct net_device *dev) 735void macvlan_common_setup(struct net_device *dev)
@@ -852,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
852 vlan->dev = dev; 858 vlan->dev = dev;
853 vlan->port = port; 859 vlan->port = port;
854 vlan->set_features = MACVLAN_FEATURES; 860 vlan->set_features = MACVLAN_FEATURES;
861 vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
855 862
856 vlan->mode = MACVLAN_MODE_VEPA; 863 vlan->mode = MACVLAN_MODE_VEPA;
857 if (data && data[IFLA_MACVLAN_MODE]) 864 if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff111a89e17f..3381c4f91a8c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
322 segs = nskb; 322 segs = nskb;
323 } 323 }
324 } else { 324 } else {
325 /* If we receive a partial checksum and the tap side
326 * doesn't support checksum offload, compute the checksum.
327 * Note: it doesn't matter which checksum feature to
328 * check, we either support them all or none.
329 */
330 if (skb->ip_summed == CHECKSUM_PARTIAL &&
331 !(features & NETIF_F_ALL_CSUM) &&
332 skb_checksum_help(skb))
333 goto drop;
325 skb_queue_tail(&q->sk.sk_receive_queue, skb); 334 skb_queue_tail(&q->sk.sk_receive_queue, skb);
326 } 335 }
327 336
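
When the receiving tap lacks checksum features, a CHECKSUM_PARTIAL skb must be finished in software before it is queued, which is what the added skb_checksum_help() call arranges; the arithmetic underneath boils down to the RFC 1071 16-bit one's-complement sum. A standalone version:

/* Standalone RFC 1071 one's-complement checksum, the arithmetic that
 * software checksumming ultimately performs for a partial-checksum
 * packet as in the hunk above.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t csum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {              /* sum 16-bit words */
                sum += (uint32_t)data[0] << 8 | data[1];
                data += 2;
                len -= 2;
        }
        if (len)                       /* odd trailing byte */
                sum += (uint32_t)data[0] << 8;
        while (sum >> 16)              /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t buf[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00 };

        printf("csum = 0x%04x\n", csum(buf, sizeof(buf)));
        return 0;
}
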
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
215 if (pdev->dev.of_node) { 215 if (pdev->dev.of_node) {
216 pdata = mdio_gpio_of_get_data(pdev); 216 pdata = mdio_gpio_of_get_data(pdev);
217 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); 217 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
218 if (bus_id < 0) {
219 dev_warn(&pdev->dev, "failed to get alias id\n");
220 bus_id = 0;
221 }
218 } else { 222 } else {
219 pdata = dev_get_platdata(&pdev->dev); 223 pdata = dev_get_platdata(&pdev->dev);
220 bus_id = pdev->id; 224 bus_id = pdev->id;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5ad971a55c5d..d849684231c1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
246 if (val1 != -1) 246 if (val1 != -1)
247 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0); 247 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
248 248
249 if (val2 != -1) 249 if (val2 != -2)
250 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4); 250 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
251 251
252 if (val3 != -1) 252 if (val3 != -3)
253 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8); 253 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
254 254
255 if (val4 != -1) 255 if (val4 != -4)
256 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12); 256 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
257 257
258 return kszphy_extended_write(phydev, reg, newval); 258 return kszphy_extended_write(phydev, reg, newval);
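
The micrel comparisons look odd out of context: earlier in ksz9021_load_values_from_of(), outside this hunk, the four values appear to be initialised to the distinct sentinels -1 through -4 before the optional device-tree properties are read, so testing them all against -1, as the old code did, treated val2..val4 as always present and wrote garbage nibbles into the register. The distinct-sentinel idiom in a standalone sketch; the initialisers are assumed from context:

/* Each optional value gets its own "unset" marker, so presence tests
 * cannot cross wires between slots.
 */
#include <stdio.h>

int main(void)
{
        int val[4] = { -1, -2, -3, -4 };   /* per-slot "unset" sentinels */
        unsigned int newval = 0;

        val[1] = 3;                        /* only the second property set */

        for (int i = 0; i < 4; i++)
                if (val[i] != -(i + 1))    /* slot i vs its own sentinel */
                        newval |= ((unsigned int)val[i] & 0xf) << (4 * i);

        printf("newval = 0x%04x\n", newval);  /* 0x0030: only nibble 1 */
        return 0;
}
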
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1b6d09aef427..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
715 struct delayed_work *dwork = to_delayed_work(work); 715 struct delayed_work *dwork = to_delayed_work(work);
716 struct phy_device *phydev = 716 struct phy_device *phydev =
717 container_of(dwork, struct phy_device, state_queue); 717 container_of(dwork, struct phy_device, state_queue);
718 int needs_aneg = 0, do_suspend = 0; 718 bool needs_aneg = false, do_suspend = false, do_resume = false;
719 int err = 0; 719 int err = 0;
720 720
721 mutex_lock(&phydev->lock); 721 mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
727 case PHY_PENDING: 727 case PHY_PENDING:
728 break; 728 break;
729 case PHY_UP: 729 case PHY_UP:
730 needs_aneg = 1; 730 needs_aneg = true;
731 731
732 phydev->link_timeout = PHY_AN_TIMEOUT; 732 phydev->link_timeout = PHY_AN_TIMEOUT;
733 733
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
757 phydev->adjust_link(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev);
758 758
759 } else if (0 == phydev->link_timeout--) 759 } else if (0 == phydev->link_timeout--)
760 needs_aneg = 1; 760 needs_aneg = true;
761 break; 761 break;
762 case PHY_NOLINK: 762 case PHY_NOLINK:
763 err = phy_read_status(phydev); 763 err = phy_read_status(phydev);
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
765 break; 765 break;
766 766
767 if (phydev->link) { 767 if (phydev->link) {
768 if (AUTONEG_ENABLE == phydev->autoneg) {
769 err = phy_aneg_done(phydev);
770 if (err < 0)
771 break;
772
773 if (!err) {
774 phydev->state = PHY_AN;
775 phydev->link_timeout = PHY_AN_TIMEOUT;
776 break;
777 }
778 }
768 phydev->state = PHY_RUNNING; 779 phydev->state = PHY_RUNNING;
769 netif_carrier_on(phydev->attached_dev); 780 netif_carrier_on(phydev->attached_dev);
770 phydev->adjust_link(phydev->attached_dev); 781 phydev->adjust_link(phydev->attached_dev);
@@ -780,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
780 netif_carrier_on(phydev->attached_dev); 791 netif_carrier_on(phydev->attached_dev);
781 } else { 792 } else {
782 if (0 == phydev->link_timeout--) 793 if (0 == phydev->link_timeout--)
783 needs_aneg = 1; 794 needs_aneg = true;
784 } 795 }
785 796
786 phydev->adjust_link(phydev->attached_dev); 797 phydev->adjust_link(phydev->attached_dev);
@@ -816,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
816 phydev->link = 0; 827 phydev->link = 0;
817 netif_carrier_off(phydev->attached_dev); 828 netif_carrier_off(phydev->attached_dev);
818 phydev->adjust_link(phydev->attached_dev); 829 phydev->adjust_link(phydev->attached_dev);
819 do_suspend = 1; 830 do_suspend = true;
820 } 831 }
821 break; 832 break;
822 case PHY_RESUMING: 833 case PHY_RESUMING:
@@ -865,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
865 } 876 }
866 phydev->adjust_link(phydev->attached_dev); 877 phydev->adjust_link(phydev->attached_dev);
867 } 878 }
879 do_resume = true;
868 break; 880 break;
869 } 881 }
870 882
@@ -872,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
872 884
873 if (needs_aneg) 885 if (needs_aneg)
874 err = phy_start_aneg(phydev); 886 err = phy_start_aneg(phydev);
875 887 else if (do_suspend)
876 if (do_suspend)
877 phy_suspend(phydev); 888 phy_suspend(phydev);
889 else if (do_resume)
890 phy_resume(phydev);
878 891
879 if (err < 0) 892 if (err < 0)
880 phy_error(phydev); 893 phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
614 err = phy_init_hw(phydev); 614 err = phy_init_hw(phydev);
615 if (err) 615 if (err)
616 phy_detach(phydev); 616 phy_detach(phydev);
617 617 else
618 phy_resume(phydev); 618 phy_resume(phydev);
619 619
620 return err; 620 return err;
621} 621}
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc70ecfc7062..ad4a94e9ff57 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock); 432 spin_lock_bh(&sl->lock);
433 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 435 * transmission of another packet */
436 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock); 438 spin_unlock_bh(&sl->lock);
439 sl_unlock(sl); 439 sl_unlock(sl);
440 return; 440 return;
441 } 441 }
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
443 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
444 sl->xleft -= actual; 444 sl->xleft -= actual;
445 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock); 446 spin_unlock_bh(&sl->lock);
447} 447}
448 448
449static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 33008c1d1d67..ce4989be86d9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1724,6 +1724,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1724 * to traverse list in reverse under rcu_read_lock 1724 * to traverse list in reverse under rcu_read_lock
1725 */ 1725 */
1726 mutex_lock(&team->lock); 1726 mutex_lock(&team->lock);
1727 team->port_mtu_change_allowed = true;
1727 list_for_each_entry(port, &team->port_list, list) { 1728 list_for_each_entry(port, &team->port_list, list) {
1728 err = dev_set_mtu(port->dev, new_mtu); 1729 err = dev_set_mtu(port->dev, new_mtu);
1729 if (err) { 1730 if (err) {
@@ -1732,6 +1733,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1732 goto unwind; 1733 goto unwind;
1733 } 1734 }
1734 } 1735 }
1736 team->port_mtu_change_allowed = false;
1735 mutex_unlock(&team->lock); 1737 mutex_unlock(&team->lock);
1736 1738
1737 dev->mtu = new_mtu; 1739 dev->mtu = new_mtu;
@@ -1741,6 +1743,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
1741unwind: 1743unwind:
1742 list_for_each_entry_continue_reverse(port, &team->port_list, list) 1744 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1743 dev_set_mtu(port->dev, dev->mtu); 1745 dev_set_mtu(port->dev, dev->mtu);
1746 team->port_mtu_change_allowed = false;
1744 mutex_unlock(&team->lock); 1747 mutex_unlock(&team->lock);
1745 1748
1746 return err; 1749 return err;
@@ -2834,8 +2837,10 @@ static int team_device_event(struct notifier_block *unused,
2834 case NETDEV_UP: 2837 case NETDEV_UP:
2835 if (netif_carrier_ok(dev)) 2838 if (netif_carrier_ok(dev))
2836 team_port_change_check(port, true); 2839 team_port_change_check(port, true);
2840 break;
2837 case NETDEV_DOWN: 2841 case NETDEV_DOWN:
2838 team_port_change_check(port, false); 2842 team_port_change_check(port, false);
2843 break;
2839 case NETDEV_CHANGE: 2844 case NETDEV_CHANGE:
2840 if (netif_running(port->dev)) 2845 if (netif_running(port->dev))
2841 team_port_change_check(port, 2846 team_port_change_check(port,
@@ -2849,7 +2854,9 @@ static int team_device_event(struct notifier_block *unused,
2849 break; 2854 break;
2850 case NETDEV_PRECHANGEMTU: 2855 case NETDEV_PRECHANGEMTU:
2851 /* Forbid to change mtu of underlaying device */ 2856 /* Forbid to change mtu of underlaying device */
2852 return NOTIFY_BAD; 2857 if (!port->team->port_mtu_change_allowed)
2858 return NOTIFY_BAD;
2859 break;
2853 case NETDEV_PRE_TYPE_CHANGE: 2860 case NETDEV_PRE_TYPE_CHANGE:
2854 /* Forbid to change type of underlaying device */ 2861 /* Forbid to change type of underlaying device */
2855 return NOTIFY_BAD; 2862 return NOTIFY_BAD;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
120 cdc_ncm_unbind(dev, intf); 120 cdc_ncm_unbind(dev, intf);
121} 121}
122 122
123/* verify that the ethernet protocol is IPv4 or IPv6 */
124static bool is_ip_proto(__be16 proto)
125{
126 switch (proto) {
127 case htons(ETH_P_IP):
128 case htons(ETH_P_IPV6):
129 return true;
130 }
131 return false;
132}
123 133
124static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) 134static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
125{ 135{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
128 struct cdc_ncm_ctx *ctx = info->ctx; 138 struct cdc_ncm_ctx *ctx = info->ctx;
129 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); 139 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
130 u16 tci = 0; 140 u16 tci = 0;
141 bool is_ip;
131 u8 *c; 142 u8 *c;
132 143
133 if (!ctx) 144 if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
137 if (skb->len <= ETH_HLEN) 148 if (skb->len <= ETH_HLEN)
138 goto error; 149 goto error;
139 150
151 /* Some applications using e.g. packet sockets will
152 * bypass the VLAN acceleration and create tagged
153 * ethernet frames directly. We primarily look for
154 * the accelerated out-of-band tag, but fall back if
155 * required
156 */
157 skb_reset_mac_header(skb);
158 if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
159 __vlan_get_tag(skb, &tci) == 0) {
160 is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
161 skb_pull(skb, VLAN_ETH_HLEN);
162 } else {
163 is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
164 skb_pull(skb, ETH_HLEN);
165 }
166
140 /* mapping VLANs to MBIM sessions: 167 /* mapping VLANs to MBIM sessions:
141 * no tag => IPS session <0> 168 * no tag => IPS session <0>
142 * 1 - 255 => IPS session <vlanid> 169 * 1 - 255 => IPS session <vlanid>
143 * 256 - 511 => DSS session <vlanid - 256> 170 * 256 - 511 => DSS session <vlanid - 256>
144 * 512 - 4095 => unsupported, drop 171 * 512 - 4095 => unsupported, drop
145 */ 172 */
146 vlan_get_tag(skb, &tci);
147
148 switch (tci & 0x0f00) { 173 switch (tci & 0x0f00) {
149 case 0x0000: /* VLAN ID 0 - 255 */ 174 case 0x0000: /* VLAN ID 0 - 255 */
150 /* verify that datagram is IPv4 or IPv6 */ 175 if (!is_ip)
151 skb_reset_mac_header(skb);
152 switch (eth_hdr(skb)->h_proto) {
153 case htons(ETH_P_IP):
154 case htons(ETH_P_IPV6):
155 break;
156 default:
157 goto error; 176 goto error;
158 }
159 c = (u8 *)&sign; 177 c = (u8 *)&sign;
160 c[3] = tci; 178 c[3] = tci;
161 break; 179 break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
169 "unsupported tci=0x%04x\n", tci); 187 "unsupported tci=0x%04x\n", tci);
170 goto error; 188 goto error;
171 } 189 }
172 skb_pull(skb, ETH_HLEN);
173 } 190 }
174 191
175 spin_lock_bh(&ctx->mtx); 192 spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
204 return; 221 return;
205 222
206 /* need to send the NA on the VLAN dev, if any */ 223 /* need to send the NA on the VLAN dev, if any */
207 if (tci) 224 rcu_read_lock();
225 if (tci) {
208 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), 226 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
209 tci); 227 tci);
210 else 228 if (!netdev) {
229 rcu_read_unlock();
230 return;
231 }
232 } else {
211 netdev = dev->net; 233 netdev = dev->net;
212 if (!netdev) 234 }
213 return; 235 dev_hold(netdev);
236 rcu_read_unlock();
214 237
215 in6_dev = in6_dev_get(netdev); 238 in6_dev = in6_dev_get(netdev);
216 if (!in6_dev) 239 if (!in6_dev)
217 return; 240 goto out;
218 is_router = !!in6_dev->cnf.forwarding; 241 is_router = !!in6_dev->cnf.forwarding;
219 in6_dev_put(in6_dev); 242 in6_dev_put(in6_dev);
220 243
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
224 true /* solicited */, 247 true /* solicited */,
225 false /* override */, 248 false /* override */,
226 true /* inc_opt */); 249 true /* inc_opt */);
250out:
251 dev_put(netdev);
227} 252}
228 253
229static bool is_neigh_solicit(u8 *buf, size_t len) 254static bool is_neigh_solicit(u8 *buf, size_t len)
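The do_neigh_solicit() hunk uses a standard kernel lifetime idiom: resolve the device inside an RCU read-side section, take a reference with dev_hold() before leaving it, and drop the reference with dev_put() once the slow work is done. A hedged userspace model with an atomic refcount standing in for the device's; every name below is invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct toy_netdev {
	const char *name;
	atomic_int refcnt;
};

static void toy_dev_hold(struct toy_netdev *d) { atomic_fetch_add(&d->refcnt, 1); }
static void toy_dev_put(struct toy_netdev *d)  { atomic_fetch_sub(&d->refcnt, 1); }

static void send_na(struct toy_netdev *vlan_dev, struct toy_netdev *base)
{
	struct toy_netdev *netdev;

	/* rcu_read_lock() would begin here: the lookup result is only
	 * guaranteed to stay alive inside the read-side section ... */
	netdev = vlan_dev ? vlan_dev : base;
	if (!netdev)
		return;               /* VLAN device vanished: unlock and bail */
	toy_dev_hold(netdev);         /* ... so pin it before leaving */
	/* rcu_read_unlock() would end here */

	printf("sending NA via %s (refcnt now %d)\n",
	       netdev->name, atomic_load(&netdev->refcnt));
	toy_dev_put(netdev);
}

int main(void)
{
	struct toy_netdev wwan = { "wwan0", 1 };

	send_na(NULL, &wwan);         /* untagged case: use the base device */
	return 0;
}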
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 549dbac710ed..9a2bd11943eb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
785 skb_out->len > CDC_NCM_MIN_TX_PKT) 785 skb_out->len > CDC_NCM_MIN_TX_PKT)
786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
787 ctx->tx_max - skb_out->len); 787 ctx->tx_max - skb_out->len);
788 else if ((skb_out->len % dev->maxpacket) == 0) 788 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
789 *skb_put(skb_out, 1) = 0; /* force short packet */ 789 *skb_put(skb_out, 1) = 0; /* force short packet */
790 790
791 /* set final frame length */ 791 /* set final frame length */
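The cdc_ncm one-liner narrows the short-packet workaround: a bulk transfer whose length is an exact multiple of the endpoint's max packet size would otherwise look unterminated to some devices, so a pad byte is appended, but only when the frame has not already been padded up to the negotiated tx_max, where the device knows the exact expected length. A small arithmetic sketch of the condition; the constants are illustrative:

#include <stdio.h>

/* Mirrors the padding decision in cdc_ncm_fill_tx_frame(): force a
 * short packet with one pad byte only when the frame is shorter than
 * tx_max yet ends exactly on a wMaxPacketSize boundary. */
static int needs_pad_byte(unsigned len, unsigned tx_max, unsigned maxpacket)
{
	return len < tx_max && (len % maxpacket) == 0;
}

int main(void)
{
	unsigned maxpacket = 512, tx_max = 16384;

	printf("%u -> %d\n", 1024u, needs_pad_byte(1024, tx_max, maxpacket));
	printf("%u -> %d\n", 1025u, needs_pad_byte(1025, tx_max, maxpacket));
	printf("%u -> %d (already tx_max)\n", tx_max,
	       needs_pad_byte(tx_max, tx_max, maxpacket));
	return 0;
}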
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 421934c83f1c..973275fef250 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,8 @@
59#define USB_PRODUCT_IPHONE_3GS 0x1294 59#define USB_PRODUCT_IPHONE_3GS 0x1294
60#define USB_PRODUCT_IPHONE_4 0x1297 60#define USB_PRODUCT_IPHONE_4 0x1297
61#define USB_PRODUCT_IPAD 0x129a 61#define USB_PRODUCT_IPAD 0x129a
62#define USB_PRODUCT_IPAD_2 0x12a2
63#define USB_PRODUCT_IPAD_3 0x12a6
62#define USB_PRODUCT_IPAD_MINI 0x12ab 64#define USB_PRODUCT_IPAD_MINI 0x12ab
63#define USB_PRODUCT_IPHONE_4_VZW 0x129c 65#define USB_PRODUCT_IPHONE_4_VZW 0x129c
64#define USB_PRODUCT_IPHONE_4S 0x12a0 66#define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -107,6 +109,14 @@ static struct usb_device_id ipheth_table[] = {
107 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 109 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
108 IPHETH_USBINTF_PROTO) }, 110 IPHETH_USBINTF_PROTO) },
109 { USB_DEVICE_AND_INTERFACE_INFO( 111 { USB_DEVICE_AND_INTERFACE_INFO(
112 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
113 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
114 IPHETH_USBINTF_PROTO) },
115 { USB_DEVICE_AND_INTERFACE_INFO(
116 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
117 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
118 IPHETH_USBINTF_PROTO) },
119 { USB_DEVICE_AND_INTERFACE_INFO(
110 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI, 120 USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
111 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 121 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
112 IPHETH_USBINTF_PROTO) }, 122 IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3458e3c44f1..dc4bf06948c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
673 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
674 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
675 {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
676 {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
677 {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
678 {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
679 {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
680 {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
681 {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
682 {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
683 {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
684 {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
685 {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
686 {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
687 {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
672 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 688 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
673 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 689 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
674 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 690 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,32 @@ static const struct usb_device_id products[] = {
730 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 746 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
731 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 747 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
732 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 748 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
749 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
750 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
733 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 751 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
752 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
753 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
734 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 754 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
755 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
756 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
757 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
758 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
759 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
735 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 760 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
761 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
736 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 762 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
737 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
738 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
739 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
740 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 766 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
767 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
741 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 768 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
742 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 769 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
770 {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
771 {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
772 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
773 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
774 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
743 775
744 /* 4. Gobi 1000 devices */ 776 /* 4. Gobi 1000 devices */
745 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 777 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b687469199b..8a852b5f215f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
1285 if (channels->rx_count || channels->tx_count || channels->other_count) 1285 if (channels->rx_count || channels->tx_count || channels->other_count)
1286 return -EINVAL; 1286 return -EINVAL;
1287 1287
1288 if (queue_pairs > vi->max_queue_pairs) 1288 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1289 return -EINVAL; 1289 return -EINVAL;
1290 1290
1291 get_online_cpus(); 1291 get_online_cpus();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 82355d5d155a..4dbb2ed85b97 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void)
389 + nla_total_size(sizeof(struct nda_cacheinfo)); 389 + nla_total_size(sizeof(struct nda_cacheinfo));
390} 390}
391 391
392static void vxlan_fdb_notify(struct vxlan_dev *vxlan, 392static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
393 struct vxlan_fdb *fdb, int type) 393 struct vxlan_rdst *rd, int type)
394{ 394{
395 struct net *net = dev_net(vxlan->dev); 395 struct net *net = dev_net(vxlan->dev);
396 struct sk_buff *skb; 396 struct sk_buff *skb;
@@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
400 if (skb == NULL) 400 if (skb == NULL)
401 goto errout; 401 goto errout;
402 402
403 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, 403 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
404 first_remote_rtnl(fdb));
405 if (err < 0) { 404 if (err < 0) {
406 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 405 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
407 WARN_ON(err == -EMSGSIZE); 406 WARN_ON(err == -EMSGSIZE);
@@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
427 .remote_vni = VXLAN_N_VID, 426 .remote_vni = VXLAN_N_VID,
428 }; 427 };
429 428
430 INIT_LIST_HEAD(&f.remotes); 429 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
431 list_add_rcu(&remote.list, &f.remotes);
432
433 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
434} 430}
435 431
436static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 432static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
438 struct vxlan_fdb f = { 434 struct vxlan_fdb f = {
439 .state = NUD_STALE, 435 .state = NUD_STALE,
440 }; 436 };
437 struct vxlan_rdst remote = { };
441 438
442 INIT_LIST_HEAD(&f.remotes);
443 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 439 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
444 440
445 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 441 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
446} 442}
447 443
448/* Hash Ethernet address */ 444/* Hash Ethernet address */
@@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
533 529
534/* Add/update destinations for multicast */ 530/* Add/update destinations for multicast */
535static int vxlan_fdb_append(struct vxlan_fdb *f, 531static int vxlan_fdb_append(struct vxlan_fdb *f,
536 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) 532 union vxlan_addr *ip, __be16 port, __u32 vni,
533 __u32 ifindex, struct vxlan_rdst **rdp)
537{ 534{
538 struct vxlan_rdst *rd; 535 struct vxlan_rdst *rd;
539 536
@@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
551 548
552 list_add_tail_rcu(&rd->list, &f->remotes); 549 list_add_tail_rcu(&rd->list, &f->remotes);
553 550
551 *rdp = rd;
554 return 1; 552 return 1;
555} 553}
556 554
@@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
690 __be16 port, __u32 vni, __u32 ifindex, 688 __be16 port, __u32 vni, __u32 ifindex,
691 __u8 ndm_flags) 689 __u8 ndm_flags)
692{ 690{
691 struct vxlan_rdst *rd = NULL;
693 struct vxlan_fdb *f; 692 struct vxlan_fdb *f;
694 int notify = 0; 693 int notify = 0;
695 694
@@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
726 if ((flags & NLM_F_APPEND) && 725 if ((flags & NLM_F_APPEND) &&
727 (is_multicast_ether_addr(f->eth_addr) || 726 (is_multicast_ether_addr(f->eth_addr) ||
728 is_zero_ether_addr(f->eth_addr))) { 727 is_zero_ether_addr(f->eth_addr))) {
729 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex); 728 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
729 &rd);
730 730
731 if (rc < 0) 731 if (rc < 0)
732 return rc; 732 return rc;
@@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
756 INIT_LIST_HEAD(&f->remotes); 756 INIT_LIST_HEAD(&f->remotes);
757 memcpy(f->eth_addr, mac, ETH_ALEN); 757 memcpy(f->eth_addr, mac, ETH_ALEN);
758 758
759 vxlan_fdb_append(f, ip, port, vni, ifindex); 759 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
760 760
761 ++vxlan->addrcnt; 761 ++vxlan->addrcnt;
762 hlist_add_head_rcu(&f->hlist, 762 hlist_add_head_rcu(&f->hlist,
763 vxlan_fdb_head(vxlan, mac)); 763 vxlan_fdb_head(vxlan, mac));
764 } 764 }
765 765
766 if (notify) 766 if (notify) {
767 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 767 if (rd == NULL)
768 rd = first_remote_rtnl(f);
769 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
770 }
768 771
769 return 0; 772 return 0;
770} 773}
@@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
785 "delete %pM\n", f->eth_addr); 788 "delete %pM\n", f->eth_addr);
786 789
787 --vxlan->addrcnt; 790 --vxlan->addrcnt;
788 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); 791 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
789 792
790 hlist_del_rcu(&f->hlist); 793 hlist_del_rcu(&f->hlist);
791 call_rcu(&f->rcu, vxlan_fdb_free); 794 call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
919 */ 922 */
920 if (rd && !list_is_singular(&f->remotes)) { 923 if (rd && !list_is_singular(&f->remotes)) {
921 list_del_rcu(&rd->list); 924 list_del_rcu(&rd->list);
925 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
922 kfree_rcu(rd, rcu); 926 kfree_rcu(rd, rcu);
923 goto out; 927 goto out;
924 } 928 }
@@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev,
993 997
994 rdst->remote_ip = *src_ip; 998 rdst->remote_ip = *src_ip;
995 f->updated = jiffies; 999 f->updated = jiffies;
996 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 1000 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
997 } else { 1001 } else {
998 /* learned new entry */ 1002 /* learned new entry */
999 spin_lock(&vxlan->hash_lock); 1003 spin_lock(&vxlan->hash_lock);
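The vxlan changes thread the specific remote destination through to vxlan_fdb_notify() instead of always reporting the first remote of the entry, so a netlink listener learns exactly which destination was appended, updated, or deleted. A compact model of that control flow with toy types; nothing here is the driver's real API:

#include <stddef.h>
#include <stdio.h>

/* Toy model: an FDB entry owns a list of remote destinations, and a
 * notification now names the exact remote involved, not the head. */
struct rdst { const char *ip; struct rdst *next; };
struct fdb  { struct rdst *remotes; };

static void fdb_notify(struct fdb *f, struct rdst *rd, const char *type)
{
	(void)f;
	printf("%s neigh, remote %s\n", type, rd->ip);
}

/* Link in a remote and report back which node was created, mirroring
 * the new **rdp out-parameter on vxlan_fdb_append(). */
static int fdb_link(struct fdb *f, const char *ip, struct rdst **rdp,
		    struct rdst *slot)
{
	slot->ip = ip;
	slot->next = f->remotes;
	f->remotes = slot;
	*rdp = slot;
	return 1;
}

int main(void)
{
	struct fdb f = { NULL };
	struct rdst a, b, *rd = NULL;

	fdb_link(&f, "10.0.0.1", &rd, &a);
	fdb_notify(&f, rd, "NEW");        /* notify the remote just added */
	fdb_link(&f, "10.0.0.2", &rd, &b);
	fdb_notify(&f, rd, "NEW");        /* not merely the first in the list */
	return 0;
}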
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index a0398fe3eb28..be3eb2a8d602 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
86 int irq; 86 int irq;
87 int ret = 0; 87 int ret = 0;
88 struct ath_hw *ah; 88 struct ath_hw *ah;
89 struct ath_common *common;
90 char hw_name[64]; 89 char hw_name[64];
91 90
92 if (!dev_get_platdata(&pdev->dev)) { 91 if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
146 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 145 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
147 hw_name, (unsigned long)mem, irq); 146 hw_name, (unsigned long)mem, irq);
148 147
149 common = ath9k_hw_common(sc->sc_ah);
150 /* Will be cleared in ath9k_start() */
151 set_bit(ATH_OP_INVALID, &common->op_flags);
152 return 0; 148 return 0;
153 149
154 err_irq: 150 err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 6d47783f2e5b..ba502a2d199b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
155 ATH9K_ANI_RSSI_THR_LOW, 155 ATH9K_ANI_RSSI_THR_LOW,
156 ATH9K_ANI_RSSI_THR_HIGH); 156 ATH9K_ANI_RSSI_THR_HIGH);
157 157
158 if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
159 immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
160
158 if (!scan) 161 if (!scan)
159 aniState->ofdmNoiseImmunityLevel = immunityLevel; 162 aniState->ofdmNoiseImmunityLevel = immunityLevel;
160 163
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
235 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW, 238 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
236 ATH9K_ANI_RSSI_THR_HIGH); 239 ATH9K_ANI_RSSI_THR_HIGH);
237 240
241 if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
242 immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
243
238 if (ah->opmode == NL80211_IFTYPE_STATION && 244 if (ah->opmode == NL80211_IFTYPE_STATION &&
239 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW && 245 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
240 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) 246 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
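The two ani.c hunks apply the same floor: on AR9100-class hardware the computed OFDM and CCK immunity levels are clamped up to their defaults, since those chips misbehave below them. A trivial sketch of the clamp; the constant mirrors ATH9K_ANI_OFDM_DEF_LEVEL in spirit only:

#include <stdio.h>

#define ANI_DEF_LEVEL 3   /* illustrative stand-in for the driver default */

/* Never let the computed immunity level drop below the safe default
 * on the affected hardware; other chips pass through unchanged. */
static int clamp_immunity(int level, int is_ar9100)
{
	if (is_ar9100 && level < ANI_DEF_LEVEL)
		level = ANI_DEF_LEVEL;
	return level;
}

int main(void)
{
	printf("%d\n", clamp_immunity(1, 1));  /* raised to 3 */
	printf("%d\n", clamp_immunity(1, 0));  /* unchanged */
	return 0;
}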
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 44d74495c4de..3ba03dde4215 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -251,7 +251,6 @@ struct ath_atx_tid {
251 251
252 s8 bar_index; 252 s8 bar_index;
253 bool sched; 253 bool sched;
254 bool paused;
255 bool active; 254 bool active;
256}; 255};
257 256
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d76e6e0120d2..ffca918ff16a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
72 ath_txq_lock(sc, txq); 72 ath_txq_lock(sc, txq);
73 if (tid->active) { 73 if (tid->active) {
74 len += scnprintf(buf + len, size - len, 74 len += scnprintf(buf + len, size - len,
75 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n", 75 "%3d%11d%10d%10d%10d%10d%9d%6d\n",
76 tid->tidno, 76 tid->tidno,
77 tid->seq_start, 77 tid->seq_start,
78 tid->seq_next, 78 tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
80 tid->baw_head, 80 tid->baw_head,
81 tid->baw_tail, 81 tid->baw_tail,
82 tid->bar_index, 82 tid->bar_index,
83 tid->sched, 83 tid->sched);
84 tid->paused);
85 } 84 }
86 ath_txq_unlock(sc, txq); 85 ath_txq_unlock(sc, txq);
87 } 86 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
95 95
96 if ((vif->type == NL80211_IFTYPE_AP || 96 if ((vif->type == NL80211_IFTYPE_AP ||
97 vif->type == NL80211_IFTYPE_MESH_POINT) && 97 vif->type == NL80211_IFTYPE_MESH_POINT) &&
98 bss_conf->enable_beacon) 98 bss_conf->enable_beacon) {
99 priv->reconfig_beacon = true; 99 priv->reconfig_beacon = true;
100 priv->rearm_ani = true;
101 }
100 102
101 if (bss_conf->assoc) { 103 if (bss_conf->assoc) {
102 priv->rearm_ani = true; 104 priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
257 259
258 ath9k_htc_ps_wakeup(priv); 260 ath9k_htc_ps_wakeup(priv);
259 261
262 ath9k_htc_stop_ani(priv);
260 del_timer_sync(&priv->tx.cleanup_timer); 263 del_timer_sync(&priv->tx.cleanup_timer);
261 ath9k_htc_tx_drain(priv); 264 ath9k_htc_tx_drain(priv);
262 265
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cbbb02a6b13b..36ae6490e554 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -783,6 +783,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
783 common = ath9k_hw_common(ah); 783 common = ath9k_hw_common(ah);
784 ath9k_set_hw_capab(sc, hw); 784 ath9k_set_hw_capab(sc, hw);
785 785
786 /* Will be cleared in ath9k_start() */
787 set_bit(ATH_OP_INVALID, &common->op_flags);
788
786 /* Initialize regulatory */ 789 /* Initialize regulatory */
787 error = ath_regd_init(&common->regulatory, sc->hw->wiphy, 790 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
788 ath9k_reg_notifier); 791 ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 25304adece57..914dbc6b1720 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
784{ 784{
785 struct ath_softc *sc; 785 struct ath_softc *sc;
786 struct ieee80211_hw *hw; 786 struct ieee80211_hw *hw;
787 struct ath_common *common;
788 u8 csz; 787 u8 csz;
789 u32 val; 788 u32 val;
790 int ret = 0; 789 int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
877 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 876 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
878 hw_name, (unsigned long)sc->mem, pdev->irq); 877 hw_name, (unsigned long)sc->mem, pdev->irq);
879 878
880 /* Will be cleared in ath9k_start() */
881 common = ath9k_hw_common(sc->sc_ah);
882 set_bit(ATH_OP_INVALID, &common->op_flags);
883
884 return 0; 879 return 0;
885 880
886err_init: 881err_init:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c9accdb52e4..19df969ec909 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -975,6 +975,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
975 u64 tsf = 0; 975 u64 tsf = 0;
976 unsigned long flags; 976 unsigned long flags;
977 dma_addr_t new_buf_addr; 977 dma_addr_t new_buf_addr;
978 unsigned int budget = 512;
978 979
979 if (edma) 980 if (edma)
980 dma_type = DMA_BIDIRECTIONAL; 981 dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1114,17 @@ requeue_drop_frag:
1113 } 1114 }
1114requeue: 1115requeue:
1115 list_add_tail(&bf->list, &sc->rx.rxbuf); 1116 list_add_tail(&bf->list, &sc->rx.rxbuf);
1116 if (flush)
1117 continue;
1118 1117
1119 if (edma) { 1118 if (edma) {
1120 ath_rx_edma_buf_link(sc, qtype); 1119 ath_rx_edma_buf_link(sc, qtype);
1121 } else { 1120 } else {
1122 ath_rx_buf_relink(sc, bf); 1121 ath_rx_buf_relink(sc, bf);
1123 ath9k_hw_rxena(ah); 1122 if (!flush)
1123 ath9k_hw_rxena(ah);
1124 } 1124 }
1125
1126 if (!budget--)
1127 break;
1125 } while (1); 1128 } while (1);
1126 1129
1127 if (!(ah->imask & ATH9K_INT_RXEOL)) { 1130 if (!(ah->imask & ATH9K_INT_RXEOL)) {
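The recv.c hunk bounds the rx tasklet at 512 frames per run so a flood of packets cannot pin the CPU inside the do/while loop; whatever remains is picked up the next time the tasklet is scheduled. The shape of such a budgeted drain loop, sketched against a simulated queue:

#include <stdio.h>

static int pending = 1500;          /* frames waiting, illustrative */

static int process_one(void)
{
	if (pending <= 0)
		return 0;           /* queue empty: stop */
	pending--;
	return 1;
}

static void rx_poll(void)
{
	unsigned int budget = 512;

	do {
		if (!process_one())
			break;
		if (!budget--)
			break;      /* budget exhausted: yield the CPU */
	} while (1);

	printf("left for next run: %d\n", pending);
}

int main(void)
{
	rx_poll();   /* each call drains at most the budget */
	rx_poll();
	rx_poll();
	return 0;
}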
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 87cbec47fb48..66acb2cbd9df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
107{ 107{
108 struct ath_atx_ac *ac = tid->ac; 108 struct ath_atx_ac *ac = tid->ac;
109 109
110 if (tid->paused)
111 return;
112
113 if (tid->sched) 110 if (tid->sched)
114 return; 111 return;
115 112
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1407 ath_tx_tid_change_state(sc, txtid); 1404 ath_tx_tid_change_state(sc, txtid);
1408 1405
1409 txtid->active = true; 1406 txtid->active = true;
1410 txtid->paused = true;
1411 *ssn = txtid->seq_start = txtid->seq_next; 1407 *ssn = txtid->seq_start = txtid->seq_next;
1412 txtid->bar_index = -1; 1408 txtid->bar_index = -1;
1413 1409
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1427 1423
1428 ath_txq_lock(sc, txq); 1424 ath_txq_lock(sc, txq);
1429 txtid->active = false; 1425 txtid->active = false;
1430 txtid->paused = false;
1431 ath_tx_flush_tid(sc, txtid); 1426 ath_tx_flush_tid(sc, txtid);
1432 ath_tx_tid_change_state(sc, txtid); 1427 ath_tx_tid_change_state(sc, txtid);
1433 ath_txq_unlock_complete(sc, txq); 1428 ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1487 ath_txq_lock(sc, txq); 1482 ath_txq_lock(sc, txq);
1488 ac->clear_ps_filter = true; 1483 ac->clear_ps_filter = true;
1489 1484
1490 if (!tid->paused && ath_tid_has_buffered(tid)) { 1485 if (ath_tid_has_buffered(tid)) {
1491 ath_tx_queue_tid(txq, tid); 1486 ath_tx_queue_tid(txq, tid);
1492 ath_txq_schedule(sc, txq); 1487 ath_txq_schedule(sc, txq);
1493 } 1488 }
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
1510 ath_txq_lock(sc, txq); 1505 ath_txq_lock(sc, txq);
1511 1506
1512 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1507 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1513 tid->paused = false;
1514 1508
1515 if (ath_tid_has_buffered(tid)) { 1509 if (ath_tid_has_buffered(tid)) {
1516 ath_tx_queue_tid(txq, tid); 1510 ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1544 continue; 1538 continue;
1545 1539
1546 tid = ATH_AN_2_TID(an, i); 1540 tid = ATH_AN_2_TID(an, i);
1547 if (tid->paused)
1548 continue;
1549 1541
1550 ath_txq_lock(sc, tid->ac->txq); 1542 ath_txq_lock(sc, tid->ac->txq);
1551 while (nframes > 0) { 1543 while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1844 list_del(&tid->list); 1836 list_del(&tid->list);
1845 tid->sched = false; 1837 tid->sched = false;
1846 1838
1847 if (tid->paused)
1848 continue;
1849
1850 if (ath_tx_sched_aggr(sc, txq, tid, &stop)) 1839 if (ath_tx_sched_aggr(sc, txq, tid, &stop))
1851 sent = true; 1840 sent = true;
1852 1841
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2698 tid->baw_size = WME_MAX_BA; 2687 tid->baw_size = WME_MAX_BA;
2699 tid->baw_head = tid->baw_tail = 0; 2688 tid->baw_head = tid->baw_tail = 0;
2700 tid->sched = false; 2689 tid->sched = false;
2701 tid->paused = false;
2702 tid->active = false; 2690 tid->active = false;
2703 __skb_queue_head_init(&tid->buf_q); 2691 __skb_queue_head_init(&tid->buf_q);
2704 __skb_queue_head_init(&tid->retry_q); 2692 __skb_queue_head_init(&tid->retry_q);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index df130ef53d1c..c7c9f15c0fe0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
303 303
304 ci = core->chip; 304 ci = core->chip;
305 305
306 /* if core is already in reset, just return */ 306 /* if core is already in reset, skip reset */
307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL); 307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
308 if ((regdata & BCMA_RESET_CTL_RESET) != 0) 308 if ((regdata & BCMA_RESET_CTL_RESET) != 0)
309 return; 309 goto in_reset_configure;
310 310
311 /* configure reset */ 311 /* configure reset */
312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, 312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) != 322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
323 BCMA_RESET_CTL_RESET, 300); 323 BCMA_RESET_CTL_RESET, 300);
324 324
325in_reset_configure:
325 /* in-reset configure */ 326 /* in-reset configure */
326 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, 327 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
327 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK); 328 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
4948 if (!err) { 4948 if (!err) {
4949 /* only set 2G bandwidth using bw_cap command */ 4949 /* only set 2G bandwidth using bw_cap command */
4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G); 4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); 4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, 4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
4953 sizeof(band_bwcap)); 4953 sizeof(band_bwcap));
4954 } else { 4954 } else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); 611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
612 612
613 if (IWL_MVM_BT_COEX_CORUNNING) { 613 if (IWL_MVM_BT_COEX_CORUNNING) {
614 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | 614 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
615 BT_VALID_CORUN_LUT_40); 615 BT_VALID_CORUN_LUT_40);
616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); 616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
617 } 617 }
618 618
619 if (IWL_MVM_BT_COEX_MPLUT) { 619 if (IWL_MVM_BT_COEX_MPLUT) {
620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); 620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
621 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); 621 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
622 } 622 }
623 623
624 if (mvm->cfg->bt_shared_single_ant) 624 if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
183 * this number of packets were received (typically 1) 183 * this number of packets were received (typically 1)
184 * @passive2active: is auto switching from passive to active during scan allowed 184 * @passive2active: is auto switching from passive to active during scan allowed
185 * @rxchain_sel_flags: RXON_RX_CHAIN_* 185 * @rxchain_sel_flags: RXON_RX_CHAIN_*
186 * @max_out_time: in usecs, max out of serving channel time 186 * @max_out_time: in TUs, max out of serving channel time
187 * @suspend_time: how long to pause scan when returning to service channel: 187 * @suspend_time: how long to pause scan when returning to service channel:
188 * bits 0-19: beacon interval in usecs (suspend before executing) 188 * bits 0-19: beacon interval in TUs (suspend before executing)
189 * bits 20-23: reserved 189 * bits 20-23: reserved
190 * bits 24-31: number of beacons (suspend between channels) 190 * bits 24-31: number of beacons (suspend between channels)
191 * @rxon_flags: RXON_FLG_* 191 * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
383 * @quiet_plcp_th: quiet channel num of packets threshold 383 * @quiet_plcp_th: quiet channel num of packets threshold
384 * @good_CRC_th: passive to active promotion threshold 384 * @good_CRC_th: passive to active promotion threshold
385 * @rx_chain: RXON rx chain. 385 * @rx_chain: RXON rx chain.
386 * @max_out_time: max uSec to be out of associated channel 386 * @max_out_time: max TUs to be out of associated channel
387 * @suspend_time: pause scan this long when returning to service channel 387 * @suspend_time: pause scan this many TUs when returning to service channel
388 * @flags: RXON flags 388 * @flags: RXON flags
389 * @filter_flags: RXONfilter 389 * @filter_flags: RXONfilter
390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. 390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..8735ef1f44ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -826,7 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
826 if (ret) 826 if (ret)
827 goto out_remove_mac; 827 goto out_remove_mac;
828 828
829 if (!mvm->bf_allowed_vif && 829 if (!mvm->bf_allowed_vif && false &&
830 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 830 vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
831 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ 831 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
832 mvm->bf_allowed_vif = mvmvif; 832 mvm->bf_allowed_vif = mvmvif;
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1007 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1007 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1008 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1008 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1009 1009
1010 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); 1010 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1011 if (ret) 1011 if (ret)
1012 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); 1012 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1013} 1013}
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1023 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1023 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1024 return; 1024 return;
1025 1025
1026 ieee80211_iterate_active_interfaces( 1026 ieee80211_iterate_active_interfaces_atomic(
1027 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1027 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1028 iwl_mvm_mc_iface_iterator, &iter_data); 1028 iwl_mvm_mc_iface_iterator, &iter_data);
1029} 1029}
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
1807 1807
1808 mutex_lock(&mvm->mutex); 1808 mutex_lock(&mvm->mutex);
1809 1809
1810 if (!iwl_mvm_is_idle(mvm)) {
1811 ret = -EBUSY;
1812 goto out;
1813 }
1814
1810 switch (mvm->scan_status) { 1815 switch (mvm->scan_status) {
1811 case IWL_MVM_SCAN_OS: 1816 case IWL_MVM_SCAN_OS:
1812 IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); 1817 IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1003 return mvmvif->low_latency; 1003 return mvmvif->low_latency;
1004} 1004}
1005 1005
1006/* Assoc status */
1007bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
1008
1006/* Thermal management and CT-kill */ 1009/* Thermal management and CT-kill */
1007void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); 1010void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1008void iwl_mvm_tt_handler(struct iwl_mvm *mvm); 1011void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1010 return; 1010 return;
1011 } 1011 }
1012 1012
1013#ifdef CPTCFG_MAC80211_DEBUGFS 1013#ifdef CONFIG_MAC80211_DEBUGFS
1014 /* Disable last tx check if we are debugging with fixed rate */ 1014 /* Disable last tx check if we are debugging with fixed rate */
1015 if (lq_sta->dbg_fixed_rate) { 1015 if (lq_sta->dbg_fixed_rate) {
1016 IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); 1016 IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
277 IEEE80211_IFACE_ITER_NORMAL, 277 IEEE80211_IFACE_ITER_NORMAL,
278 iwl_mvm_scan_condition_iterator, 278 iwl_mvm_scan_condition_iterator,
279 &global_bound); 279 &global_bound);
280 /*
281 * Under low latency traffic passive scan is fragmented meaning
282 * that dwell on a particular channel will be fragmented. Each fragment
283 * dwell time is 20ms and fragments period is 105ms. Skipping to next
284 * channel will be delayed by the same period - 105ms. So suspend_time
285 * parameter describing both fragments and channels skipping periods is
286 * set to 105ms. This value is chosen so that overall passive scan
287 * duration will not be too long. Max_out_time in this case is set to
288 * 70ms, so for active scanning operating channel will be left for 70ms
289 * while for passive still for 20ms (fragment dwell).
290 */
291 if (global_bound) {
292 if (!iwl_mvm_low_latency(mvm)) {
293 params->suspend_time = ieee80211_tu_to_usec(100);
294 params->max_out_time = ieee80211_tu_to_usec(600);
295 } else {
296 params->suspend_time = ieee80211_tu_to_usec(105);
297 /* P2P doesn't support fragmented passive scan, so
298 * configure max_out_time to be at least longest dwell
299 * time for passive scan.
300 */
301 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
302 params->max_out_time = ieee80211_tu_to_usec(70);
303 params->passive_fragmented = true;
304 } else {
305 u32 passive_dwell;
306 280
307 /* 281 if (!global_bound)
308 * Use band G so that passive channel dwell time 282 goto not_bound;
309 * will be assigned with maximum value. 283
310 */ 284 params->suspend_time = 100;
311 band = IEEE80211_BAND_2GHZ; 285 params->max_out_time = 600;
312 passive_dwell = iwl_mvm_get_passive_dwell(band); 286
313 params->max_out_time = 287 if (iwl_mvm_low_latency(mvm)) {
314 ieee80211_tu_to_usec(passive_dwell); 288 params->suspend_time = 250;
315 } 289 params->max_out_time = 250;
316 }
317 } 290 }
318 291
292not_bound:
293
319 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 294 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
320 if (params->passive_fragmented) 295 params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
321 params->dwell[band].passive = 20;
322 else
323 params->dwell[band].passive =
324 iwl_mvm_get_passive_dwell(band);
325 params->dwell[band].active = iwl_mvm_get_active_dwell(band, 296 params->dwell[band].active = iwl_mvm_get_active_dwell(band,
326 n_ssids); 297 n_ssids);
327 } 298 }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
761 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 732 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
762 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 733 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
763 int head = 0; 734 int head = 0;
764 int tail = band_2ghz + band_5ghz; 735 int tail = band_2ghz + band_5ghz - 1;
765 u32 ssid_bitmap; 736 u32 ssid_bitmap;
766 int cmd_len; 737 int cmd_len;
767 int ret; 738 int ret;
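The rewritten iwl_mvm_scan_calc_params() drops the fragmented-scan special cases and picks two numbers in TUs (1 TU is 1.024 ms): a generous out-of-channel budget when a vif is bound to a channel, tightened when low-latency traffic is active. A sketch of the selection mirroring the values in the new code; the struct is illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct scan_params {
	int suspend_time;
	int max_out_time;
};

static struct scan_params calc_params(bool bound, bool low_latency)
{
	struct scan_params p = { 0, 0 };  /* unbound: no limits applied */

	if (!bound)
		return p;

	p.suspend_time = 100;    /* pause on the serving channel per visit */
	p.max_out_time = 600;    /* allow up to 600 TU away from it */

	if (low_latency) {       /* tighten both under low-latency traffic */
		p.suspend_time = 250;
		p.max_out_time = 250;
	}
	return p;
}

int main(void)
{
	struct scan_params p = calc_params(true, true);

	printf("suspend=%d TU, max_out=%d TU\n", p.suspend_time, p.max_out_time);
	return 0;
}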
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
644 644
645 return result; 645 return result;
646} 646}
647
648static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
649{
650 bool *idle = _data;
651
652 if (!vif->bss_conf.idle)
653 *idle = false;
654}
655
656bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
657{
658 bool idle = true;
659
660 ieee80211_iterate_active_interfaces_atomic(
661 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
662 iwl_mvm_idle_iter, &idle);
663
664 return idle;
665}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1749 * PCI Tx retries from interfering with C3 CPU state */ 1749 * PCI Tx retries from interfering with C3 CPU state */
1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1751 1751
1752 trans->dev = &pdev->dev;
1753 trans_pcie->pci_dev = pdev;
1754 iwl_disable_interrupts(trans);
1755
1752 err = pci_enable_msi(pdev); 1756 err = pci_enable_msi(pdev);
1753 if (err) { 1757 if (err) {
1754 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 1758 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1760 } 1764 }
1761 } 1765 }
1762 1766
1763 trans->dev = &pdev->dev;
1764 trans_pcie->pci_dev = pdev;
1765 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 1767 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
1766 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 1768 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
1767 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 1769 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1787 goto out_pci_disable_msi; 1789 goto out_pci_disable_msi;
1788 } 1790 }
1789 1791
1790 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1791
1792 if (iwl_pcie_alloc_ict(trans)) 1792 if (iwl_pcie_alloc_ict(trans))
1793 goto out_free_cmd_pool; 1793 goto out_free_cmd_pool;
1794 1794
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1800 goto out_free_ict; 1800 goto out_free_ict;
1801 } 1801 }
1802 1802
1803 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1804
1803 return trans; 1805 return trans;
1804 1806
1805out_free_ict: 1807out_free_ict:
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ddeb5a709aa3..a87ee9b6585a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
621 bss_conf->bssid); 621 bss_conf->bssid);
622 622
623 /* 623 /*
624 * Update the beacon. This is only required on USB devices. PCI
625 * devices fetch beacons periodically.
626 */
627 if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
628 rt2x00queue_update_beacon(rt2x00dev, vif);
629
630 /*
631 * Start/stop beaconing. 624 * Start/stop beaconing.
632 */ 625 */
633 if (changes & BSS_CHANGED_BEACON_ENABLED) { 626 if (changes & BSS_CHANGED_BEACON_ENABLED) {
634 if (!bss_conf->enable_beacon && intf->enable_beacon) { 627 if (!bss_conf->enable_beacon && intf->enable_beacon) {
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
636 rt2x00dev->intf_beaconing--; 628 rt2x00dev->intf_beaconing--;
637 intf->enable_beacon = false; 629 intf->enable_beacon = false;
630 /*
631 * Clear beacon in the H/W for this vif. This is needed
632 * to disable beaconing on this particular interface
633 * and keep it running on other interfaces.
634 */
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
638 636
639 if (rt2x00dev->intf_beaconing == 0) { 637 if (rt2x00dev->intf_beaconing == 0) {
640 /* 638 /*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
645 rt2x00queue_stop_queue(rt2x00dev->bcn); 643 rt2x00queue_stop_queue(rt2x00dev->bcn);
646 mutex_unlock(&intf->beacon_skb_mutex); 644 mutex_unlock(&intf->beacon_skb_mutex);
647 } 645 }
648
649
650 } else if (bss_conf->enable_beacon && !intf->enable_beacon) { 646 } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
651 rt2x00dev->intf_beaconing++; 647 rt2x00dev->intf_beaconing++;
652 intf->enable_beacon = true; 648 intf->enable_beacon = true;
649 /*
650 * Upload beacon to the H/W. This is only required on
651 * USB devices. PCI devices fetch beacons periodically.
652 */
653 if (rt2x00_is_usb(rt2x00dev))
654 rt2x00queue_update_beacon(rt2x00dev, vif);
653 655
654 if (rt2x00dev->intf_beaconing == 1) { 656 if (rt2x00dev->intf_beaconing == 1) {
655 /* 657 /*
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 06ef47cd6203..5b4c225396f2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
293 u8 *psaddr; 293 u8 *psaddr;
294 __le16 fc; 294 __le16 fc;
295 u16 type, ufc; 295 u16 type, ufc;
296 bool match_bssid, packet_toself, packet_beacon, addr; 296 bool match_bssid, packet_toself, packet_beacon = false, addr;
297 297
298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; 298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
299 299
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 68b5c7e92cfb..07cb06da6729 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1001 err = _rtl92cu_init_mac(hw); 1001 err = _rtl92cu_init_mac(hw);
1002 if (err) { 1002 if (err) {
1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); 1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
1004 return err; 1004 goto exit;
1005 } 1005 }
1006 err = rtl92c_download_fw(hw); 1006 err = rtl92c_download_fw(hw);
1007 if (err) { 1007 if (err) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36b48be8329c..2b3c78baa9f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
49 if (ieee80211_is_nullfunc(fc)) 49 if (ieee80211_is_nullfunc(fc))
50 return QSLT_HIGH; 50 return QSLT_HIGH;
51 51
52 /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
53 * queue V0 at priority 7; however, the RTL8192SE appears to have
54 * that queue at priority 6
55 */
56 if (skb->priority == 7)
57 return QSLT_VO;
52 return skb->priority; 58 return skb->priority;
53} 59}
54 60
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
226 grant_ref_t rx_ring_ref); 226 grant_ref_t rx_ring_ref);
227 227
228/* Check for SKBs from frontend and schedule backend processing */ 228/* Check for SKBs from frontend and schedule backend processing */
229void xenvif_check_rx_xenvif(struct xenvif *vif); 229void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
230 230
231/* Prevent the device from generating any further traffic. */ 231/* Prevent the device from generating any further traffic. */
232void xenvif_carrier_off(struct xenvif *vif); 232void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
75 work_done = xenvif_tx_action(vif, budget); 75 work_done = xenvif_tx_action(vif, budget);
76 76
77 if (work_done < budget) { 77 if (work_done < budget) {
78 int more_to_do = 0; 78 napi_complete(napi);
79 unsigned long flags; 79 xenvif_napi_schedule_or_enable_events(vif);
80
81 /* It is necessary to disable IRQ before calling
82 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
83 * lose event from the frontend.
84 *
85 * Consider:
86 * RING_HAS_UNCONSUMED_REQUESTS
87 * <frontend generates event to trigger napi_schedule>
88 * __napi_complete
89 *
90 * This handler is still in scheduled state so the
91 * event has no effect at all. After __napi_complete
92 * this handler is descheduled and cannot get
93 * scheduled again. We lose event in this case and the ring
94 * will be completely stalled.
95 */
96
97 local_irq_save(flags);
98
99 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
100 if (!more_to_do)
101 __napi_complete(napi);
102
103 local_irq_restore(flags);
104 } 80 }
105 81
106 return work_done; 82 return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
194 enable_irq(vif->tx_irq); 170 enable_irq(vif->tx_irq);
195 if (vif->tx_irq != vif->rx_irq) 171 if (vif->tx_irq != vif->rx_irq)
196 enable_irq(vif->rx_irq); 172 enable_irq(vif->rx_irq);
197 xenvif_check_rx_xenvif(vif); 173 xenvif_napi_schedule_or_enable_events(vif);
198} 174}
199 175
200static void xenvif_down(struct xenvif *vif) 176static void xenvif_down(struct xenvif *vif)
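The interface.c hunk replaces the open-coded, IRQ-disabled completion dance with napi_complete() followed by xenvif_napi_schedule_or_enable_events(), which looks at the ring once more and reschedules if requests slipped in while the poller was going idle, closing the lost-event race described in the deleted comment. A userspace model of the complete-then-recheck pattern; the ring is simulated:

#include <stdbool.h>
#include <stdio.h>

static bool scheduled;        /* is the poller queued to run again? */
static int  unconsumed;       /* requests waiting on the shared ring */

static void schedule_or_enable_events(void)
{
	/* Re-check after completion: anything that arrived while we
	 * were going idle gets a fresh schedule instead of stranding. */
	if (unconsumed > 0) {
		scheduled = true;
		printf("late work seen, rescheduled (%d pending)\n", unconsumed);
	} else {
		printf("truly idle (scheduled=%d), events re-enabled\n", scheduled);
	}
}

static void poll_once(int budget)
{
	int done = unconsumed < budget ? unconsumed : budget;

	unconsumed -= done;
	if (done < budget) {
		scheduled = false;            /* plays napi_complete() */
		schedule_or_enable_events();
	}
}

int main(void)
{
	unconsumed = 3;
	poll_once(64);        /* drains fully, then goes idle */

	unconsumed = 5;       /* frontend posts in the completion window */
	scheduled = false;
	schedule_or_enable_events();       /* catches the late work */
	return 0;
}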
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 104
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 105/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 106 */
107static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) 107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
108{ 108{
109 u16 pending_idx = ubuf->desc; 109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 110 struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
323} 323}
324 324
325/* 325/*
326 * Find the grant ref for a given frag in a chain of struct ubuf_info's
327 * skb: the skb itself
328 * i: the frag's number
329 * ubuf: a pointer to an element in the chain. It should not be NULL
330 *
331 * Returns a pointer to the element in the chain where the page was found. If
332 * not found, returns NULL.
333 * See the definition of callback_struct in common.h for more details about
334 * the chain.
335 */
336static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i,
338 const struct ubuf_info *ubuf)
339{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
341
342 do {
343 u16 pending_idx = ubuf->desc;
344
345 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx])
347 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf);
350
351 return ubuf;
352}
353
354/*
326 * Prepare an SKB to be transmitted to the frontend. 355 * Prepare an SKB to be transmitted to the frontend.
327 * 356 *
328 * This function is responsible for allocating grant operations, meta 357 * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
346 int head = 1; 375 int head = 1;
347 int old_meta_prod; 376 int old_meta_prod;
348 int gso_type; 377 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; 378 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; 379 const struct ubuf_info *const head_ubuf = ubuf;
351 struct xenvif *foreign_vif = NULL;
352 380
353 old_meta_prod = npo->meta_prod; 381 old_meta_prod = npo->meta_prod;
354 382
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
386 npo->copy_off = 0; 414 npo->copy_off = 0;
387 npo->copy_gref = req->gref; 415 npo->copy_gref = req->gref;
388 416
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
402 data = skb->data; 417 data = skb->data;
403 while (data < skb_tail_pointer(skb)) { 418 while (data < skb_tail_pointer(skb)) {
404 unsigned int offset = offset_in_page(data); 419 unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
415 } 430 }
416 431
417 for (i = 0; i < nr_frags; i++) { 432 for (i = 0; i < nr_frags; i++) {
433 /* This variable also signals whether foreign_gref has a real
434 * value or not.
435 */
436 struct xenvif *foreign_vif = NULL;
437 grant_ref_t foreign_gref;
438
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
440 (ubuf->callback == &xenvif_zerocopy_callback)) {
441 const struct ubuf_info *const startpoint = ubuf;
442
443 /* Ideally ubuf points to the chain element which
444 * belongs to this frag. Or if frags were removed from
445 * the beginning, then shortly before it.
446 */
447 ubuf = xenvif_find_gref(skb, i, ubuf);
448
449 /* Try again from the beginning of the list, if we
450 * haven't tried from there. This only makes sense in
451 * the unlikely event of reordering the original frags.
452 * For injected local pages it's an unnecessary second
453 * run.
454 */
455 if (unlikely(!ubuf) && startpoint != head_ubuf)
456 ubuf = xenvif_find_gref(skb, i, head_ubuf);
457
458 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc;
460
461 foreign_vif = ubuf_to_vif(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
 463 /* Just a safety measure. If this was the last
 464 * element on the list, the for loop will
 465 * iterate again if a local page was added to
 466 * the end. Using head_ubuf here prevents the
 467 * second search on the chain. The original
 468 * frags may also have changed order, but that's
 469 * less likely. In any case, ubuf shouldn't be NULL.
470 */
471 ubuf = ubuf->ctx ?
472 (struct ubuf_info *) ubuf->ctx :
473 head_ubuf;
474 } else
475 /* This frag was a local page, added to the
476 * array after the skb left netback.
477 */
478 ubuf = head_ubuf;
479 }
418 xenvif_gop_frag_copy(vif, skb, npo, 480 xenvif_gop_frag_copy(vif, skb, npo,
419 skb_frag_page(&skb_shinfo(skb)->frags[i]), 481 skb_frag_page(&skb_shinfo(skb)->frags[i]),
420 skb_frag_size(&skb_shinfo(skb)->frags[i]), 482 skb_frag_size(&skb_shinfo(skb)->frags[i]),
421 skb_shinfo(skb)->frags[i].page_offset, 483 skb_shinfo(skb)->frags[i].page_offset,
422 &head, 484 &head,
423 foreign_vif, 485 foreign_vif,
424 foreign_grefs[i]); 486 foreign_vif ? foreign_gref : UINT_MAX);
425 } 487 }
426 488
427 return npo->meta_prod - old_meta_prod; 489 return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
654 notify_remote_via_irq(vif->rx_irq); 716 notify_remote_via_irq(vif->rx_irq);
655} 717}
656 718
657void xenvif_check_rx_xenvif(struct xenvif *vif) 719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
658{ 720{
659 int more_to_do; 721 int more_to_do;
660 722
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
688{ 750{
689 struct xenvif *vif = (struct xenvif *)data; 751 struct xenvif *vif = (struct xenvif *)data;
690 tx_add_credit(vif); 752 tx_add_credit(vif);
691 xenvif_check_rx_xenvif(vif); 753 xenvif_napi_schedule_or_enable_events(vif);
692} 754}
693 755
694static void xenvif_tx_err(struct xenvif *vif, 756static void xenvif_tx_err(struct xenvif *vif,
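The new xenvif_find_gref() helper above walks a singly linked chain of struct ubuf_info elements threaded through the ctx pointer, matching each element's mapped page against the frag's page. A minimal standalone sketch of that walk, with simplified stand-in types (only the desc and ctx fields mirror the kernel struct; the mmap_pages array is illustrative):

#include <stddef.h>
#include <stdint.h>

struct ubuf_info {
        uint16_t desc;  /* index into the owning vif's mmap_pages[] */
        void *ctx;      /* next element in the chain, or NULL */
};

/* Walk the chain until an element maps its desc onto 'page'. */
static const struct ubuf_info *
find_gref(void *const *mmap_pages, const void *page,
          const struct ubuf_info *ubuf)
{
        do {
                if (mmap_pages[ubuf->desc] == page)
                        return ubuf;    /* match: the frag is foreign */
                ubuf = (const struct ubuf_info *)ubuf->ctx;
        } while (ubuf);

        return NULL;    /* not in the chain: a locally injected page */
}

As in the driver, a NULL result is not an error; it simply means the frag was a local page added after the skb left netback.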
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6d4ee22708c9..32e969d95319 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
1831 if (!found) 1831 if (!found)
1832 return -ENODEV; 1832 return -ENODEV;
1833 1833
1834 /* At early boot, bail out and defer setup to of_init() */
1835 if (!of_kset)
1836 return found ? 0 : -ENODEV;
1837
1834 /* Update the sysfs attribute */ 1838 /* Update the sysfs attribute */
1835 sysfs_remove_bin_file(&np->kobj, &oldprop->attr); 1839 sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
1836 __of_add_property_sysfs(np, newprop); 1840 __of_add_property_sysfs(np, newprop);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index d3d1cfd51e09..e384e2534594 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
293 return PCIBIOS_SUCCESSFUL; 293 return PCIBIOS_SUCCESSFUL;
294} 294}
295 295
296/*
 297 * Remove windows, starting from the largest one and proceeding to
 298 * the smallest.
299 */
300static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
301 phys_addr_t base, size_t size)
302{
303 while (size) {
304 size_t sz = 1 << (fls(size) - 1);
305
306 mvebu_mbus_del_window(base, sz);
307 base += sz;
308 size -= sz;
309 }
310}
311
312/*
313 * MBus windows can only have a power of two size, but PCI BARs do not
314 * have this constraint. Therefore, we have to split the PCI BAR into
315 * areas each having a power of two size. We start from the largest
 316 * one (i.e. the highest-order bit set in the size).
317 */
318static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
319 unsigned int target, unsigned int attribute,
320 phys_addr_t base, size_t size,
321 phys_addr_t remap)
322{
323 size_t size_mapped = 0;
324
325 while (size) {
326 size_t sz = 1 << (fls(size) - 1);
327 int ret;
328
329 ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
330 sz, remap);
331 if (ret) {
332 dev_err(&port->pcie->pdev->dev,
333 "Could not create MBus window at 0x%x, size 0x%x: %d\n",
334 base, sz, ret);
335 mvebu_pcie_del_windows(port, base - size_mapped,
336 size_mapped);
337 return;
338 }
339
340 size -= sz;
341 size_mapped += sz;
342 base += sz;
343 if (remap != MVEBU_MBUS_NO_REMAP)
344 remap += sz;
345 }
346}
347
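mvebu_pcie_add_windows() has to decompose an arbitrarily sized BAR into power-of-two chunks, largest first, because each MBus window must be a power of two in size. A hedged standalone sketch of the same decomposition, using a portable highest-set-bit loop in place of the kernel's fls():

#include <stdio.h>
#include <stddef.h>

/* Largest power of two <= size; size must be non-zero. */
static size_t top_pow2(size_t size)
{
        size_t sz = 1;

        while (size >>= 1)
                sz <<= 1;
        return sz;
}

/* Decompose [base, base + size) into power-of-two windows. */
static void split_windows(size_t base, size_t size)
{
        while (size) {
                size_t sz = top_pow2(size);

                printf("window at 0x%zx, size 0x%zx\n", base, sz);
                base += sz;
                size -= sz;
        }
}

For example, split_windows(0x80000000, 0x300000) emits a 2 MB window followed by a 1 MB one, the same largest-first order the driver produces and later tears down in mvebu_pcie_del_windows().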
296static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) 348static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
297{ 349{
298 phys_addr_t iobase; 350 phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
304 356
305 /* If a window was configured, remove it */ 357 /* If a window was configured, remove it */
306 if (port->iowin_base) { 358 if (port->iowin_base) {
307 mvebu_mbus_del_window(port->iowin_base, 359 mvebu_pcie_del_windows(port, port->iowin_base,
308 port->iowin_size); 360 port->iowin_size);
309 port->iowin_base = 0; 361 port->iowin_base = 0;
310 port->iowin_size = 0; 362 port->iowin_size = 0;
311 } 363 }
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
331 port->iowin_base = port->pcie->io.start + iobase; 383 port->iowin_base = port->pcie->io.start + iobase;
332 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | 384 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
333 (port->bridge.iolimitupper << 16)) - 385 (port->bridge.iolimitupper << 16)) -
334 iobase); 386 iobase) + 1;
335 387
336 mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, 388 mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
337 port->iowin_base, port->iowin_size, 389 port->iowin_base, port->iowin_size,
338 iobase); 390 iobase);
339} 391}
340 392
341static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) 393static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
346 398
347 /* If a window was configured, remove it */ 399 /* If a window was configured, remove it */
348 if (port->memwin_base) { 400 if (port->memwin_base) {
349 mvebu_mbus_del_window(port->memwin_base, 401 mvebu_pcie_del_windows(port, port->memwin_base,
350 port->memwin_size); 402 port->memwin_size);
351 port->memwin_base = 0; 403 port->memwin_base = 0;
352 port->memwin_size = 0; 404 port->memwin_size = 0;
353 } 405 }
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
364 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); 416 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
365 port->memwin_size = 417 port->memwin_size =
366 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - 418 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
367 port->memwin_base; 419 port->memwin_base + 1;
368 420
369 mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, 421 mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
370 port->memwin_base, port->memwin_size); 422 port->memwin_base, port->memwin_size,
423 MVEBU_MBUS_NO_REMAP);
371} 424}
372 425
373/* 426/*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
743 796
744 /* 797 /*
745 * On the PCI-to-PCI bridge side, the I/O windows must have at 798 * On the PCI-to-PCI bridge side, the I/O windows must have at
746 * least a 64 KB size and be aligned on their size, and the 799 * least a 64 KB size and the memory windows must have at
747 * memory windows must have at least a 1 MB size and be 800 * least a 1 MB size. Moreover, MBus windows need to have a
748 * aligned on their size 801 * base address aligned on their size, and their size must be
802 * a power of two. This means that if the BAR doesn't have a
803 * power of two size, several MBus windows will actually be
804 * created. We need to ensure that the biggest MBus window
805 * (which will be the first one) is aligned on its size, which
806 * explains the rounddown_pow_of_two() being done here.
749 */ 807 */
750 if (res->flags & IORESOURCE_IO) 808 if (res->flags & IORESOURCE_IO)
751 return round_up(start, max_t(resource_size_t, SZ_64K, size)); 809 return round_up(start, max_t(resource_size_t, SZ_64K,
810 rounddown_pow_of_two(size)));
752 else if (res->flags & IORESOURCE_MEM) 811 else if (res->flags & IORESOURCE_MEM)
753 return round_up(start, max_t(resource_size_t, SZ_1M, size)); 812 return round_up(start, max_t(resource_size_t, SZ_1M,
813 rounddown_pow_of_two(size)));
754 else 814 else
755 return start; 815 return start;
756} 816}
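The matching align_resource change rounds the alignment requirement down to a power of two, since only the first (largest) window needs to sit on a power-of-two boundary. A small sketch of the rounding arithmetic; roundup_to() mirrors the kernel's round_up() for power-of-two alignments:

#include <stddef.h>

/* Round start up to a multiple of align; align must be a power of two. */
static size_t roundup_to(size_t start, size_t align)
{
        return (start + align - 1) & ~(align - 1);
}

/*
 * Example: a 3 MB (0x300000) memory BAR.  rounddown_pow_of_two(0x300000)
 * is 0x200000, so the base is aligned to 2 MB, the size of the leading
 * MBus window, instead of to the full, non-power-of-two 3 MB.
 */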
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 58499277903a..6efc2ec5e4db 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
282 return WRONG_BUS_FREQUENCY; 282 return WRONG_BUS_FREQUENCY;
283 } 283 }
284 284
285 bsp = ctrl->pci_dev->bus->cur_bus_speed; 285 bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
286 msp = ctrl->pci_dev->bus->max_bus_speed; 286 msp = ctrl->pci_dev->subordinate->max_bus_speed;
287 287
288 /* Check if there are other slots or devices on the same bus */ 288 /* Check if there are other slots or devices on the same bus */
289 if (!list_empty(&ctrl->pci_dev->subordinate->devices)) 289 if (!list_empty(&ctrl->pci_dev->subordinate->devices))
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7325d43bf030..759475ef6ff3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
3067 if (!pci_is_pcie(dev)) 3067 if (!pci_is_pcie(dev))
3068 return 1; 3068 return 1;
3069 3069
3070 return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); 3070 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3071 PCI_EXP_DEVSTA_TRPND);
3071} 3072}
3072EXPORT_SYMBOL(pci_wait_for_pending_transaction); 3073EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3073 3074
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3109 return 0; 3110 return 0;
3110 3111
3111 /* Wait for Transaction Pending bit clean */ 3112 /* Wait for Transaction Pending bit clean */
3112 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3113 if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
3113 goto clear; 3114 goto clear;
3114 3115
3115 dev_err(&dev->dev, "transaction is not cleared; " 3116 dev_err(&dev->dev, "transaction is not cleared; "
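The pci.c fix addresses PCI_EXP_DEVSTA relative to the start of the PCIe capability, which pci_pcie_cap() returns, instead of treating it as an absolute config-space offset. A sketch of the addressing; read_config16() and pcie_cap_offset() are hypothetical stand-ins for the config accessors:

#include <stdint.h>
#include <stdbool.h>

#define PCI_EXP_DEVSTA          0x0a    /* Device Status, relative to the cap */
#define PCI_EXP_DEVSTA_TRPND    0x0020  /* Transactions Pending bit */

uint16_t read_config16(void *dev, unsigned int where); /* hypothetical */
unsigned int pcie_cap_offset(void *dev);               /* hypothetical */

static bool transaction_pending(void *dev)
{
        unsigned int pos = pcie_cap_offset(dev);  /* cap start, per device */

        /* The register offset is relative to the capability, not to 0. */
        return read_config16(dev, pos + PCI_EXP_DEVSTA) & PCI_EXP_DEVSTA_TRPND;
}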
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 92ed4b2e3c07..c862f9c0e9ce 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -64,7 +64,6 @@ struct as3722_pin_function {
64}; 64};
65 65
66struct as3722_gpio_pin_control { 66struct as3722_gpio_pin_control {
67 bool enable_gpio_invert;
68 unsigned mode_prop; 67 unsigned mode_prop;
69 int io_function; 68 int io_function;
70}; 69};
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
320 return mode; 319 return mode;
321 } 320 }
322 321
323 if (as_pci->gpio_control[offset].enable_gpio_invert) 322 return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
324 mode |= AS3722_GPIO_INV; 323 AS3722_GPIO_MODE_MASK, mode);
325
326 return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
327} 324}
328 325
329static const struct pinmux_ops as3722_pinmux_ops = { 326static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
496{ 493{
497 struct as3722_pctrl_info *as_pci = to_as_pci(chip); 494 struct as3722_pctrl_info *as_pci = to_as_pci(chip);
498 struct as3722 *as3722 = as_pci->as3722; 495 struct as3722 *as3722 = as_pci->as3722;
499 int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; 496 int en_invert;
500 u32 val; 497 u32 val;
501 int ret; 498 int ret;
502 499
500 ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
501 if (ret < 0) {
502 dev_err(as_pci->dev,
503 "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
504 return;
505 }
506 en_invert = !!(val & AS3722_GPIO_INV);
507
503 if (value) 508 if (value)
504 val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset); 509 val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
505 else 510 else
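Both as3722 changes stop trusting cached invert state: the mode update now goes through as3722_update_bits(), a masked read-modify-write, and the set path reads AS3722_GPIO_INV back from the hardware. A generic sketch of the update_bits primitive over a hypothetical register accessor pair:

#include <stdint.h>

uint8_t reg_read(unsigned int reg);             /* hypothetical accessor */
void reg_write(unsigned int reg, uint8_t val);  /* hypothetical accessor */

/* Update only the bits selected by mask, leaving the others untouched. */
static void update_bits(unsigned int reg, uint8_t mask, uint8_t val)
{
        uint8_t tmp = reg_read(reg);

        tmp &= ~mask;           /* clear the field */
        tmp |= val & mask;      /* insert the new value */
        reg_write(reg, tmp);
}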
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 81075f2a1d3f..2960557bfed9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
810static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, 810static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
811 unsigned pin_pos) 811 unsigned pin_pos)
812{ 812{
813 struct pcs_soc_data *pcs_soc = &pcs->socdata;
813 struct pinctrl_pin_desc *pin; 814 struct pinctrl_pin_desc *pin;
814 struct pcs_name *pn; 815 struct pcs_name *pn;
815 int i; 816 int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
821 return -ENOMEM; 822 return -ENOMEM;
822 } 823 }
823 824
825 if (pcs_soc->irq_enable_mask) {
826 unsigned val;
827
828 val = pcs->read(pcs->base + offset);
829 if (val & pcs_soc->irq_enable_mask) {
830 dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
831 (unsigned long)pcs->res->start + offset, val);
832 val &= ~pcs_soc->irq_enable_mask;
833 pcs->write(val, pcs->base + offset);
834 }
835 }
836
824 pin = &pcs->pins.pa[i]; 837 pin = &pcs->pins.pa[i];
825 pn = &pcs->names[i]; 838 pn = &pcs->names[i];
826 sprintf(pn->name, "%lx.%d", 839 sprintf(pn->name, "%lx.%d",
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c5e0f6973a3b..26ca6855f478 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
629 */ 629 */
630 for (i = 0; i < state->pinfuncgrpcnt; i++) { 630 for (i = 0; i < state->pinfuncgrpcnt; i++) {
631 const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; 631 const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
632 unsigned int port = pfg->port;
633 unsigned int mode = pfg->mode; 632 unsigned int mode = pfg->mode;
634 int j; 633 int j, port = pfg->port;
635 634
636 /* 635 /*
637 * Skip pin groups which are always mapped and don't need 636 * Skip pin groups which are always mapped and don't need
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 48093719167a..f5cd3f961808 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
4794 FN_MSIOF0_SCK_B, 0, 4794 FN_MSIOF0_SCK_B, 0,
4795 /* IP5_23_21 [3] */ 4795 /* IP5_23_21 [3] */
4796 FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, 4796 FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
4797 FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, 4797 FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
4798 FN_IERX_C, 0,
4799 /* IP5_20_18 [3] */ 4798 /* IP5_20_18 [3] */
4800 FN_WE0_N, FN_IECLK, FN_CAN_CLK, 4799 FN_WE0_N, FN_IECLK, FN_CAN_CLK,
4801 FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0, 4800 FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5186d70c49d4..7868bf3a0f91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
5288 /* SEL_SCIF3 [2] */ 5288 /* SEL_SCIF3 [2] */
5289 FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, 5289 FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
5290 /* SEL_IEB [2] */ 5290 /* SEL_IEB [2] */
5291 FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 5291 FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
5292 /* SEL_MMC [1] */ 5292 /* SEL_MMC [1] */
5293 FN_SEL_MMC_0, FN_SEL_MMC_1, 5293 FN_SEL_MMC_0, FN_SEL_MMC_1,
5294 /* SEL_SCIF5 [1] */ 5294 /* SEL_SCIF5 [1] */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
523 return GPIOF_DIR_IN; 523 return GPIOF_DIR_IN;
524} 524}
525 525
526static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
527{
528 return pinctrl_gpio_direction_input(chip->base + offset);
529}
530
531static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
532 int value)
533{
534 return pinctrl_gpio_direction_output(chip->base + offset);
535}
536
537static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) 526static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
538{ 527{
539 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); 528 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
568 wmt_clearbits(data, reg_data_out, BIT(bit)); 557 wmt_clearbits(data, reg_data_out, BIT(bit));
569} 558}
570 559
560static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
561{
562 return pinctrl_gpio_direction_input(chip->base + offset);
563}
564
565static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
566 int value)
567{
568 wmt_gpio_set_value(chip, offset, value);
569 return pinctrl_gpio_direction_output(chip->base + offset);
570}
571
571static struct gpio_chip wmt_gpio_chip = { 572static struct gpio_chip wmt_gpio_chip = {
572 .label = "gpio-wmt", 573 .label = "gpio-wmt",
573 .owner = THIS_MODULE, 574 .owner = THIS_MODULE,
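The reordered wmt_gpio_direction_output() now latches the requested level before switching the pad to output mode, so the line never briefly drives a stale value. In sketch form, with hypothetical stand-ins for the two hardware operations:

void set_value(unsigned int offset, int value);  /* hypothetical */
int set_direction_output(unsigned int offset);   /* hypothetical */

static int direction_output(unsigned int offset, int value)
{
        set_value(offset, value);              /* latch the level first... */
        return set_direction_output(offset);   /* ...then enable the driver */
}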
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9f611cbbc294..c31aa07b3ba5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
83{ 83{
84 struct acpi_device *acpi_dev; 84 struct acpi_device *acpi_dev;
85 acpi_handle handle; 85 acpi_handle handle;
86 struct acpi_buffer buffer; 86 int ret = 0;
87 int ret;
88 87
89 pnp_dbg(&dev->dev, "set resources\n"); 88 pnp_dbg(&dev->dev, "set resources\n");
90 89
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
97 if (WARN_ON_ONCE(acpi_dev != dev->data)) 96 if (WARN_ON_ONCE(acpi_dev != dev->data))
98 dev->data = acpi_dev; 97 dev->data = acpi_dev;
99 98
100 ret = pnpacpi_build_resource_template(dev, &buffer); 99 if (acpi_has_method(handle, METHOD_NAME__SRS)) {
101 if (ret) 100 struct acpi_buffer buffer;
102 return ret; 101
103 ret = pnpacpi_encode_resources(dev, &buffer); 102 ret = pnpacpi_build_resource_template(dev, &buffer);
104 if (ret) { 103 if (ret)
104 return ret;
105
106 ret = pnpacpi_encode_resources(dev, &buffer);
107 if (!ret) {
108 acpi_status status;
109
110 status = acpi_set_current_resources(handle, &buffer);
111 if (ACPI_FAILURE(status))
112 ret = -EIO;
113 }
105 kfree(buffer.pointer); 114 kfree(buffer.pointer);
106 return ret;
107 } 115 }
108 if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) 116 if (!ret && acpi_bus_power_manageable(handle))
109 ret = -EINVAL;
110 else if (acpi_bus_power_manageable(handle))
111 ret = acpi_bus_set_power(handle, ACPI_STATE_D0); 117 ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
112 kfree(buffer.pointer); 118
113 return ret; 119 return ret;
114} 120}
115 121
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
117{ 123{
118 struct acpi_device *acpi_dev; 124 struct acpi_device *acpi_dev;
119 acpi_handle handle; 125 acpi_handle handle;
120 int ret; 126 acpi_status status;
121 127
122 dev_dbg(&dev->dev, "disable resources\n"); 128 dev_dbg(&dev->dev, "disable resources\n");
123 129
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
128 } 134 }
129 135
130 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 136 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
131 ret = 0;
132 if (acpi_bus_power_manageable(handle)) 137 if (acpi_bus_power_manageable(handle))
133 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); 138 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
134 /* continue even if acpi_bus_set_power() fails */ 139
135 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) 140 /* continue even if acpi_bus_set_power() fails */
136 ret = -ENODEV; 141 status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
137 return ret; 142 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
143 return -ENODEV;
144
145 return 0;
138} 146}
139 147
140#ifdef CONFIG_ACPI_SLEEP 148#ifdef CONFIG_ACPI_SLEEP
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index deb7f4bcdb7b..438d4c72c7b3 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -37,7 +37,7 @@ __visible struct {
37 * kernel begins at offset 3GB... 37 * kernel begins at offset 3GB...
38 */ 38 */
39 39
40asmlinkage void pnp_bios_callfunc(void); 40asmlinkage __visible void pnp_bios_callfunc(void);
41 41
42__asm__(".text \n" 42__asm__(".text \n"
43 __ALIGN_STR "\n" 43 __ALIGN_STR "\n"
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 3736bc408adb..ebf0d6710b5a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
335} 335}
336#endif 336#endif
337 337
338#ifdef CONFIG_X86 338#ifdef CONFIG_PCI
339/* Device IDs of parts that have 32KB MCH space */ 339/* Device IDs of parts that have 32KB MCH space */
340static const unsigned int mch_quirk_devices[] = { 340static const unsigned int mch_quirk_devices[] = {
341 0x0154, /* Ivy Bridge */ 341 0x0154, /* Ivy Bridge */
@@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = {
440#ifdef CONFIG_AMD_NB 440#ifdef CONFIG_AMD_NB
441 {"PNP0c01", quirk_amd_mmconfig_area}, 441 {"PNP0c01", quirk_amd_mmconfig_area},
442#endif 442#endif
443#ifdef CONFIG_X86 443#ifdef CONFIG_PCI
444 {"PNP0c02", quirk_intel_mch}, 444 {"PNP0c02", quirk_intel_mch},
445#endif 445#endif
446 {""} 446 {""}
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
6 6
7config PTP_1588_CLOCK 7config PTP_1588_CLOCK
8 tristate "PTP clock support" 8 tristate "PTP clock support"
9 depends on NET
9 select PPS 10 select PPS
10 select NET_PTP_CLASSIFY 11 select NET_PTP_CLASSIFY
11 help 12 help
@@ -74,7 +75,7 @@ config DP83640_PHY
74config PTP_1588_CLOCK_PCH 75config PTP_1588_CLOCK_PCH
75 tristate "Intel PCH EG20T as PTP clock" 76 tristate "Intel PCH EG20T as PTP clock"
76 depends on X86 || COMPILE_TEST 77 depends on X86 || COMPILE_TEST
77 depends on HAS_IOMEM 78 depends on HAS_IOMEM && NET
78 select PTP_1588_CLOCK 79 select PTP_1588_CLOCK
79 help 80 help
80 This driver adds support for using the PCH EG20T as a PTP 81 This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index bd628a6f981d..e5f13c4310fe 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
569 if (IS_ERR(hym8563->rtc)) 569 if (IS_ERR(hym8563->rtc))
570 return PTR_ERR(hym8563->rtc); 570 return PTR_ERR(hym8563->rtc);
571 571
572 /* the hym8563 alarm only supports a minute accuracy */
573 hym8563->rtc->uie_unsupported = 1;
574
572#ifdef CONFIG_COMMON_CLK 575#ifdef CONFIG_COMMON_CLK
573 hym8563_clkout_register_clk(hym8563); 576 hym8563_clkout_register_clk(hym8563);
574#endif 577#endif
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5c8f8226c848..4cdb64be061b 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
206 tm->tm_hour = bcd2bin(regs[2] & 0x3f); 206 tm->tm_hour = bcd2bin(regs[2] & 0x3f);
207 tm->tm_mday = bcd2bin(regs[3] & 0x3f); 207 tm->tm_mday = bcd2bin(regs[3] & 0x3f);
208 tm->tm_wday = regs[4] & 0x7; 208 tm->tm_wday = regs[4] & 0x7;
209 tm->tm_mon = bcd2bin(regs[5] & 0x1f); 209 tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
210 tm->tm_year = bcd2bin(regs[6]) + 100; 210 tm->tm_year = bcd2bin(regs[6]) + 100;
211 211
212 return rtc_valid_tm(tm); 212 return rtc_valid_tm(tm);
@@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
229 regs[3] = bin2bcd(tm->tm_hour); 229 regs[3] = bin2bcd(tm->tm_hour);
230 regs[4] = bin2bcd(tm->tm_mday); 230 regs[4] = bin2bcd(tm->tm_mday);
231 regs[5] = tm->tm_wday; 231 regs[5] = tm->tm_wday;
232 regs[6] = bin2bcd(tm->tm_mon); 232 regs[6] = bin2bcd(tm->tm_mon + 1);
233 regs[7] = bin2bcd(tm->tm_year - 100); 233 regs[7] = bin2bcd(tm->tm_year - 100);
234 234
235 msg.addr = client->addr; 235 msg.addr = client->addr;
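The pcf8523 fix reconciles struct rtc_time, where tm_mon runs 0..11, with the chip's 1..12 BCD month register. A small sketch of the two conversions, with local BCD helpers matching the kernel's bcd2bin()/bin2bcd() semantics:

#include <stdint.h>

static unsigned int bcd2bin(uint8_t val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

static uint8_t bin2bcd(unsigned int val)
{
        return (uint8_t)(((val / 10) << 4) | (val % 10));
}

/* Chip month register (1..12, BCD) -> struct rtc_time tm_mon (0..11). */
static int month_from_reg(uint8_t reg)
{
        return (int)bcd2bin(reg & 0x1f) - 1;
}

/* struct rtc_time tm_mon (0..11) -> chip month register (1..12, BCD). */
static uint8_t month_to_reg(int tm_mon)
{
        return bin2bcd((unsigned int)(tm_mon + 1));
}

So a register value of 0x12 reads back as tm_mon == 11 (December), and writing December stores 0x12 again.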
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9f0ea6cb6922..e3bf885f4a6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
541 541
542static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 542static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
543{ 543{
544 do { 544 static int ntsm_unsupported;
545
546 while (true) {
545 memset(sei, 0, sizeof(*sei)); 547 memset(sei, 0, sizeof(*sei));
546 sei->request.length = 0x0010; 548 sei->request.length = 0x0010;
547 sei->request.code = 0x000e; 549 sei->request.code = 0x000e;
548 sei->ntsm = ntsm; 550 if (!ntsm_unsupported)
551 sei->ntsm = ntsm;
549 552
550 if (chsc(sei)) 553 if (chsc(sei))
551 break; 554 break;
552 555
553 if (sei->response.code != 0x0001) { 556 if (sei->response.code != 0x0001) {
554 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 557 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
555 sei->response.code); 558 sei->response.code, sei->ntsm);
559
560 if (sei->response.code == 3 && sei->ntsm) {
561 /* Fallback for old firmware. */
562 ntsm_unsupported = 1;
563 continue;
564 }
556 break; 565 break;
557 } 566 }
558 567
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
568 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 577 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
569 break; 578 break;
570 } 579 }
571 } while (sei->u.nt0_area.flags & 0x80); 580
581 if (!(sei->u.nt0_area.flags & 0x80))
582 break;
583 }
572} 584}
573 585
574/* 586/*
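chsc_process_event_information() now latches a sticky ntsm_unsupported flag when firmware answers response code 3 to a non-zero notification-type mask, and retries without the mask. A stripped-down sketch of that fallback loop; request() and the response-code names are stand-ins for the CHSC call:

#include <stdbool.h>
#include <stdint.h>

#define RC_OK           0x0001
#define RC_BAD_MASK     0x0003

int request(uint64_t mask);  /* hypothetical: returns a response code */

static void process_events(uint64_t ntsm)
{
        static bool ntsm_unsupported;  /* sticky across calls */

        while (true) {
                int rc = request(ntsm_unsupported ? 0 : ntsm);

                if (rc == RC_OK)
                        break;  /* the real code keeps looping on flag 0x80 */
                if (rc == RC_BAD_MASK && ntsm) {
                        ntsm_unsupported = true;  /* old firmware */
                        continue;    /* retry once without the mask */
                }
                break;  /* any other failure: give up */
        }
}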
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7f0af4fcc001..6fd7d40b2c4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
8293 8293
8294 mpt2sas_base_free_resources(ioc); 8294 mpt2sas_base_free_resources(ioc);
8295 pci_save_state(pdev); 8295 pci_save_state(pdev);
8296 pci_disable_device(pdev);
8297 pci_set_power_state(pdev, device_state); 8296 pci_set_power_state(pdev, device_state);
8298 return 0; 8297 return 0;
8299} 8298}
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index fe30ea94ffe6..109802f776ed 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
77 goto next_msg; 77 goto next_msg;
78 } 78 }
79 79
80 if (!capable(CAP_SYS_ADMIN)) { 80 if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
81 err = -EPERM; 81 err = -EPERM;
82 goto next_msg; 82 goto next_msg;
83 } 83 }
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
1621 list_del(&rphy->list); 1621 list_del(&rphy->list);
1622 mutex_unlock(&sas_host->lock); 1622 mutex_unlock(&sas_host->lock);
1623 1623
1624 sas_bsg_remove(shost, rphy);
1625
1626 transport_destroy_device(dev); 1624 transport_destroy_device(dev);
1627 1625
1628 put_device(dev); 1626 put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
1681 } 1679 }
1682 1680
1683 sas_rphy_unlink(rphy); 1681 sas_rphy_unlink(rphy);
1682 sas_bsg_remove(NULL, rphy);
1684 transport_remove_device(dev); 1683 transport_remove_device(dev);
1685 device_del(dev); 1684 device_del(dev);
1686} 1685}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 16bfd50cd3fe..db3b494e5926 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
750 750
751 vscsi->affinity_hint_set = true; 751 vscsi->affinity_hint_set = true;
752 } else { 752 } else {
753 for (i = 0; i < vscsi->num_queues; i++) 753 for (i = 0; i < vscsi->num_queues; i++) {
754 if (!vscsi->req_vqs[i].vq)
755 continue;
756
754 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); 757 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
758 }
755 759
756 vscsi->affinity_hint_set = false; 760 vscsi->affinity_hint_set = false;
757 } 761 }
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index fc67f564f02c..788ed9b59b4e 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,10 +1,12 @@
1# 1#
2# Makefile for the SuperH specific drivers. 2# Makefile for the SuperH specific drivers.
3# 3#
4obj-y := intc/ 4obj-$(CONFIG_SUPERH) += intc/
5obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/
6ifneq ($(CONFIG_COMMON_CLK),y)
7obj-$(CONFIG_HAVE_CLK) += clk/
8endif
9obj-$(CONFIG_MAPLE) += maple/
10obj-$(CONFIG_SUPERHYWAY) += superhyway/
5 11
6obj-$(CONFIG_HAVE_CLK) += clk/ 12obj-y += pm_runtime.o
7obj-$(CONFIG_MAPLE) += maple/
8obj-$(CONFIG_SUPERHYWAY) += superhyway/
9
10obj-y += pm_runtime.o
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 8afa5a4589f2..10c65eb51f85 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
50 .con_ids = { NULL, }, 50 .con_ids = { NULL, },
51}; 51};
52 52
53static bool default_pm_on;
54
53static int __init sh_pm_runtime_init(void) 55static int __init sh_pm_runtime_init(void)
54{ 56{
57 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
58 if (!of_machine_is_compatible("renesas,emev2") &&
59 !of_machine_is_compatible("renesas,r7s72100") &&
60 !of_machine_is_compatible("renesas,r8a73a4") &&
61 !of_machine_is_compatible("renesas,r8a7740") &&
62 !of_machine_is_compatible("renesas,r8a7778") &&
63 !of_machine_is_compatible("renesas,r8a7779") &&
64 !of_machine_is_compatible("renesas,r8a7790") &&
65 !of_machine_is_compatible("renesas,r8a7791") &&
66 !of_machine_is_compatible("renesas,sh7372") &&
67 !of_machine_is_compatible("renesas,sh73a0"))
68 return 0;
69 }
70
71 default_pm_on = true;
55 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 72 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
56 return 0; 73 return 0;
57} 74}
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
59 76
60static int __init sh_pm_runtime_late_init(void) 77static int __init sh_pm_runtime_late_init(void)
61{ 78{
62 pm_genpd_poweroff_unused(); 79 if (default_pm_on)
80 pm_genpd_poweroff_unused();
63 return 0; 81 return 0;
64} 82}
65late_initcall(sh_pm_runtime_late_init); 83late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
29 struct sg_table *sgt; 29 struct sg_table *sgt;
30 void *buf, *pbuf; 30 void *buf, *pbuf;
31 31
32 /*
33 * Some DMA controllers have problems transferring buffers that are
34 * not multiple of 4 bytes. So we truncate the transfer so that it
35 * is suitable for such controllers, and handle the trailing bytes
36 * manually after the DMA completes.
37 *
38 * REVISIT: It would be better if this information could be
39 * retrieved directly from the DMA device in a similar way than
40 * ->copy_align etc. is done.
41 */
42 len = ALIGN(drv_data->len, 4);
43
44 if (dir == DMA_TO_DEVICE) { 32 if (dir == DMA_TO_DEVICE) {
45 dmadev = drv_data->tx_chan->device->dev; 33 dmadev = drv_data->tx_chan->device->dev;
46 sgt = &drv_data->tx_sgt; 34 sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
144 if (!error) { 132 if (!error) {
145 pxa2xx_spi_unmap_dma_buffers(drv_data); 133 pxa2xx_spi_unmap_dma_buffers(drv_data);
146 134
147 /* Handle the last bytes of unaligned transfer */
148 drv_data->tx += drv_data->tx_map_len; 135 drv_data->tx += drv_data->tx_map_len;
149 drv_data->write(drv_data);
150
151 drv_data->rx += drv_data->rx_map_len; 136 drv_data->rx += drv_data->rx_map_len;
152 drv_data->read(drv_data);
153 137
154 msg->actual_length += drv_data->len; 138 msg->actual_length += drv_data->len;
155 msg->state = pxa2xx_spi_next_transfer(drv_data); 139 msg->state = pxa2xx_spi_next_transfer(drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..78c66e3c53ed 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
734 int ret; 734 int ret;
735 735
736 ret = pm_runtime_get_sync(&pdev->dev); 736 ret = pm_runtime_get_sync(&pdev->dev);
737 if (ret) 737 if (ret < 0)
738 return ret; 738 return ret;
739 739
740 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 740 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..939edf473235 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
580 spi->master->set_cs(spi, !enable); 580 spi->master->set_cs(spi, !enable);
581} 581}
582 582
583#ifdef CONFIG_HAS_DMA
583static int spi_map_buf(struct spi_master *master, struct device *dev, 584static int spi_map_buf(struct spi_master *master, struct device *dev,
584 struct sg_table *sgt, void *buf, size_t len, 585 struct sg_table *sgt, void *buf, size_t len,
585 enum dma_data_direction dir) 586 enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
637 } 638 }
638} 639}
639 640
640static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 641static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
641{ 642{
642 struct device *tx_dev, *rx_dev; 643 struct device *tx_dev, *rx_dev;
643 struct spi_transfer *xfer; 644 struct spi_transfer *xfer;
644 void *tmp;
645 unsigned int max_tx, max_rx;
646 int ret; 645 int ret;
647 646
648 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
649 max_tx = 0;
650 max_rx = 0;
651
652 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
653 if ((master->flags & SPI_MASTER_MUST_TX) &&
654 !xfer->tx_buf)
655 max_tx = max(xfer->len, max_tx);
656 if ((master->flags & SPI_MASTER_MUST_RX) &&
657 !xfer->rx_buf)
658 max_rx = max(xfer->len, max_rx);
659 }
660
661 if (max_tx) {
662 tmp = krealloc(master->dummy_tx, max_tx,
663 GFP_KERNEL | GFP_DMA);
664 if (!tmp)
665 return -ENOMEM;
666 master->dummy_tx = tmp;
667 memset(tmp, 0, max_tx);
668 }
669
670 if (max_rx) {
671 tmp = krealloc(master->dummy_rx, max_rx,
672 GFP_KERNEL | GFP_DMA);
673 if (!tmp)
674 return -ENOMEM;
675 master->dummy_rx = tmp;
676 }
677
678 if (max_tx || max_rx) {
679 list_for_each_entry(xfer, &msg->transfers,
680 transfer_list) {
681 if (!xfer->tx_buf)
682 xfer->tx_buf = master->dummy_tx;
683 if (!xfer->rx_buf)
684 xfer->rx_buf = master->dummy_rx;
685 }
686 }
687 }
688
689 if (!master->can_dma) 647 if (!master->can_dma)
690 return 0; 648 return 0;
691 649
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
742 700
743 return 0; 701 return 0;
744} 702}
703#else /* !CONFIG_HAS_DMA */
704static inline int __spi_map_msg(struct spi_master *master,
705 struct spi_message *msg)
706{
707 return 0;
708}
709
710static inline int spi_unmap_msg(struct spi_master *master,
711 struct spi_message *msg)
712{
713 return 0;
714}
715#endif /* !CONFIG_HAS_DMA */
716
717static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
718{
719 struct spi_transfer *xfer;
720 void *tmp;
721 unsigned int max_tx, max_rx;
722
723 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
724 max_tx = 0;
725 max_rx = 0;
726
727 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
728 if ((master->flags & SPI_MASTER_MUST_TX) &&
729 !xfer->tx_buf)
730 max_tx = max(xfer->len, max_tx);
731 if ((master->flags & SPI_MASTER_MUST_RX) &&
732 !xfer->rx_buf)
733 max_rx = max(xfer->len, max_rx);
734 }
735
736 if (max_tx) {
737 tmp = krealloc(master->dummy_tx, max_tx,
738 GFP_KERNEL | GFP_DMA);
739 if (!tmp)
740 return -ENOMEM;
741 master->dummy_tx = tmp;
742 memset(tmp, 0, max_tx);
743 }
744
745 if (max_rx) {
746 tmp = krealloc(master->dummy_rx, max_rx,
747 GFP_KERNEL | GFP_DMA);
748 if (!tmp)
749 return -ENOMEM;
750 master->dummy_rx = tmp;
751 }
752
753 if (max_tx || max_rx) {
754 list_for_each_entry(xfer, &msg->transfers,
755 transfer_list) {
756 if (!xfer->tx_buf)
757 xfer->tx_buf = master->dummy_tx;
758 if (!xfer->rx_buf)
759 xfer->rx_buf = master->dummy_rx;
760 }
761 }
762 }
763
764 return __spi_map_msg(master, msg);
765}
745 766
746/* 767/*
747 * spi_transfer_one_message - Default implementation of transfer_one_message() 768 * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
1151{ 1172{
1152 int ret; 1173 int ret;
1153 1174
1154 master->queued = true;
1155 master->transfer = spi_queued_transfer; 1175 master->transfer = spi_queued_transfer;
1156 if (!master->transfer_one_message) 1176 if (!master->transfer_one_message)
1157 master->transfer_one_message = spi_transfer_one_message; 1177 master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
1162 dev_err(&master->dev, "problem initializing queue\n"); 1182 dev_err(&master->dev, "problem initializing queue\n");
1163 goto err_init_queue; 1183 goto err_init_queue;
1164 } 1184 }
1185 master->queued = true;
1165 ret = spi_start_queue(master); 1186 ret = spi_start_queue(master);
1166 if (ret) { 1187 if (ret) {
1167 dev_err(&master->dev, "problem starting queue\n"); 1188 dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
1171 return 0; 1192 return 0;
1172 1193
1173err_start_queue: 1194err_start_queue:
1174err_init_queue:
1175 spi_destroy_queue(master); 1195 spi_destroy_queue(master);
1196err_init_queue:
1176 return ret; 1197 return ret;
1177} 1198}
1178 1199
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
1756 */ 1777 */
1757int spi_setup(struct spi_device *spi) 1778int spi_setup(struct spi_device *spi)
1758{ 1779{
1759 unsigned bad_bits; 1780 unsigned bad_bits, ugly_bits;
1760 int status = 0; 1781 int status = 0;
1761 1782
1762 /* check mode to prevent that DUAL and QUAD set at the same time 1783 /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
1776 * that aren't supported with their current master 1797 * that aren't supported with their current master
1777 */ 1798 */
1778 bad_bits = spi->mode & ~spi->master->mode_bits; 1799 bad_bits = spi->mode & ~spi->master->mode_bits;
1800 ugly_bits = bad_bits &
1801 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1802 if (ugly_bits) {
1803 dev_warn(&spi->dev,
1804 "setup: ignoring unsupported mode bits %x\n",
1805 ugly_bits);
1806 spi->mode &= ~ugly_bits;
1807 bad_bits &= ~ugly_bits;
1808 }
1779 if (bad_bits) { 1809 if (bad_bits) {
1780 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1810 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1781 bad_bits); 1811 bad_bits);
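The spi.c refactor splits DMA mapping (now compiled out without CONFIG_HAS_DMA) from the dummy-buffer logic behind SPI_MASTER_MUST_TX/MUST_RX: transfers lacking a tx_buf or rx_buf are pointed at shared scratch buffers sized to the largest such transfer. A condensed sketch of that logic over a plain array of transfers, assuming both MUST_TX and MUST_RX behavior is wanted; names and types are simplified stand-ins, and the caller owns and frees the scratch buffers:

#include <stdlib.h>
#include <string.h>

struct transfer {
        const void *tx_buf;
        void *rx_buf;
        size_t len;
};

/* Back missing buffers with shared scratch space; returns 0 or -1. */
static int map_dummy_buffers(struct transfer *xfers, size_t n,
                             void **dummy_tx, void **dummy_rx)
{
        size_t i, max_tx = 0, max_rx = 0;

        for (i = 0; i < n; i++) {  /* size the scratch buffers */
                if (!xfers[i].tx_buf && xfers[i].len > max_tx)
                        max_tx = xfers[i].len;
                if (!xfers[i].rx_buf && xfers[i].len > max_rx)
                        max_rx = xfers[i].len;
        }

        if (max_tx) {
                void *tmp = realloc(*dummy_tx, max_tx);
                if (!tmp)
                        return -1;
                *dummy_tx = memset(tmp, 0, max_tx);  /* TX dummy sends zeros */
        }
        if (max_rx) {
                void *tmp = realloc(*dummy_rx, max_rx);
                if (!tmp)
                        return -1;
                *dummy_rx = tmp;                     /* RX dummy is a sink */
        }

        for (i = 0; i < n; i++) {  /* patch the transfers */
                if (!xfers[i].tx_buf)
                        xfers[i].tx_buf = *dummy_tx;
                if (!xfers[i].rx_buf)
                        xfers[i].rx_buf = *dummy_rx;
        }
        return 0;
}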
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 171a71d20c88..728bf7f14f7b 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -139,6 +139,8 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
139 /* write channel to multiplexer */ 139 /* write channel to multiplexer */
140 /* set mask scan bit high to disable scanning */ 140 /* set mask scan bit high to disable scanning */
141 outb(chan | 0x80, dev->iobase + CMD_R1); 141 outb(chan | 0x80, dev->iobase + CMD_R1);
142 /* mux needs 2us to really settle [Fred Brooks]. */
143 udelay(2);
142 144
143 /* convert n samples */ 145 /* convert n samples */
144 for (n = 0; n < insn->n; n++) { 146 for (n = 0; n < insn->n; n++) {
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index e2b482045158..017d2f8379b7 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -107,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi)
107 int pn, ret = 0; 107 int pn, ret = 0;
108 unsigned short *pins = spi->dev.platform_data; 108 unsigned short *pins = spi->dev.platform_data;
109 109
110 for (pn = 0; pn < AD2S1200_PN; pn++) 110 for (pn = 0; pn < AD2S1200_PN; pn++) {
111 ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT, 111 ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
112 DRV_NAME); 112 DRV_NAME);
113 if (ret) { 113 if (ret) {
@@ -115,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi)
115 pins[pn]); 115 pins[pn]);
116 return ret; 116 return ret;
117 } 117 }
118 }
118 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); 119 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
119 if (!indio_dev) 120 if (!indio_dev)
120 return -ENOMEM; 121 return -ENOMEM;
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6543b349bfb6..def8280d7ee6 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -510,7 +510,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
510 of_node_put(port); 510 of_node_put(port);
511 if (port == imx_crtc->port) { 511 if (port == imx_crtc->port) {
512 ret = of_graph_parse_endpoint(ep, &endpoint); 512 ret = of_graph_parse_endpoint(ep, &endpoint);
513 return ret ? ret : endpoint.id; 513 return ret ? ret : endpoint.port;
514 } 514 }
515 } while (ep); 515 } while (ep);
516 516
@@ -668,6 +668,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
668 if (!remote || !of_device_is_available(remote)) { 668 if (!remote || !of_device_is_available(remote)) {
669 of_node_put(remote); 669 of_node_put(remote);
670 continue; 670 continue;
671 } else if (!of_device_is_available(remote->parent)) {
672 dev_warn(&pdev->dev, "parent device of %s is not available\n",
673 remote->full_name);
674 of_node_put(remote);
675 continue;
671 } 676 }
672 677
673 ret = imx_drm_add_component(&pdev->dev, remote); 678 ret = imx_drm_add_component(&pdev->dev, remote);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index af8af4d1c988..4caef2b1653d 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -577,7 +577,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
577 tve->dev = dev; 577 tve->dev = dev;
578 spin_lock_init(&tve->lock); 578 spin_lock_init(&tve->lock);
579 579
580 ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); 580 ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
581 if (ddc_node) { 581 if (ddc_node) {
582 tve->ddc = of_find_i2c_adapter_by_node(ddc_node); 582 tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
583 of_node_put(ddc_node); 583 of_node_put(ddc_node);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8c101cbbee97..acc8184c46cd 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
1247 struct vpfe_fh *fh = vb2_get_drv_priv(vq); 1247 struct vpfe_fh *fh = vb2_get_drv_priv(vq);
1248 struct vpfe_video_device *video = fh->video; 1248 struct vpfe_video_device *video = fh->video;
1249 1249
1250 if (!vb2_is_streaming(vq))
1251 return 0;
1252 /* release all active buffers */ 1250 /* release all active buffers */
1251 if (video->cur_frm == video->next_frm) {
1252 vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
1253 } else {
1254 if (video->cur_frm != NULL)
1255 vb2_buffer_done(&video->cur_frm->vb,
1256 VB2_BUF_STATE_ERROR);
1257 if (video->next_frm != NULL)
1258 vb2_buffer_done(&video->next_frm->vb,
1259 VB2_BUF_STATE_ERROR);
1260 }
1261
1253 while (!list_empty(&video->dma_queue)) { 1262 while (!list_empty(&video->dma_queue)) {
1254 video->next_frm = list_entry(video->dma_queue.next, 1263 video->next_frm = list_entry(video->dma_queue.next,
1255 struct vpfe_cap_buffer, list); 1264 struct vpfe_cap_buffer, list);
diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..4ba569258498 100644
--- a/drivers/staging/media/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, 48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
49/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ 49/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
50 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, 50 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
51#endif
52 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, 51 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
53 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, 52 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
54#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
55 { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, 53 { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
56 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, 54 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
57 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, 55 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 11d0a9d8ee59..b7dd1539bbc4 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -171,7 +171,7 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
171 snap->oui[1] = oui[1]; 171 snap->oui[1] = oui[1];
172 snap->oui[2] = oui[2]; 172 snap->oui[2] = oui[2];
173 173
174 *(u16 *)(data + SNAP_SIZE) = h_proto; 174 *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
175 175
176 return SNAP_SIZE + sizeof(u16); 176 return SNAP_SIZE + sizeof(u16);
177} 177}
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 57eca7a45672..4fe751f7c2bf 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
953#endif /* CONFIG_8723AU_P2P */ 953#endif /* CONFIG_8723AU_P2P */
954 954
955 rtw_scan_abort23a(padapter); 955 rtw_scan_abort23a(padapter);
956 /* set this at the end */
957 padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
958 956
959 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); 957 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
960 DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); 958 DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index c49160e477d8..07e542e5d156 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
26 if (addr == RECV_BULK_IN_ADDR) { 26 if (addr == RECV_BULK_IN_ADDR) {
27 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); 27 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
28 } else if (addr == RECV_INT_IN_ADDR) { 28 } else if (addr == RECV_INT_IN_ADDR) {
29 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); 29 pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
30 } else if (addr < HW_QUEUE_ENTRY) { 30 } else if (addr < HW_QUEUE_ENTRY) {
31 ep_num = pdvobj->Queue2Pipe[addr]; 31 ep_num = pdvobj->Queue2Pipe[addr];
32 pipe = usb_sndbulkpipe(pusbd, ep_num); 32 pipe = usb_sndbulkpipe(pusbd, ep_num);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 3b6e5358c723..7de79d59a4cd 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2218,6 +2218,7 @@ static void __exit speakup_exit(void)
2218 unregister_keyboard_notifier(&keyboard_notifier_block); 2218 unregister_keyboard_notifier(&keyboard_notifier_block);
2219 unregister_vt_notifier(&vt_notifier_block); 2219 unregister_vt_notifier(&vt_notifier_block);
2220 speakup_unregister_devsynth(); 2220 speakup_unregister_devsynth();
2221 speakup_cancel_paste();
2221 del_timer(&cursor_timer); 2222 del_timer(&cursor_timer);
2222 kthread_stop(speakup_task); 2223 kthread_stop(speakup_task);
2223 speakup_task = NULL; 2224 speakup_task = NULL;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index f0fb00392d6b..ca04d3669acc 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -4,6 +4,10 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/device.h> /* for dev_warn */ 5#include <linux/device.h> /* for dev_warn */
6#include <linux/selection.h> 6#include <linux/selection.h>
7#include <linux/workqueue.h>
8#include <linux/tty.h>
9#include <linux/tty_flip.h>
10#include <asm/cmpxchg.h>
7 11
8#include "speakup.h" 12#include "speakup.h"
9 13
@@ -121,31 +125,61 @@ int speakup_set_selection(struct tty_struct *tty)
121 return 0; 125 return 0;
122} 126}
123 127
124/* TODO: move to some helper thread, probably. That'd fix having to check for 128struct speakup_paste_work {
125 * in_atomic(). */ 129 struct work_struct work;
126int speakup_paste_selection(struct tty_struct *tty) 130 struct tty_struct *tty;
131};
132
133static void __speakup_paste_selection(struct work_struct *work)
127{ 134{
135 struct speakup_paste_work *spw =
136 container_of(work, struct speakup_paste_work, work);
137 struct tty_struct *tty = xchg(&spw->tty, NULL);
128 struct vc_data *vc = (struct vc_data *) tty->driver_data; 138 struct vc_data *vc = (struct vc_data *) tty->driver_data;
129 int pasted = 0, count; 139 int pasted = 0, count;
140 struct tty_ldisc *ld;
130 DECLARE_WAITQUEUE(wait, current); 141 DECLARE_WAITQUEUE(wait, current);
142
143 ld = tty_ldisc_ref_wait(tty);
144 tty_buffer_lock_exclusive(&vc->port);
145
131 add_wait_queue(&vc->paste_wait, &wait); 146 add_wait_queue(&vc->paste_wait, &wait);
132 while (sel_buffer && sel_buffer_lth > pasted) { 147 while (sel_buffer && sel_buffer_lth > pasted) {
133 set_current_state(TASK_INTERRUPTIBLE); 148 set_current_state(TASK_INTERRUPTIBLE);
134 if (test_bit(TTY_THROTTLED, &tty->flags)) { 149 if (test_bit(TTY_THROTTLED, &tty->flags)) {
135 if (in_atomic())
136 /* if we are in an interrupt handler, abort */
137 break;
138 schedule(); 150 schedule();
139 continue; 151 continue;
140 } 152 }
141 count = sel_buffer_lth - pasted; 153 count = sel_buffer_lth - pasted;
142 count = min_t(int, count, tty->receive_room); 154 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
143 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 155 count);
144 NULL, count);
145 pasted += count; 156 pasted += count;
146 } 157 }
147 remove_wait_queue(&vc->paste_wait, &wait); 158 remove_wait_queue(&vc->paste_wait, &wait);
148 current->state = TASK_RUNNING; 159 current->state = TASK_RUNNING;
160
161 tty_buffer_unlock_exclusive(&vc->port);
162 tty_ldisc_deref(ld);
163 tty_kref_put(tty);
164}
165
166static struct speakup_paste_work speakup_paste_work = {
167 .work = __WORK_INITIALIZER(speakup_paste_work.work,
168 __speakup_paste_selection)
169};
170
171int speakup_paste_selection(struct tty_struct *tty)
172{
173 if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
174 return -EBUSY;
175
176 tty_kref_get(tty);
177 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work);
149 return 0; 178 return 0;
150} 179}
151 180
181void speakup_cancel_paste(void)
182{
183 cancel_work_sync(&speakup_paste_work.work);
184 tty_kref_put(speakup_paste_work.tty);
185}
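The speakup rework turns pasting into a work item and uses cmpxchg() on the tty slot both to reject a second paste while one is queued (-EBUSY) and to hand the tty reference to the worker exactly once. A portable sketch of that single-flight handoff with C11 atomics; schedule_work() and do_paste() are hypothetical stand-ins:

#include <stdatomic.h>
#include <stddef.h>

struct tty;  /* opaque stand-in */
static _Atomic(struct tty *) pending_tty;

void schedule_work(void (*fn)(void));  /* hypothetical work queue */
void do_paste(struct tty *tty);        /* hypothetical paste body */

static void paste_worker(void)
{
        /* Claim the tty exactly once, even against a concurrent caller. */
        struct tty *tty = atomic_exchange(&pending_tty, NULL);

        if (tty)
                do_paste(tty);
}

int start_paste(struct tty *tty)
{
        struct tty *expected = NULL;

        /* Only one paste may be in flight: fail if the slot is taken. */
        if (!atomic_compare_exchange_strong(&pending_tty, &expected, tty))
                return -1;  /* -EBUSY in the driver */

        schedule_work(paste_worker);
        return 0;
}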
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7bcceec436a..898dce5e1243 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -75,6 +75,7 @@ extern void synth_buffer_clear(void);
75extern void speakup_clear_selection(void); 75extern void speakup_clear_selection(void);
76extern int speakup_set_selection(struct tty_struct *tty); 76extern int speakup_set_selection(struct tty_struct *tty);
77extern int speakup_paste_selection(struct tty_struct *tty); 77extern int speakup_paste_selection(struct tty_struct *tty);
78extern void speakup_cancel_paste(void);
78extern void speakup_register_devsynth(void); 79extern void speakup_register_devsynth(void);
79extern void speakup_unregister_devsynth(void); 80extern void speakup_unregister_devsynth(void);
80extern void synth_write(const char *buf, size_t count); 81extern void synth_write(const char *buf, size_t count);
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index c7f014ed9628..5079dbd5d7ad 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -60,15 +60,15 @@ static struct kobj_attribute vol_attribute =
60 __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store); 60 __ATTR(vol, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
61 61
62static struct kobj_attribute delay_time_attribute = 62static struct kobj_attribute delay_time_attribute =
63 __ATTR(delay_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 63 __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
64static struct kobj_attribute direct_attribute = 64static struct kobj_attribute direct_attribute =
65 __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store); 65 __ATTR(direct, S_IWUGO|S_IRUGO, spk_var_show, spk_var_store);
66static struct kobj_attribute full_time_attribute = 66static struct kobj_attribute full_time_attribute =
67 __ATTR(full_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 67 __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
68static struct kobj_attribute jiffy_delta_attribute = 68static struct kobj_attribute jiffy_delta_attribute =
69 __ATTR(jiffy_delta, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 69 __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
70static struct kobj_attribute trigger_time_attribute = 70static struct kobj_attribute trigger_time_attribute =
71 __ATTR(trigger_time, S_IRUSR|S_IRUGO, spk_var_show, spk_var_store); 71 __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
72 72
73/* 73/*
74 * Create a group of attributes so that we can create and destroy them all 74 * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 78cab13bbb1b..46588c85d39b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * Initiator is expecting a NopIN ping reply..
 	 */
 	if (hdr->itt != RESERVED_ITT) {
-		BUG_ON(!cmd);
+		if (!cmd)
+			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+						 (unsigned char *)hdr);
 
 		spin_lock_bh(&conn->cmd_lock);
 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 6960f22909ae..302eb3b78715 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -775,6 +775,7 @@ struct iscsi_np {
 	int			np_ip_proto;
 	int			np_sock_type;
 	enum np_thread_state_table np_thread_state;
+	bool			enabled;
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8739b98f6f93..ca31fa1b8a4b 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
 	}
 	off = mrdsl % PAGE_SIZE;
 	if (!off)
-		return 0;
+		goto check_prot;
 
 	if (mrdsl < PAGE_SIZE)
 		mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
 					ISCSI_LOGIN_STATUS_NO_RESOURCES);
 			return -1;
 		}
+		/*
+		 * ISER currently requires that ImmediateData + Unsolicited
+		 * Data be disabled when protection / signature MRs are enabled.
+		 */
+check_prot:
+		if (sess->se_sess->sup_prot_ops &
+		    (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+		     TARGET_PROT_DOUT_INSERT)) {
+
+			sprintf(buf, "ImmediateData=No");
+			if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+						    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+				return -1;
+			}
+
+			sprintf(buf, "InitialR2T=Yes");
+			if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+						    ISCSI_LOGIN_STATUS_NO_RESOURCES);
+				return -1;
+			}
+			pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+				 " T10-PI enabled ISER session\n");
+		}
 	}
 
 	return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
 	}
 
 	np->np_transport = t;
+	np->enabled = true;
 	return 0;
 }
 
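[Note] The two identical iscsi_change_param_value() blocks in the check_prot hunk follow one shape: rewrite a key=value login parameter, fail the login on error. A hypothetical helper (not in the target code; name and signature invented here) showing how they could be table-driven:

static int force_login_params(struct iscsi_conn *conn,
			      const char *const *overrides, int nr)
{
	char buf[64];
	int i;

	for (i = 0; i < nr; i++) {
		snprintf(buf, sizeof(buf), "%s", overrides[i]);
		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
			return -1;
		}
	}
	return 0;
}

The hunk above would then be a single call with { "ImmediateData=No", "InitialR2T=Yes" }.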
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index eb96b20dc09e..ca1811858afd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
 		return;
 	}
 
+	tpg_np->tpg_np->enabled = false;
 	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 65001e133670..26416c15d65c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("emulate_write_cache not supported for pSCSI\n");
 		return -EINVAL;
 	}
-	if (dev->transport->get_write_cache) {
-		pr_warn("emulate_write_cache cannot be changed when underlying"
-			" HW reports WriteCacheEnabled, ignoring request\n");
-		return 0;
+	if (flag &&
+	    dev->transport->get_write_cache) {
+		pr_err("emulate_write_cache not supported for this device\n");
+		return -EINVAL;
 	}
 
 	dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
 		return 0;
 	}
 	if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		/* 0 is only allowed value for non-supporting backends */
+		if (flag == 0)
+			return 0;
+
 		pr_err("DIF protection not supported by backend: %s\n",
 		       dev->transport->name);
 		return -ENOSYS;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d4b98690a736..789aa9eb0a1e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
 	init_completion(&cmd->cmd_wait_comp);
 	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
+	kref_init(&cmd->cmd_kref);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
 
 	cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 	unsigned long flags;
 	int ret = 0;
 
-	kref_init(&se_cmd->cmd_kref);
 	/*
 	 * Add a second kref if the fabric caller is expecting to handle
 	 * fabric acknowledgement that requires two target_put_sess_cmd()
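[Note] This pair of hunks moves kref_init() out of target_get_sess_cmd() and into transport_init_se_cmd(), following the usual kref discipline: initialize the refcount exactly once when the object is constructed, and only get/put afterwards. A generic sketch of that lifecycle (illustrative names, not the target core's):

#include <linux/kref.h>
#include <linux/slab.h>

struct my_cmd {
	struct kref kref;
	/* ... payload ... */
};

static void my_cmd_release(struct kref *kref)
{
	struct my_cmd *cmd = container_of(kref, struct my_cmd, kref);

	kfree(cmd);
}

static struct my_cmd *my_cmd_create(void)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);

	if (cmd)
		kref_init(&cmd->kref);	/* refcount = 1, exactly once */
	return cmd;
}

/* every later user takes its own reference ... */
static void my_cmd_get(struct my_cmd *cmd)
{
	kref_get(&cmd->kref);
}

/* ... and drops it; the last put frees the object */
static void my_cmd_put(struct my_cmd *cmd)
{
	kref_put(&cmd->kref, my_cmd_release);
}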
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 01cf37f212c3..f5fd515b2bee 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 {
 	struct fc_frame *fp;
 	struct fc_lport *lport;
-	struct se_session *se_sess;
+	struct ft_sess *sess;
 
 	if (!cmd)
 		return;
-	se_sess = cmd->sess->se_sess;
+	sess = cmd->sess;
 	fp = cmd->req_frame;
 	lport = fr_dev(fp);
 	if (fr_seq(fp))
 		lport->tt.seq_release(fr_seq(fp));
 	fc_frame_free(fp);
-	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
-	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ft_sess_put(sess);	/* undo get from lookup at recv */
 }
 
 void ft_release_cmd(struct se_cmd *se_cmd)
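[Note] The tfc_cmd.c hunk is a grab-before-put fix: ft_sess_put() can drop the final reference to the session, so anything reached through it must be read while the reference is still held. The rule in its smallest form (hypothetical types and helpers, not tcm_fc's):

struct session { int refs; /* freed by the last session_put() */ };
struct cmd { struct session *sess; int tag; };

void session_put(struct session *sess);	/* may free sess */
void tag_free(struct session *sess, int tag);

void free_cmd(struct cmd *cmd)
{
	struct session *sess = cmd->sess;

	/* safe: our reference still pins the session */
	tag_free(sess, cmd->tag);

	/* drop the reference last; sess may be gone afterwards */
	session_put(sess);
}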
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 94f9e3a38412..0ff7fda0742f 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
 	return hvc_driver;
 }
 
-static int __init hvc_console_setup(struct console *co, char *options)
+static int hvc_console_setup(struct console *co, char *options)
 {
 	if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
 		return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41fe8a047d37..fe9d129c8735 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
 			if (tty->ops->flush_chars)
 				tty->ops->flush_chars(tty);
 		} else {
+			struct n_tty_data *ldata = tty->disc_data;
+
 			while (nr > 0) {
+				mutex_lock(&ldata->output_lock);
 				c = tty->ops->write(tty, b, nr);
+				mutex_unlock(&ldata->output_lock);
 				if (c < 0) {
 					retval = c;
 					goto break_out;
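[Note] The n_tty hunk wraps each chunk handed to the driver in ldata->output_lock so concurrent output paths cannot interleave inside a chunk. Reduced to its shape (write_chunk() is a hypothetical stand-in for tty->ops->write()):

#include <linux/mutex.h>

static DEFINE_MUTEX(output_lock);

static int write_chunk(const unsigned char *buf, size_t nr);

static int locked_write(const unsigned char *buf, size_t nr)
{
	int written = 0;

	while (nr > 0) {
		int c;

		/* hold the lock per chunk, not for the whole write() */
		mutex_lock(&output_lock);
		c = write_chunk(buf, nr);
		mutex_unlock(&output_lock);
		if (c < 0)
			return c;
		if (!c)
			break;
		buf += c;
		nr -= c;
		written += c;
	}
	return written;
}

Taking the lock per chunk keeps one writer from starving the others while still making each chunk atomic with respect to other output.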
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 0e1bf8858431..2d4bd3929e50 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
 	 */
 	if ((p->port.type == PORT_XR17V35X) ||
 	    (p->port.type == PORT_XR17D15X)) {
-		serial_out(p, UART_EXAR_SLEEP, 0xff);
+		serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
 		return;
 	}
 
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f1d30f6945af..143deb62467d 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -60,6 +60,7 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
 	atomic_inc(&buf->priority);
 	mutex_lock(&buf->lock);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
 
 void tty_buffer_unlock_exclusive(struct tty_port *port)
 {
@@ -73,6 +74,7 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
 	if (restart)
 		queue_work(system_unbound_wq, &buf->work);
 }
+EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
 
 /**
  *	tty_buffer_space_avail	-	return unused buffer space
@@ -255,16 +257,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
 	if (change || left < size) {
 		/* This is the slow path - looking for new buffers to use */
 		if ((n = tty_buffer_alloc(port, size)) != NULL) {
-			unsigned long iflags;
-
 			n->flags = flags;
 			buf->tail = n;
-
-			spin_lock_irqsave(&buf->flush_lock, iflags);
 			b->commit = b->used;
+			/* paired w/ barrier in flush_to_ldisc(); ensures the
+			 * latest commit value can be read before the head is
+			 * advanced to the next buffer
+			 */
+			smp_wmb();
 			b->next = n;
-			spin_unlock_irqrestore(&buf->flush_lock, iflags);
-
 		} else if (change)
 			size = 0;
 		else
@@ -448,27 +449,28 @@ static void flush_to_ldisc(struct work_struct *work)
 	mutex_lock(&buf->lock);
 
 	while (1) {
-		unsigned long flags;
 		struct tty_buffer *head = buf->head;
+		struct tty_buffer *next;
 		int count;
 
 		/* Ldisc or user is trying to gain exclusive access */
 		if (atomic_read(&buf->priority))
 			break;
 
-		spin_lock_irqsave(&buf->flush_lock, flags);
+		next = head->next;
+		/* paired w/ barrier in __tty_buffer_request_room();
+		 * ensures commit value read is not stale if the head
+		 * is advancing to the next buffer
+		 */
+		smp_rmb();
 		count = head->commit - head->read;
 		if (!count) {
-			if (head->next == NULL) {
-				spin_unlock_irqrestore(&buf->flush_lock, flags);
+			if (next == NULL)
 				break;
-			}
-			buf->head = head->next;
-			spin_unlock_irqrestore(&buf->flush_lock, flags);
+			buf->head = next;
 			tty_buffer_free(port, head);
 			continue;
 		}
-		spin_unlock_irqrestore(&buf->flush_lock, flags);
 
 		count = receive_buf(tty, head, count);
 		if (!count)
@@ -523,7 +525,6 @@ void tty_buffer_init(struct tty_port *port)
 	struct tty_bufhead *buf = &port->buf;
 
 	mutex_init(&buf->lock);
-	spin_lock_init(&buf->flush_lock);
 	tty_buffer_reset(&buf->sentinel, 0);
 	buf->head = &buf->sentinel;
 	buf->tail = &buf->sentinel;
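[Note] The tty_buffer changes replace flush_lock with a write/read barrier pair, which is sufficient here because there is a single producer (__tty_buffer_request_room()) and a single consumer (flush_to_ldisc()): the producer must make its final commit store visible before publishing the successor buffer through ->next, and the consumer must load ->next before it trusts commit. A stripped-down sketch of that ordering (illustrative, not the tty code itself):

#include <asm/barrier.h>

struct sketch_buf {
	int used;			/* producer-local fill level */
	int commit;			/* bytes made visible to consumer */
	struct sketch_buf *next;	/* non-NULL publishes the successor */
};

/* Producer: finalize commit, then publish the next buffer. */
static void sketch_publish(struct sketch_buf *b, struct sketch_buf *n)
{
	b->commit = b->used;
	smp_wmb();	/* commit store must be visible before next store */
	b->next = n;
}

/* Consumer: load next first, then (after the barrier) commit. */
static struct sketch_buf *sketch_advance(struct sketch_buf *head, int *count)
{
	struct sketch_buf *next = head->next;

	smp_rmb();	/* pairs with smp_wmb() in sketch_publish() */
	*count = head->commit;	/* not stale if next was seen non-NULL */
	return next;
}

If the consumer observes the new ->next, the paired barriers guarantee it also observes the commit value stored before publication, which is exactly the property flush_lock used to provide.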
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 888881e5f292..4aeb10034de7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1822,10 +1822,13 @@ int usb_runtime_suspend(struct device *dev)
 	if (status == -EAGAIN || status == -EBUSY)
 		usb_mark_last_busy(udev);
 
-	/* The PM core reacts badly unless the return code is 0,
-	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
+	/*
+	 * The PM core reacts badly unless the return code is 0,
+	 * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
+	 * (except for root hubs, because they don't suspend through
+	 * an upstream port like other USB devices).
 	 */
-	if (status != 0)
+	if (status != 0 && udev->parent)
 		return -EBUSY;
 	return status;
 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 090469ebfcff..229a73f64304 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1691,8 +1691,19 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	 */
 	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
 
-	/* Hubs have proper suspend/resume support. */
-	usb_enable_autosuspend(hdev);
+	/*
+	 * Hubs have proper suspend/resume support, except for root hubs
+	 * where the controller driver doesn't have bus_suspend and
+	 * bus_resume methods.
+	 */
+	if (hdev->parent) {		/* normal device */
+		usb_enable_autosuspend(hdev);
+	} else {			/* root hub */
+		const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
+
+		if (drv->bus_suspend && drv->bus_resume)
+			usb_enable_autosuspend(hdev);
+	}
 
 	if (hdev->level == MAX_TOPO_LEVEL) {
 		dev_err(&intf->dev,
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index f605ad8c1902..cfd18bcca723 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	if (pdev->num_resources != 2) {
-		DBG("invalid num_resources\n");
-		return -ENODEV;
-	}
-	if ((pdev->resource[0].flags != IORESOURCE_MEM)
-			|| (pdev->resource[1].flags != IORESOURCE_IRQ)) {
-		DBG("invalid resource type\n");
-		return -ENODEV;
-	}
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENXIO;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 6f2c8d3899d2..cf2734b532a7 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 		break;
 	}
 
-	if (pdata->have_sysif_regs && pdata->controller_ver &&
+	if (pdata->have_sysif_regs &&
+	    pdata->controller_ver > FSL_USB_VER_1_6 &&
 	    (phy_mode == FSL_USB2_PHY_ULPI)) {
 		/* check PHY_CLK_VALID to get phy clk valid */
 		if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index c81c8721cc5a..cd871b895013 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
 	dl_done_list (ohci);
 	finish_unlinks (ohci, ohci_frame_no(ohci));
 
+	/*
+	 * Some controllers don't handle "global" suspend properly if
+	 * there are unsuspended ports.  For these controllers, put all
+	 * the enabled ports into suspend before suspending the root hub.
+	 */
+	if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+		__hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
+		int i;
+		unsigned temp;
+
+		for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+			temp = ohci_readl(ohci, portstat);
+			if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+					RH_PS_PES)
+				ohci_writel(ohci, RH_PS_PSS, portstat);
+		}
+	}
+
 	/* maybe resume can wake root hub */
 	if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
 		ohci->hc_control |= OHCI_CTRL_RWE;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 90879e9ccbec..bb1509675727 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
 		ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
 	}
 
+	ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
 	return 0;
 }
 
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 9250cada13f0..4550ce05af7f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -405,6 +405,8 @@ struct ohci_hcd {
 #define	OHCI_QUIRK_HUB_POWER	0x100			/* distrust firmware power/oc setup */
 #define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
 #define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
+#define	OHCI_QUIRK_GLOBAL_SUSPEND	0x800		/* must suspend ports */
+
 	// there are also chip quirks/bugs in init logic
 
 	struct work_struct	nec_work;	/* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 00661d305143..4a6d3dd68572 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -847,6 +847,13 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
 	bool ehci_found = false;
 	struct pci_dev *companion = NULL;
 
+	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
+	 * switching ports from EHCI to xHCI
+	 */
+	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
+			xhci_pdev->subsystem_device == 0x90a8)
+		return;
+
 	/* make sure an intel EHCI controller exists */
 	for_each_pci_dev(companion) {
 		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c089668308ad..b1a8a5f4bbb8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1822,6 +1822,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 		kfree(cur_cd);
 	}
 
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	for (i = 0; i < num_ports; i++) {
+		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			struct list_head *ep = &bwt->interval_bw[j].endpoints;
+			while (!list_empty(ep))
+				list_del_init(ep->next);
+		}
+	}
+
 	for (i = 1; i < MAX_HC_SLOTS; ++i)
 		xhci_free_virt_device(xhci, i);
 
@@ -1857,16 +1867,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	if (!xhci->rh_bw)
 		goto no_bw;
 
-	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-	for (i = 0; i < num_ports; i++) {
-		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
-		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
-			struct list_head *ep = &bwt->interval_bw[j].endpoints;
-			while (!list_empty(ep))
-				list_del_init(ep->next);
-		}
-	}
-
 	for (i = 0; i < num_ports; i++) {
 		struct xhci_tt_bw_info *tt, *n;
 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
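[Note] The two xhci-mem.c hunks reorder teardown so the bandwidth-table endpoint lists are drained before xhci_free_virt_device() frees the endpoints still linked on them; otherwise those lists would briefly reference freed memory. The drain itself is the stock pop-until-empty idiom:

#include <linux/list.h>

/*
 * Unlink entries one at a time until the list is empty.  list_del_init()
 * re-initializes each removed entry, so a later list_del() on the same
 * entry (e.g. from the endpoint's own teardown) stays safe.
 */
static void drain_list(struct list_head *head)
{
	while (!list_empty(head))
		list_del_init(head->next);
}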
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c47e5a6edde2..d03fadd2629f 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm)
 		otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
 		break;
 	case OTG_STATE_A_WAIT_VRISE:
-		if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld ||
-				fsm->a_wait_vrise_tmout) {
+		if (fsm->a_vbus_vld)
 			otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
-		}
+		else if (fsm->id || fsm->a_bus_drop ||
+				fsm->a_wait_vrise_tmout)
+			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
 		break;
 	case OTG_STATE_A_WAIT_BCON:
 		if (!fsm->a_vbus_vld)
 			otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
 		else if (fsm->b_conn)
 			otg_set_state(fsm, OTG_STATE_A_HOST);
-		else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout)
+		else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
 			otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
 		break;
 	case OTG_STATE_A_HOST:
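[Note] The second phy-fsm-usb.c hunk replaces bitwise `|` with logical `||`. For 0/1 flags both yield the same truth value, but `|` evaluates every operand and merges bits, while `||` expresses a condition and short-circuits; with non-boolean operands the two families can genuinely disagree:

/* Illustrative only: with a = 0x2 and b = 0x1,
 *   a | b  == 0x3   (bit merge, all operands evaluated)
 *   a || b == 1     (truth value, stops at the first true operand)
 * and (0x2 & 0x1) is 0 while (0x2 && 0x1) is 1, so mixing the operator
 * families inside conditions is an easy way to change behavior.
 */
static int either_set(int a, int b)
{
	return a || b;	/* a condition: what the state machine means */
}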
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c6e1dedeb06..edf3b124583c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -580,6 +580,8 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	/*
 	 * ELV devices:
 	 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 993c93df6874..500474c48f4b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -538,6 +538,11 @@
  */
 #define FTDI_TIAO_UMPA_PID	0x8a98	/* TIAO/DIYGADGET USB Multi-Protocol Adapter */
 
+/*
+ * NovaTech product ids (FTDI_VID)
+ */
+#define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+
 
 /********************************/
 /** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index df90dae53eb9..c0a42e9e6777 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -821,7 +821,7 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
 	firmware_rec =  (struct ti_i2c_firmware_rec*)i2c_header->Data;
 
 	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
-	i2c_header->Size	= (__u16)buffer_size;
+	i2c_header->Size	= cpu_to_le16(buffer_size);
 	i2c_header->CheckSum	= cs;
 	firmware_rec->Ver_Major	= OperationalMajorVersion;
 	firmware_rec->Ver_Minor	= OperationalMinorVersion;
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 51f83fbb73bb..6f6a856bc37c 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -594,7 +594,7 @@ struct edge_boot_descriptor {
 
 struct ti_i2c_desc {
 	__u8	Type;			// Type of descriptor
-	__u16	Size;			// Size of data only not including header
+	__le16	Size;			// Size of data only not including header
 	__u8	CheckSum;		// Checksum (8 bit sum of data only)
 	__u8	Data[0];		// Data starts here
 } __attribute__((packed));
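[Note] The io_ti.c/io_usbvend.h pair is an endianness fix: the descriptor's Size field is little-endian on the device, so its in-memory type becomes __le16 and every writer must go through cpu_to_le16() (readers through le16_to_cpu()), which also lets sparse flag any raw assignment. The convention in miniature (hypothetical struct, not Edgeport's):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_desc {
	__u8	type;
	__le16	size;		/* little-endian on the wire */
	__u8	checksum;
} __attribute__((packed));

static void wire_desc_set_size(struct wire_desc *d, u16 host_size)
{
	/* host order -> wire order; a no-op on little-endian CPUs */
	d->size = cpu_to_le16(host_size);
}

static u16 wire_desc_get_size(const struct wire_desc *d)
{
	/* wire order -> host order */
	return le16_to_cpu(d->size);
}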
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f213ee978516..948a19f0cdf7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+#define NOVATELWIRELESS_PRODUCT_E371		0x9011
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -1012,6 +1013,7 @@ static const struct usb_device_id option_ids[] = {
 	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7ed681a714a5..6c0a542e8ec1 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -151,6 +151,21 @@ static const struct usb_device_id id_table[] = {
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},	/* Netgear AirCard 340U Device Management */
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},	/* Netgear AirCard 340U NMEA */
 	{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},	/* Netgear AirCard 340U Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+	{USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
 
 	{ }	/* Terminating entry */
 };
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 4ef2a80728f7..008d805c3d21 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
 	us->transport_name = "Shuttle USBAT";
 	us->transport = usbat_flash_transport;
 	us->transport_reset = usb_stor_CB_reset;
-	us->max_lun = 1;
+	us->max_lun = 0;
 
 	result = usb_stor_probe2(us);
 	return result;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f4a82291894a..174a447868cd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Daniele Forsi <dforsi@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x04b9, 0x0350, 0x0350,
+		"Nokia",
+		"5300",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64 ),
+
+/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x05af, 0x0742, 0x0742,
+		"Nokia",
+		"305",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_MAX_SECTORS_64),
+
 /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
 UNUSUAL_DEV(  0x0421, 0x06aa, 0x1110, 0x1110,
 		"Nokia",
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 96109a9972b6..84b4bfb84344 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
 static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
 static unsigned event_array_pages __read_mostly;
 
+/*
+ * sync_set_bit() and friends must be unsigned long aligned on non-x86
+ * platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+	(((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
 #define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
 
 static inline event_word_t *event_word_from_port(unsigned port)
 {
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
 static void evtchn_fifo_clear_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+	sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static void evtchn_fifo_set_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+	sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_is_pending(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
 }
 
 static bool evtchn_fifo_test_and_set_mask(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
 static void evtchn_fifo_mask(unsigned port)
 {
 	event_word_t *word = event_word_from_port(port);
-	sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+	sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
 }
 
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+	event_word_t *word = event_word_from_port(port);
+	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
 /*
  * Clear MASKED, spinning if BUSY is set.
  */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
 	BUG_ON(!irqs_disabled());
 
 	clear_masked(word);
-	if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+	if (evtchn_fifo_is_pending(port)) {
 		struct evtchn_unmask unmask = { .port = port };
 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 	}
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
 			      struct evtchn_fifo_control_block *control_block,
-			      unsigned priority, uint32_t *ready)
+			      unsigned priority, unsigned long *ready)
 {
 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
 	uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
 	 * copy of the ready word.
 	 */
 	if (head == 0)
-		clear_bit(priority, BM(ready));
+		clear_bit(priority, ready);
 
-	if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
-	    && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
 		handle_irq_for_port(port);
 
 	q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
 static void evtchn_fifo_handle_events(unsigned cpu)
 {
 	struct evtchn_fifo_control_block *control_block;
-	uint32_t ready;
+	unsigned long ready;
 	unsigned q;
 
 	control_block = per_cpu(cpu_control_block, cpu);
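
[Note] The events_fifo.c BM()/EVTCHN_FIFO_BIT() pair encodes one trick: event words are 32-bit, but sync_set_bit() and friends want an unsigned-long-aligned base on 64-bit non-x86 platforms. BM() rounds the word's address down to an 8-byte boundary, and EVTCHN_FIFO_BIT() adds 32 to the bit index whenever the word was the upper half of that long. The same arithmetic written as functions, purely as a readability sketch of the macros above (little-endian half ordering assumed, as on arm64):

static unsigned long *bm_base(event_word_t *word)
{
	/* round the 32-bit word's address down to an 8-byte boundary */
	return (unsigned long *)((unsigned long)word & ~0x7UL);
}

static unsigned bm_bit(event_word_t *word, unsigned bit)
{
	/* word in the upper half of the long? shift the index by 32 */
	return ((unsigned long)word & 0x4UL) ? bit + 32 : bit;
}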